pax_global_header00006660000000000000000000000064131526412160014513gustar00rootroot0000000000000052 comment=d204f6d1a0bfdec6c8167e7abf86b00dd3bb8f8f csg-1.4.1/000077500000000000000000000000001315264121600122725ustar00rootroot00000000000000csg-1.4.1/.gitignore000066400000000000000000000017541315264121600142710ustar00rootroot00000000000000*~ csg_*.man csg_*.t2t csg_*.help libvotca*.pc libvotca*.a libvotca*.so libvotca*.so.* Makefile rules.ninja build.ninja .ninja_* build scripts/csg_call scripts/csg_inverse scripts/help2t2t scripts/csg_inverse.t2t scripts/*.out share/doc/Doxyfile share/doc/html/* share/man/*.man share/template/template_serial share/template/template_threaded share/xml/csg_defaults.xml src/csg_boltzmann/csg_boltzmann src/libcsg/version.cc src/tools/csg_dump src/tools/csg_fmatch src/tools/csg_gmxtopol src/tools/csg_imcrepack src/tools/csg_map src/tools/csg_property src/tools/csg_resample src/tools/csg_stat src/tools/csg_part_dist src/tools/csg_density src/tools/csg_reupdate src/tools/csg_dlptopol src/libcsg/gitversion.h src/libcsg/votca_config.h netbeans/*/build netbeans/*/dist src/csg_boltzmann/build */nbproject/private Makefile-variables.mk Package-Debug.bash Package-Release.bash Package-profile_release.bash CMakeFiles cmake_install.cmake cmake_uninstall.cmake CMakeCache.txt install_manifest.txt build* csg-1.4.1/.travis.yml000066400000000000000000000141161315264121600144060ustar00rootroot00000000000000#the original source of this file can be found in tools repository change it ther # changes in tools|csg tiggered a full rebuild # changes in csgapps|csg-manual|csg-tutorials only trigger tools+csg+ifself language: cpp sudo: false addons: apt: sources: - boost-latest - george-edison55-precise-backports - ubuntu-toolchain-r-test packages: - ccache - gcc-4.8 - g++-4.8 - libfftw3-dev - cmake - cmake-data - libgsl0-dev - txt2tags - libboost1.55-all-dev - libexpat1-dev - libsqlite3-dev - libhdf5-serial-dev - pkg-config - pgf - texlive-fonts-recommended - texlive-latex-extra - texlive-latex-recommended - cm-super - doxygen - graphviz - ghostscript - gnuplot-nox - octave env: global: - HDF5_VERSION=1.8.18 - GMX_MAX_THREADS=4 - CSG_MDRUN_OPTS="-ntmpi 4" - CCACHE_CPP2=yes #for clang - GVER=4.8 matrix: #NINJA=1 is currently not support by Travis' cmake, enable when >=2.8.9 - XTP=yes BDIR=build WERROR=yes J=4 TYPE=Debug - XTP=yes BDIR=build WALL=yes J=4 TYPE=Debug - XTP=yes BDIR=build WERROR=yes J=4 TYPE=Release - XTP=yes BDIR=build WALL=yes J=4 TYPE=Release MAN=yes - WALL=yes GMX_DOUBLE=yes J=4 - WALL=yes GMX_VERSION=5.0 J=4 - WALL=yes GMX_VERSION=5.0 GMX_DOUBLE=yes J=4 - WALL=yes GMX_VERSION=5.1 J=4 - WALL=yes GMX_VERSION=5.1 GMX_DOUBLE=yes J=4 - WALL=yes GMX_VERSION=9999 J=4 - WALL=yes GMX_VERSION=9999 GMX_DOUBLE=yes J=4 - MINIMAL=yes J=4 - CSG_MDRUN_STEPS=500 TEST=hexane/ibi_bonded J=4 - CSG_MDRUN_STEPS=500 TEST=hexane/ibi J=4 - CSG_MDRUN_STEPS=500 TEST=methanol/ibi J=4 - CSG_MDRUN_STEPS=10000 TEST=methanol/imc J=4 - CSG_MDRUN_STEPS=5000 TEST=methanol-water/X_0.938/re J=4 - CSG_MDRUN_STEPS=500 TEST=methanol-water/X_0.938/simplex J=4 - CSG_MDRUN_STEPS=5000 TEST=methanol-water/X_0.062/re J=4 - CSG_MDRUN_STEPS=500 TEST=methanol-water/X_0.062/simplex J=4 - CSG_MDRUN_STEPS=5000 TEST=methanol-water/X_0.5/re J=4 - CSG_MDRUN_STEPS=500 TEST=methanol-water/X_0.5/simplex J=4 - CSG_MDRUN_STEPS=500 TEST=propane/ibi J=4 - CSG_MDRUN_STEPS=10000 TEST=propane/imc J=4 - CSG_MDRUN_STEPS=500 TEST=spce/cma/density J=4 - CSG_MDRUN_STEPS=500 TEST=spce/cma/simple J=4 - CSG_MDRUN_STEPS=500 TEST=spce/ibi J=4 - 
CSG_MDRUN_STEPS=500 TEST=spce/ibi_pressure J=4 - CSG_MDRUN_STEPS=10000 TEST=spce/imc J=4 - CSG_MDRUN_STEPS=10000 TEST=spce/realtime J=4 - CSG_MDRUN_STEPS=5000 TEST=spce/re J=4 - CSG_MDRUN_STEPS=500 TEST=spce/simplex/density J=4 - CSG_MDRUN_STEPS=500 TEST=spce/simplex/pressure J=4 - CSG_MDRUN_STEPS=500 TEST=spce/simplex/simple J=4 - CSG_MDRUN_STEPS=500 TEST=urea-water/ibi J=4 - CSG_MDRUN_STEPS=500 TEST=urea-water/kb-ibi J=4 - CSG_MDRUN_STEPS=500 TEST=urea-water/cibi J=4 - CSG_MDRUN_STEPS=500 TEST=He-Ar/imc J=4 before_script: - rm -vrf * .git - wget https://raw.githubusercontent.com/votca/buildutil/master/build.sh && chmod 755 build.sh - mkdir -p "$HOME/votca/src" - if [[ ${MINIMAL} != yes ]]; then wget -qO- https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-${HDF5_VERSION%.*}/hdf5-${HDF5_VERSION}/src/hdf5-${HDF5_VERSION}.tar.gz | tar -xz && cd hdf5-${HDF5_VERSION} && ./configure --prefix=$HOME/hdf5 && make -j4 &> /dev/null && make install && cd ..; fi - if [[ ${GMX_VERSION} = 9999 ]]; then CMAKE_VERSION=3.4.3-Linux-x86_64 && wget --no-check-certificate -qO- http://www.cmake.org/files/v${CMAKE_VERSION:0:3}/cmake-${CMAKE_VERSION}.tar.gz | tar -xz && export CMAKE="$PWD/cmake-${CMAKE_VERSION}/bin/cmake" ; fi - if [[ ${MAN} ]]; then git clone --depth=1 https://github.com/votca/doxygen.git "$HOME/votca/src/devdoc"; fi - if [[ ${GVER} ]]; then export CC=gcc-${GVER}; export CXX=g++-${GVER}; fi - pip install --user numpy script: - PATH="$HOME/hdf5:$PATH" ./build.sh -Wu --prefix "$HOME/votca" ${TYPE:+-DCMAKE_BUILD_TYPE=${TYPE}} ${TEST:+--runtest=$TEST} ${WERROR:+--warn-to-errors} -DWITH_H5MD=ON ${MINIMAL:+--minimal} ${MAN:+--devdoc} ${J:+-j$J} ${BDIR:+--builddir=$BDIR} ${NINJA:+--ninja} ${WALL:+--Wall} ${GMX_VERSION:+--gmx-release ${GMX_VERSION}} --directory "$HOME/votca/src" --no-clean --depth 1 -DGMX_USE_RDTSCP=OFF ${GMX_DOUBLE:+-DGMX_DOUBLE=yes} tools $([[ ${MINIMAL} ]] || echo gromacs) csg csgapps ${MAN:+csg-manual} ${TEST:+csg-tutorials} ${CTP:+kmc moo ctp} ${XTP:+xtp} after_success: - if [[ ${MAN} && ${CC} = gcc* ]]; then cp "$HOME"/votca/src/{csg-manual,devdoc}/manual.pdf; cd "$HOME/votca/src/devdoc"; if [[ ${TRAVIS_BRANCH} = master && ${encrypted_3780e1dc437f_key} && ${encrypted_3780e1dc437f_iv} && ${TRAVIS_PULL_REQUEST} == false ]]; then git config --global user.name "Automatic Deployment (Travis CI)"; git config --global user.email "votca-commits@googlegroups.com"; git add --all && git commit -m "Documentation Update"; openssl aes-256-cbc -K $encrypted_3780e1dc437f_key -iv $encrypted_3780e1dc437f_iv -in deploy.enc -out ~/.ssh/id_rsa -d; chmod 600 ~/.ssh/id_rsa; git push git@github.com:votca/doxygen.git gh-pages; else git diff --no-color | head -n 500; fi; fi cache: - ccache compiler: - gcc notifications: email: recipients: #encrypted votca-commits@googlegroups.com so that fork don't notify us (travis-ci/travis-ci#6100) secure: 
yAYhUIO6l2FKfapQNhde+nTH2tGbOj4yWgzcYXyw8Hw/mtZ84uShUeB7asGklr0JFQ8zXw7vg1HT1a2uU09OCx2RcKXnlJ66yFRnBqvzC80yTxlJhYjJsHSXo5/pmT5d045k6WTH3IxHc9Flx2szbavQPz/HLxxI+fN4kfNJxtOVghiQqlUdNDEnwq1raliPOAxS/ANQt3phPmrZUS7I/QGfEOQlwEzzgV7NWPpwR3L83hIfu4dOM5FTFkKHqXQXxjMxvI1KnEr8o+yU7m/l4ruYkEw8Axsnd4NjiF8JsWPZOkee24+cspRl6bTgTb3w7l4beuWTfcQKg8svGeeoMM5RWjwZJnUhYywSwkuMdeeZ5i0RJAIbq1tcoWYSL5KdwGs3NPLu1J1OR3ArUUXFVN/m7njgnr/3XRNx5tXYZ9A1blAMBTAFMzik3yKhERA4QJz9aUXZEwfPQ1FTZEuYm8fhnY5+n+NgKzXgLPbVcgAxoTmseSf+rnvZXltlX6Dxql09iqGhxJlRBw8PJuepFf+J+wU28g+dcm6N8U/TNTOCmO6WgqKw/65Hbv8Xhd6pMC/oTVeal20GRT1dUBAgG2sb0Ht9jX56G+WZ0GRUr1vYMONvEX3NNiIHyyA8iGOpBL/+P1TOucXYyKNZcfV10NugUO2NevPny54vQr2kDnA= csg-1.4.1/CHANGELOG.md000066400000000000000000000173451315264121600141150ustar00rootroot00000000000000For more detailed information about the changes see the history of the [repository](https://github.com/votca/csg/commits/master). ## Version 1.4.1 (released 02.09.17) * fix pkg-config files * fix build with gmx-2017 * added CSG_MDRUN_OPTS to run_gromacs.sh * table_to_tab.pl: fix forces for LAMMPS * csg_inverse: warn users of bug #179 * run_gromacs.sh: check nstxout-compressed * tools: fix parallel build with mkl ## Version 1.4 _SuperKurt_ (released 29.10.16) * added cibi tutorial and manual section * install mkl headers if mkl is enabled * updated copyright * xmltopologyparser: tokenize on \n and \t as well (#195) * tools: added support for boost-1.62 ## Version 1.4_rc1 (released 26.09.16) * switch to c++-11 * dihedral support in csg_fmatch * support for tabulated bond in >=gmx-5 * added full featured XMLTopologyReader * added regularization for IMC * changed neighborlist from std::list to std::vector * added cibi method * tools: added support for mkl as gsl replacement * lots of reader bug fixes * dropped support for gromacs-4 and clean up * dropped multi_g_rdf script * dropped thermforce iteration method * moved h5md reader to hdf5 without c++ api ## Version 1.3.1 (released 19.08.16) * histogram: use floor() on bin value * calculator: fixed namespace * VOTCARC: added shebang * fixed gromacs detection with >=cmake-3.4 ## Version 1.3 _SuperUzma_ (released 15.01.16) * re-implemented csg_boltzmann --excl * added support for upcoming gromacs 2016 ## Version 1.3_rc1 (released 23.09.15) * added new iterative methods: relative entropy, simplex optimization * added support for using hoomd-blue, lammps, ESPResSo, ESPResSo++ and dl_poly for iterative methods * added pre-simulation feature for GROMACS (e.g. 
for minimization) * added rudimentary support for IBI with bonded interaction * made pdb reader work with libgmx * added support for h5md, dl_ploy file format * added support for numpy in IMC * cmake: added BUILD_MANPAGES option, git support minor fixes * cmake: dropped internal boost replacement * many many many small bug fixes and improvements ## Version 1.2.4 (released 31.08.14) * support for Gromacs 5.0 * support for Boost 1.53 * fixed use of nawk instead of gawk under MacOs * fixed python shebang * fixed linking issue under Fedora * fixed thermforce calculation for xsplit case ## Version 1.2.3 (released 14.08.12) * improved AIX support * fixed install on 64-bit linux systems * fixed a bug in histogram class * fixed rdf calculation for r_min > 0 (histogram bug) * updated documentation ## Version 1.2.2 (released 10.01.12) * added numpy solver for IMC * cmake: updated FindGROMACS.cmake * fixed coredump in csg_property (issue 114) * fixed namespace in Fedora * fixed problem with newlines in csg_property * cmake: allow static fftw and gsl * added dummy c function for cmake * fixed conflicting type headers (real was defined) ## Version 1.2.1 (released 25.08.11) * csg_inverse: improve initial guess of the potential * csg_inverse: fixes for min!=0 * table_extrapolate.pl: fixed flags and first point * fixed tf iteration for multiple components * fixed round-off error in grid search and csg_calc * csg_inverse: typo fixed and additional checks * fixed soname of libs * improved cmake checks and error messages * fixed pkg-config file ## Version 1.2 _SuperDoris_ (released 17.06.11) * changed buildsystem to cmake * added thermforce iteration method * added csg_density * a lot of framework clean up * added type selector name:* * allow long and restart of simulations * added database class through sqlite3 ## Version 1.1.2 (released 04.04.11) * csg_fmatch: added support for known forces (--trj-force option) * fixed head of votca.7 manpage ## Version 1.1.1 (released 01.03.11) * fixed csg_inverse --clean * make postupdate pressure work again * fixed bug when reading exclusions from tpr * end with error in csg_stat if bead type does not exist (issue 77) ## Version 1.1 _SuperAnn_ (released 18.02.11) * added support for gromacs 5.0 * csg_dump: can dump exclusion * added boundarycondition class * added man pages, man7 and man1 for all bins * csg_inverse: renamed ibm to ibi * csg_inverse: many internal improvements * csg_stat: added thread support, read exclusions from tpr file, uses grid search by default * csg_inverse: added convergence check, postadd plot, better logging and weaker die * csg_resample: added boundary conditions option and akima spline support * csg_stat or csg_fmatch give an error if trj not given (issue 29) * csg_get_interaction_property knows about defaults * fixed segfault in mapping (Fixes issue 27) * fixed bug in gromacs writer (frame.bX = true) * fixed segfault in gromacs writer (issue 54) * added thread class * added spline class, with akima spline, linear spline * random.cc: avoid calling of exit() * added lexical cast class ## Version 1.0.1 (released 01.12.10) * fixed custom md programs in sim scripts (issue 1) * completion file is back from tools * issue #21: fixed strange kink when pot.in was provided * added --disable-rc-files to configure * csg_call/csg_inverse: added installdir as failback for CSGSHARE * fixed a bug in VOTCARC.csh for empty LD_LIBRARY_PATH * completion file has moved back to csg * added --disable-rc-files to configure * updated bundled libtool to 2.2.10 ## 
Version 1.0 (released 30.09.10) * added postupdate script for scaling the update * added options to perform analysis without giving a mapping file (--no-map). No need for a dummy mapping file in IMC and csg_stat anymore. * allow comments in tables * fixed bug in pressure correction when p is negative * added support for gromacs devel version * fixed a bug when compiling with gcc-4.4 * fixed a bug that pot.cur was changed at every step * added application class for easy implementation of analysis programs * fixed bug if initial potential was given and not used * restart points in the iterative procedure are no longer deleted after a step finished * preliminary reader for ESPResSo Blockfiles and ESPResSo as simulation program for csg_inverse * preliminary reader for LAMMPS dump files (very limited features) * allow compiling without gromacs * a lot of new xml options * full support for gromacs 4.5 * added libvotca_expat to allow compiling without expat * allow comments in tables * added application class to create standardized applications * all boost dependencies are now in tools * fixes in table format, flags is always the last row now * allow compiling without fftw (needed for csg_boltzmann only) * allow compiling without gsl (needed for csg_resample and csg_fmatch) ## Version 1.0_rc5 (released 16.03.10) * fixed --first-frame option (--first-frame 1 previously started at the second frame) * fixed compatibility issue when using gromacs development version * updated configure, see --help * added multi_g_density * CSGRC is replaced by VOTCARC of votca_tools * using libexpat instead of libxml2 * added libvotca_boost to allow compiling without boost * using pkg-config to detect package flags * compiles under AIX with xlC * added VOTCARC to initialize all votca parts ## Version 1.0_rc4 (released 08.02.10) * using libtool to build shared libs * fixed a bug in error calculation of multi_g_rdf ## Version 1.0_rc3 (released 29.01.10) * added option --wall-time to csg_inverse if run on a queueing system * added option for IBI to run in parallel * multi_g_rdf, a multiplexed version of g_rdf, was added * added some options to csg_call * csg_resample now also calculates derivatives * fixed a bug in reading stuff from the mdp file * corrected bug in tokenizer * fixed a bug in calculation of version string * some fixes concerning autotools ## Version 1.0_rc2 (released 16.12.09) * added version string to scripts * fixed typo in calculation of version string * added NOTICE and LICENSE to the dist tarball ## Version 1.0_rc1 (released 11.12.09) * initial version
csg-1.4.1/CMakeLists.txt
cmake_minimum_required(VERSION 2.8.11) project(votca-csg) set(PROJECT_VERSION "1.4.1") set(PROJECT_CONTACT "bugs@votca.org") string(REGEX REPLACE "^[1-9]+\\.([1-9]+).*$" "\\1" SOVERSION "${PROJECT_VERSION}") if (NOT ${SOVERSION} MATCHES "[1-9]+") message(FATAL_ERROR "Could not determine SOVERSION from ${PROJECT_VERSION}") endif (NOT ${SOVERSION} MATCHES "[1-9]+") # Cmake modules/macros are in a subdirectory to keep this file cleaner set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules) if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CXX_FLAGS) #Release comes with -O3 by default set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
FORCE) endif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CXX_FLAGS) if(CMAKE_BUILD_TYPE STREQUAL Debug) add_definitions(-DDEBUG) endif(CMAKE_BUILD_TYPE STREQUAL Debug) enable_language(CXX) ###################################################################### # compiler tests # these need ot be done early (before further tests). ##################################################################### include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-std=c++11" COMPILER_SUPPORTS_CXX11) if(COMPILER_SUPPORTS_CXX11) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") else() message(FATAL_ERROR "Could not find a C++-11 compiler") endif() ######################################################################## # User input options # ######################################################################## option(BUILD_SHARED_LIBS "Build shared libs" ON) if (NOT DEFINED LIB) set(LIB "lib") endif(NOT DEFINED LIB) if (NOT DEFINED MAN) set(MAN "share/man") endif(NOT DEFINED MAN) if (NOT DEFINED DATA) set(DATA "share/votca") endif(NOT DEFINED DATA) # this has to be the include specification! include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) ######################################################################## #Find external packages ######################################################################## if(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/.git) find_package(Git) endif(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/.git) find_package(Threads REQUIRED) set(THREAD_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) option(BUILD_MANPAGES "Build manpages" ON) if (BUILD_MANPAGES) find_package(TXT2TAGS) find_package(UnixCommands) else (BUILD_MANPAGES) #we use TXT2TAGS_FOUND conditionals in other CMakeLists.txt #so that TXT2TAGS is never required set(TXT2TAGS_FOUND) endif (BUILD_MANPAGES) find_package(Boost 1.39.0 REQUIRED COMPONENTS program_options filesystem system ) include_directories(${Boost_INCLUDE_DIRS}) set (BOOST_CFLAGS_PKG "-I${Boost_INCLUDE_DIRS}") set(BOOST_LIBS_PKG "-L${Boost_LIBRARY_DIRS}") foreach(_blib ${Boost_LIBRARIES}) string(REGEX REPLACE ".*/lib([^/]*)\\.[^.]*$" "-l\\1" _blib ${_blib}) set(BOOST_LIBS_PKG "${BOOST_LIBS_PKG} ${_blib}") endforeach(_blib) find_package(VOTCA_TOOLS REQUIRED) include_directories(${VOTCA_TOOLS_INCLUDE_DIRS}) option(WITH_GMX "Build gromacs reader/writer, disabling leads to reduced functionality!" 
ON) if (WITH_GMX) find_package(GROMACS REQUIRED) endif(WITH_GMX) ######################################################################## # Basic system tests (standard libraries, headers, functions, types) # ######################################################################## include(CheckIncludeFile) foreach(HEADER assert.h) check_include_file(${HEADER} FOUND_${HEADER}) if(NOT FOUND_${HEADER}) message(FATAL_ERROR "Could not find needed header - ${HEADER}") endif(NOT FOUND_${HEADER}) endforeach(HEADER) include(CheckIncludeFileCXX) foreach(HEADER algorithm fstream iomanip iostream list map numeric sstream stdexcept string vector cstdlib) check_include_file_cxx(${HEADER} FOUND_${HEADER}) if(NOT FOUND_${HEADER}) message(FATAL_ERROR "Could not find needed header - ${HEADER}") endif(NOT FOUND_${HEADER}) endforeach(HEADER) set(MATH_LIBRARIES "m" CACHE STRING "math library") mark_as_advanced( MATH_LIBRARIES ) include(CheckLibraryExists) foreach(FUNC sqrt) check_library_exists(${MATH_LIBRARIES} ${FUNC} "" FOUND_${FUNC}_${MATH_LIBRARIES}) if(NOT FOUND_${FUNC}_${MATH_LIBRARIES}) message(FATAL_ERROR "Could not find needed math function - ${FUNC}") endif(NOT FOUND_${FUNC}_${MATH_LIBRARIES}) endforeach(FUNC) ###################################### # Include the following subdirectory # ###################################### if(NOT TARGET manpages) add_custom_target(manpages ALL) endif() add_subdirectory(scripts) add_subdirectory(src) add_subdirectory(include/votca/csg) add_subdirectory(share) file(GLOB VOTCA_CMAKE_MODULES CMakeModules/FindVOTCA* CMakeModules/FindSQLITE3.cmake) install(FILES ${VOTCA_CMAKE_MODULES} DESTINATION ${DATA}/template/CMakeModules) configure_file(${CMAKE_MODULE_PATH}/cmake_uninstall.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake IMMEDIATE @ONLY) add_custom_target(uninstall-csg COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) if(NOT TARGET uninstall) add_custom_target(uninstall) endif() add_dependencies(uninstall uninstall-csg) include(FeatureSummary) feature_summary(INCLUDE_QUIET_PACKAGES WHAT ALL) csg-1.4.1/CMakeModules/000077500000000000000000000000001315264121600146035ustar00rootroot00000000000000csg-1.4.1/CMakeModules/CheckCXXLibraryExists.cmake000066400000000000000000000063561315264121600217440ustar00rootroot00000000000000#.rst: # CheckCXXLibraryExists # ------------------ # # Check if the CXX function exists. # # CHECK_CXX_LIBRARY_EXISTS (LIBRARY FUNCTION LOCATION VARIABLE) # # :: # # LIBRARY - the name of the library you are looking for # FUNCTION - the name of the function # LOCATION - location where the library should be found # VARIABLE - variable to store the result # Will be created as an internal cache variable. # # # # The following variables may be set before calling this macro to modify # the way the check is run: # # :: # # CMAKE_REQUIRED_FLAGS = string of compile command line flags # CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar) # CMAKE_REQUIRED_LIBRARIES = list of libraries to link # CMAKE_REQUIRED_QUIET = execute quietly without messages #============================================================================= # Copyright 2002-2009 Kitware, Inc. # Copyright 2015 The VOTCA Development Team (http://www.votca.org) # # Distributed under the OSI-approved BSD License (the "License"); # see accompanying file Copyright.txt for details. # # This software is distributed WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
# See the License for more information. #============================================================================= # (To distribute this file outside of CMake, substitute the full # License text for the above reference.) macro(CHECK_CXX_LIBRARY_EXISTS LIBRARY FUNCTION LOCATION VARIABLE) if(NOT DEFINED "${VARIABLE}") set(MACRO_CHECK_LIBRARY_EXISTS_DEFINITION "-DCHECK_FUNCTION_EXISTS=${FUNCTION} ${CMAKE_REQUIRED_FLAGS}") if(NOT CMAKE_REQUIRED_QUIET) message(STATUS "Looking for c++ ${FUNCTION} in ${LIBRARY}") endif() set(CHECK_LIBRARY_EXISTS_LIBRARIES ${LIBRARY}) if(CMAKE_REQUIRED_LIBRARIES) set(CHECK_LIBRARY_EXISTS_LIBRARIES ${CHECK_LIBRARY_EXISTS_LIBRARIES} ${CMAKE_REQUIRED_LIBRARIES}) endif() try_compile(${VARIABLE} ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/CheckFunctionExists.cpp COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} LINK_LIBRARIES ${CHECK_LIBRARY_EXISTS_LIBRARIES} CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_LIBRARY_EXISTS_DEFINITION} -DLINK_DIRECTORIES:STRING=${LOCATION} OUTPUT_VARIABLE OUTPUT) if(${VARIABLE}) if(NOT CMAKE_REQUIRED_QUIET) message(STATUS "Looking for c++ ${FUNCTION} in ${LIBRARY} - found") endif() set(${VARIABLE} 1 CACHE INTERNAL "Have library ${LIBRARY}") file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log "Determining if the function ${FUNCTION} exists in the ${LIBRARY} " "passed with the following output:\n" "${OUTPUT}\n\n") else() if(NOT CMAKE_REQUIRED_QUIET) message(STATUS "Looking for c++ ${FUNCTION} in ${LIBRARY} - not found") endif() set(${VARIABLE} "" CACHE INTERNAL "Have library ${LIBRARY}") file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log "Determining if the function ${FUNCTION} exists in the ${LIBRARY} " "failed with the following output:\n" "${OUTPUT}\n\n") endif() endif() endmacro() csg-1.4.1/CMakeModules/CheckFunctionExists.cpp000066400000000000000000000005741315264121600212400ustar00rootroot00000000000000#ifdef CHECK_FUNCTION_EXISTS char CHECK_FUNCTION_EXISTS(); #ifdef __CLASSIC_C__ int main(){ int ac; char*av[]; #else int main(int ac, char*av[]){ #endif CHECK_FUNCTION_EXISTS(); if(ac > 1000) { return *av[0]; } return 0; } #else /* CHECK_FUNCTION_EXISTS */ # error "CHECK_FUNCTION_EXISTS has to specify the function" #endif /* CHECK_FUNCTION_EXISTS */ csg-1.4.1/CMakeModules/FindGROMACS.cmake000066400000000000000000000104351315264121600175040ustar00rootroot00000000000000# - Finds parts of gromacs # Find the native gromacs compents headers and libraries. # # GROMACS_INCLUDE_DIRS - where to find gromacs headers. # GROMACS_LIBRARIES - List of libraries when used by gromacs. # GROMACS_FOUND - True if all gromacs componets were found. # GROMACS_DEFINITIONS - Extra definies needed by gromacs # GROMACS_VERSION - Gromacs lib interface version # # Copyright 2009-2015 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# find_package(PkgConfig) pkg_check_modules(PC_GROMACS_D libgromacs_d) pkg_check_modules(PC_GROMACS libgromacs) find_library(GROMACS_LIBRARY NAMES gromacs_d gromacs HINTS ${PC_GROMACS_D_LIBRARY_DIRS} ${PC_GROMACS_LIBRARY_DIRS}) if (GROMACS_LIBRARY) include(CheckLibraryExists) include(CheckCXXLibraryExists) check_library_exists("${GROMACS_LIBRARY}" GromacsVersion "" FOUND_GROMACS_VERSION) if(NOT FOUND_GROMACS_VERSION) check_cxx_library_exists("${GROMACS_LIBRARY}" gmx_version "" FOUND_GROMACS_VERSION_CXX) endif() if(NOT FOUND_GROMACS_VERSION AND NOT FOUND_GROMACS_VERSION_CXX) message(FATAL_ERROR "Could not find GromacsVersion in ${GROMACS_LIBRARY}, take look at the error message in ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log to find out what was going wrong. If you don't have pkg-config installed you will most likely have to set GROMACS_LIBRARY by hand which sets the gromacs lib and it's dependencies (i.e. -DGROMACS_LIBRARY='/path/to/libgmx.so;/path/to/libblas.so;/path/to/libm.so')!") endif() check_library_exists("${GROMACS_LIBRARY}" init_domdec_vsites "" FOUND_GROMACS_INIT_DOMDEC_VSITES) check_library_exists("${GROMACS_LIBRARY}" gmx_gpu_sharing_supported "" FOUND_GROMACS_GMX_GPU_SHARING_SUPPORTED) #check is above if(FOUND_GROMACS_VERSION_CXX) set(GROMACS_VERSION 52) elseif(FOUND_GROMACS_GMX_GPU_SHARING_SUPPORTED) set(GROMACS_VERSION 51) elseif(FOUND_GROMACS_INIT_DOMDEC_VSITES) set(GROMACS_VERSION 50) else() message(FATAL_ERROR "Could not find gmx_version, init_domdec_vsites nor gmx_gpu_sharing_supported in the gromacs library, take look at the error message in ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log to find out what was going wrong. This most likely means that your gromacs version is too old, we need at least gromacs 5 !") endif() check_cxx_library_exists("${GROMACS_LIBRARY}" gmx_is_single_precision "" FOUND_GMX_IS_SINGLE_PRECISION) check_cxx_library_exists("${GROMACS_LIBRARY}" gmx_is_double_precision "" FOUND_GMX_IS_DOUBLE_PRECISION) if(FOUND_GMX_IS_DOUBLE_PRECISION AND GROMACS_VERSION GREATER 51) set(GROMACS_DEFINITIONS "-DGMX_DOUBLE=1") elseif(FOUND_GMX_IS_SINGLE_PRECISION AND GROMACS_VERSION GREATER 51) set(GROMACS_DEFINITIONS "-DGMX_DOUBLE=0") elseif(FOUND_GMX_IS_DOUBLE_PRECISION) set(GROMACS_DEFINITIONS "-DGMX_DOUBLE") elseif(NOT FOUND_GMX_IS_SINGLE_PRECISION) message(FATAL_ERROR "Could not find neither gmx_is_single_precision nor gmx_is_double_precision in the gromacs library, that is very very strange, take look at the error message in ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log to find out what was going wrong. This most likely means that your gromacs version is too old, we need at least gromacs 5 !") endif() endif (GROMACS_LIBRARY) find_path(GROMACS_INCLUDE_DIR gromacs/fileio/tpxio.h HINTS ${PC_GROMACS_D_INCLUDE_DIRS} ${PC_GROMACS_INCLUDE_DIRS}) set(GROMACS_LIBRARIES "${GROMACS_LIBRARY}" ) set(GROMACS_INCLUDE_DIRS "${GROMACS_INCLUDE_DIR}" ) include(FindPackageHandleStandardArgs) # handle the QUIETLY and REQUIRED arguments and set GROMACS_FOUND to TRUE # if all listed variables are TRUE find_package_handle_standard_args(GROMACS DEFAULT_MSG GROMACS_LIBRARY GROMACS_INCLUDE_DIR GROMACS_VERSION) mark_as_advanced(GROMACS_LIBRARY GROMACS_INCLUDE_DIR GROMACS_VERSION) csg-1.4.1/CMakeModules/FindSQLITE3.cmake000066400000000000000000000025271315264121600175000ustar00rootroot00000000000000# - Find libsqlite3 # Find the native libsqlite3 headers and libraries. 
# # SQLITE3_INCLUDE_DIRS - where to find sqlite3.h, etc # SQLITE3_LIBRARIES - List of libraries when using sqlite3. # SQLITE3_FOUND - True if sqlite3 found. # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # find_package(PkgConfig) pkg_check_modules(PC_SQLITE3 sqlite3) find_path(SQLITE3_INCLUDE_DIR sqlite3.h HINTS ${PC_SQLITE3_INCLUDE_DIRS}) find_library(SQLITE3_LIBRARY NAMES sqlite3 HINTS ${PC_SQLITE3_LIBRARY_DIRS} ) set(SQLITE3_LIBRARIES "${SQLITE3_LIBRARY}" ) set(SQLITE3_INCLUDE_DIRS "${SQLITE3_INCLUDE_DIR}" ) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(SQLITE3 DEFAULT_MSG SQLITE3_LIBRARY SQLITE3_INCLUDE_DIR ) mark_as_advanced(SQLITE3_INCLUDE_DIR SQLITE3_LIBRARY ) csg-1.4.1/CMakeModules/FindTXT2TAGS.cmake000066400000000000000000000040111315264121600176220ustar00rootroot00000000000000# Copyright (C) 2011 Votca Development Team # # This file was derived from FindGnuplot.cmake shipped with CMake 2.6.3. # # - this module looks for txt2tags # # Once done this will define # # TXT2TAGS_FOUND - system has txt2tags # TXT2TAGS_EXECUTABLE - the txt2tags executable # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #============================================================================= # Copyright 2002-2009 Kitware, Inc. # # Distributed under the OSI-approved BSD License (the "License"); # see accompanying file Copyright.txt for details. # # This software is distributed WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the License for more information. #============================================================================= # (To distributed this file outside of CMake, substitute the full # License text for the above reference.) 
INCLUDE(FindCygwin) FIND_PROGRAM(TXT2TAGS_EXECUTABLE NAMES txt2tags txt2tags-2.5 txt2tags-2.6 PATHS ${CYGWIN_INSTALL_PATH}/bin ) # handle the QUIETLY and REQUIRED arguments and set TXT2TAGS_FOUND to TRUE if # all listed variables are TRUE INCLUDE(FindPackageHandleStandardArgs) FIND_PACKAGE_HANDLE_STANDARD_ARGS(TXT2TAGS DEFAULT_MSG TXT2TAGS_EXECUTABLE) IF(NOT TXT2TAGS_FOUND) message("txt2tags not found, help cmake to find it by setting TXT2TAGS_EXECUTABLE") ENDIF(NOT TXT2TAGS_FOUND) MARK_AS_ADVANCED( TXT2TAGS_EXECUTABLE ) csg-1.4.1/CMakeModules/FindVOTCA_CSG.cmake000066400000000000000000000044261315264121600177640ustar00rootroot00000000000000# - Find libvotca_csg # Find the native libvotca_csg headers and libraries. # # VOTCA_CSG_INCLUDE_DIRS - where to find votca/csg/version.h, etc. # VOTCA_CSG_LIBRARIES - List of libraries when using expat. # VOTCA_CSG_FOUND - True if expat found. # VOTCA_CSG_HAS_SQLITE3 - True if votca csg was build with sqlite3 support # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # find_package(PkgConfig) pkg_check_modules(PC_VOTCA_CSG libvotca_csg) find_path(VOTCA_CSG_INCLUDE_DIR votca/csg/version.h HINTS ${PC_VOTCA_CSG_INCLUDE_DIRS}) find_library(VOTCA_CSG_LIBRARY NAMES votca_csg HINTS ${PC_VOTCA_CSG_LIBRARY_DIRS} ) set(VOTCA_CSG_LIBRARIES "${VOTCA_CSG_LIBRARY}" ) set(VOTCA_CSG_INCLUDE_DIRS "${VOTCA_CSG_INCLUDE_DIR}" ) include(FindPackageHandleStandardArgs) # handle the QUIETLY and REQUIRED arguments and set VOTCA_CSG_FOUND to TRUE # if all listed variables are TRUE find_package_handle_standard_args(VOTCA_CSG DEFAULT_MSG VOTCA_CSG_LIBRARY VOTCA_CSG_INCLUDE_DIR ) if (VOTCA_CSG_FOUND AND NOT VOTCA_CSG_LIBRARY STREQUAL "votca_csg") include(CheckLibraryExists) check_library_exists("${VOTCA_CSG_LIBRARY}" VotcaCsgFromC "" FOUND_VOTCA_CSG_VERSION) if(NOT FOUND_VOTCA_CSG_VERSION) message(FATAL_ERROR "Could not find VotcaCsgFromC in ${VOTCA_CSG_LIBRARY}, take look at the error message in ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log to find out what was going wrong. If you don't have pkg-config installed you will most likely have to set VOTCA_CSG_LIBRARY by hand, which set votca_csg lib it's dependencies (i.e. -DVOTCA_CSG_LIBRARY='/path/to/libvotca_csg.so;/path/to/libgsl.so;/path/to/libm.so') !") endif(NOT FOUND_VOTCA_CSG_VERSION) endif () mark_as_advanced(VOTCA_CSG_INCLUDE_DIR VOTCA_CSG_LIBRARY ) csg-1.4.1/CMakeModules/FindVOTCA_TOOLS.cmake000066400000000000000000000053411315264121600202450ustar00rootroot00000000000000# - Find libvotca_tools # Find the native libvotca_tools headers and libraries. # # VOTCA_TOOLS_INCLUDE_DIRS - where to find votca/tools/version.h, etc. # VOTCA_TOOLS_LIBRARIES - List of libraries when using expat. # VOTCA_TOOLS_FOUND - True if expat found. 
# VOTCA_TOOLS_HAS_SQLITE3 - True if votca tools was build with sqlite3 support # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # find_package(PkgConfig) pkg_check_modules(PC_VOTCA_TOOLS libvotca_tools) find_path(VOTCA_TOOLS_INCLUDE_DIR votca/tools/version.h HINTS ${PC_VOTCA_TOOLS_INCLUDE_DIRS}) find_path(VOTCA_TOOLS_HAS_SQLITE3 votca/tools/database.h HINTS ${VOTCA_TOOLS_INCLUDE_DIR} ${PC_VOTCA_TOOLS_INCLUDE_DIRS}) if (VOTCA_TOOLS_HAS_SQLITE3) #due to include in database.h find_package(SQLITE3 REQUIRED) set(VOTCA_TOOLS_INCLUDE_DIRS "${VOTCA_TOOLS_INCLUDE_DIR};${SQLITE3_INCLUDE_DIR}" ) else(VOTCA_TOOLS_HAS_SQLITE3) set(VOTCA_TOOLS_INCLUDE_DIRS "${VOTCA_TOOLS_INCLUDE_DIR}" ) endif (VOTCA_TOOLS_HAS_SQLITE3) find_library(VOTCA_TOOLS_LIBRARY NAMES votca_tools HINTS ${PC_VOTCA_TOOLS_LIBRARY_DIRS} ) set(VOTCA_TOOLS_LIBRARIES "${VOTCA_TOOLS_LIBRARY}" ) include(FindPackageHandleStandardArgs) # handle the QUIETLY and REQUIRED arguments and set VOTCA_TOOLS_FOUND to TRUE # if all listed variables are TRUE find_package_handle_standard_args(VOTCA_TOOLS DEFAULT_MSG VOTCA_TOOLS_LIBRARY VOTCA_TOOLS_INCLUDE_DIR ) if (VOTCA_TOOLS_FOUND AND NOT VOTCA_TOOLS_LIBRARY STREQUAL "votca_tools") include(CheckLibraryExists) check_library_exists("${VOTCA_TOOLS_LIBRARY}" VotcaToolsFromC "" FOUND_VOTCA_TOOLS_VERSION) if(NOT FOUND_VOTCA_TOOLS_VERSION) message(FATAL_ERROR "Could not find VotcaToolsFromC in ${VOTCA_TOOLS_LIBRARY}, take look at the error message in ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log to find out what was going wrong. If you don't have pkg-config installed you will most likely have to set VOTCA_TOOLS_LIBRARY by hand, which set votca_tools lib it's dependencies (i.e. 
-DVOTCA_TOOLS_LIBRARY='/path/to/libvotca_tools.so;/path/to/libgsl.so;/path/to/libm.so') !") endif(NOT FOUND_VOTCA_TOOLS_VERSION) endif () mark_as_advanced(VOTCA_TOOLS_INCLUDE_DIR VOTCA_TOOLS_LIBRARY ) csg-1.4.1/CMakeModules/cmake_uninstall.cmake.in000066400000000000000000000016551315264121600213720ustar00rootroot00000000000000IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) STRING(REGEX REPLACE "\n" ";" files "${files}") FOREACH(file ${files}) MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") IF(EXISTS "$ENV{DESTDIR}${file}") EXEC_PROGRAM( "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" OUTPUT_VARIABLE rm_out RETURN_VALUE rm_retval ) IF(NOT "${rm_retval}" STREQUAL 0) MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") ENDIF(NOT "${rm_retval}" STREQUAL 0) ELSE(EXISTS "$ENV{DESTDIR}${file}") MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") ENDIF(EXISTS "$ENV{DESTDIR}${file}") ENDFOREACH(file) csg-1.4.1/CMakeModules/gitscript.cmake000066400000000000000000000016651315264121600176250ustar00rootroot00000000000000if (GIT_EXECUTABLE) #later use git describe here execute_process( COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD WORKING_DIRECTORY ${TOP_SOURCE_DIR} OUTPUT_VARIABLE THIS_GIT_ID OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process( COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD WORKING_DIRECTORY ${TOP_SOURCE_DIR} OUTPUT_VARIABLE _HAS_CHANGES OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET) if (NOT "${_HAS_CHANGES}" STREQUAL "") set(THIS_GIT_ID "${THIS_GIT_ID} (dirty)") endif() message("Current git revision is ${THIS_GIT_ID}") set(THIS_GIT_ID "gitid: ${THIS_GIT_ID}") else() set (THIS_GIT_ID) endif() file(READ ${INPUT} CONTENT) string(REGEX REPLACE "#CSG_GIT_ID#" "${THIS_GIT_ID}" NEW_CONTENT "${CONTENT}") file(WRITE "${OUTPUT}.tmp" "${NEW_CONTENT}") execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different ${OUTPUT}.tmp ${OUTPUT}) execute_process(COMMAND ${CMAKE_COMMAND} -E remove ${OUTPUT}.tmp) csg-1.4.1/CMakeModules/gitversion.cmake000066400000000000000000000017161315264121600200030ustar00rootroot00000000000000if (GIT_EXECUTABLE) #later use git describe here execute_process( COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD WORKING_DIRECTORY ${TOP_SOURCE_DIR} OUTPUT_VARIABLE THIS_GIT_ID OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process( COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD WORKING_DIRECTORY ${TOP_SOURCE_DIR} OUTPUT_VARIABLE _HAS_CHANGES OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET) if (NOT "${_HAS_CHANGES}" STREQUAL "") set(THIS_GIT_ID "${THIS_GIT_ID} (dirty)") endif() message("Current git revision is ${THIS_GIT_ID}") set(THIS_GIT_ID "gitid: ${THIS_GIT_ID}") else() set (THIS_GIT_ID) endif() set (GIT_HEADER "gitversion.h") set (NEW_GIT_HEADER "new_gitversion.h") file(WRITE ${NEW_GIT_HEADER} "static const std::string gitversion = \"${THIS_GIT_ID}\";\n") execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different ${NEW_GIT_HEADER} ${GIT_HEADER}) execute_process(COMMAND ${CMAKE_COMMAND} -E remove ${NEW_GIT_HEADER}) csg-1.4.1/LICENSE000066400000000000000000000261361315264121600133070ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
csg-1.4.1/NOTICE000066400000000000000000000007321315264121600132000ustar00rootroot00000000000000Versatile Object-oriented Toolkit for Coarse-graining Applications Copyright 2009-2011 The VOTCA Development Team This product includes software developed at The VOTCA Development Team (http://www.votca.org). This software contains code, in particular function_common.sh, derived from a script collection by C. Junghans. Thanks to Nikolaus Hansen (hansen@lri.fr, @nikohansen on github) for providing the bundled version of cma.py, which is licensed under BSD 3-Clause. csg-1.4.1/README.md000066400000000000000000000020451315264121600135520ustar00rootroot00000000000000Further information on VOTCA can be found at http://www.votca.org The development of VOTCA is mainly funded by academic research grants. If you use this package, please cite the following VOTCA papers: * _Relative entropy and optimization-driven coarse-graining methods in VOTCA_, S.Y. Mashayak, M. Jochum, K. Koschke, N.R. Aluru, V. Ruehle, and C. Junghans, [PLoS one 10, e131754 (2015)](http://dx.doi.org/10.1371/journal.pone.0131754). * _Hybrid approaches to coarse-graining using the VOTCA package: liquid hexane_, V. Ruehle and C. Junghans, [Macromol. Theory Simul. 20, 472 (2011)](http://dx.doi.org/10.1002/mats.201100011). * _Versatile Object-oriented Toolkit for Coarse-graining Applications_, V.Ruehle, C. Junghans, A. Lukyanov, K. Kremer, and D. Andrienko, [J. Chem. Theo. Comp. 5 (12), 3211 (2009)](http://dx.doi.org/10.1021/ct900369w). In case of questions, please post them in the google discussion group for votca at: http://groups.google.com/group/votca You can contact the VOTCA Development Team at devs@votca.org. csg-1.4.1/include/000077500000000000000000000000001315264121600137155ustar00rootroot00000000000000csg-1.4.1/include/votca/000077500000000000000000000000001315264121600150315ustar00rootroot00000000000000csg-1.4.1/include/votca/csg/000077500000000000000000000000001315264121600156055ustar00rootroot00000000000000csg-1.4.1/include/votca/csg/CMakeLists.txt000066400000000000000000000001721315264121600203450ustar00rootroot00000000000000file(GLOB_RECURSE VOTCA_HEADERS *.h potentialfunctions/*.h) install(FILES ${VOTCA_HEADERS} DESTINATION include/votca/csg) csg-1.4.1/include/votca/csg/bead.h000066400000000000000000000263441315264121600166620ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _bead_H #define _bead_H #include #include #include #include #include #include "beadtype.h" #include "topologyitem.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; class Molecule; /** \brief information about a bead The Bead class describes an atom or a coarse grained bead. It stores information like the id, the name, the mass, the charge and the residue it belongs to. The coordinates are stored in the configuration class. 
\todo change resnr to pointer \todo make sure bead belongs to topology */ class Bead : public TopologyItem { public: /** * destructor */ virtual ~Bead() {} /** * get the id of the bead * * \return bead id */ const int &getId() const { return _id; } /** * get bead name * \return bead name */ const string &getName() const { return _name; } /** * set bead name * \param name bead name */ void setName(const string &name) { _name=name; } /** * get the bead type * \return bead type object */ const BeadType *getType() const { return _type; } /** * set the bead type * \param type bead type object */ void setType(BeadType *type) { _type=type; } /** * get the bead type pointer (not constant) * \return bead type object */ BeadType *Type() const { return _type; } /** * get the residue number of the bead * \return residue id */ const int &getResnr() const { return _resnr; } /** * get the mass of the bead * \return bead mass */ const double &getM() const { return _m; } /** * get the charge of the bead * \return bead charge */ const double &getQ() const { return _q; } /** * set the mass of the bead * \param m bead mass */ void setM(const double &m) { _m=m; } /** * set the charge of the bead * \param q bead charge */ void setQ(const double &q) { _q=q; } /** * \brief get the symmetry of the bead * * Returns the number of unique axes of the bead; it can be * 1 for a spherical bead * 3 for an ellipsoidal bead * 2 (currently not used), could be a disc-like particle * * \return bead symmetry */ byte_t getSymmetry() const { return _symmetry; } /** * set the position of the bead * \param r bead position */ void setPos(const vec &r); /** * get the position of the bead * \return bead position */ const vec &getPos() const; /** * set the velocity of the bead * @param r bead velocity */ void setVel(const vec &r); /** * get the velocity of the bead * \return bead velocity */ const vec &getVel() const; /** * \brief set first orientation (normal vector) vector of bead * * see getU for details * * @param u bead orientation u */ void setU(const vec &u); /** * \brief get first orientation (normal vector) vector of bead * * Non-spherical beads (symmetry 3) have an internal coordinate system and the * axes are denoted as u, v and w. Currently the non-spherical mapping is hardcoded and * the axis u is calculated as the eigenvector with the lowest eigenvalue of * the mapped beads and has the meaning of a normal vector if the reference beads * have a disc-like shape. The sign of the normal vector is determined in combination * with the vectors v and w to build up a right-handed (??) coordinate system. * * \return bead orientation u */ const vec &getU() const; /** * \brief set second orientation vector of bead * * see getV for details * * @param v bead orientation v */ void setV(const vec &v); /** * \brief get second orientation vector of bead * * Non-spherical beads (symmetry 3) have an internal coordinate system and the * axes are denoted as u, v and w. Currently the non-spherical mapping is hardcoded and * the axis v is the vector which connects the first and second reference atom * in the mapping (only the component orthogonal to u). * * \return bead orientation v */ const vec &getV() const; /** * \brief set third orientation vector of bead * * see getW for details * * @param w bead orientation w */ void setW(const vec &w); /** * \brief get third orientation vector of bead * * Non-spherical beads (symmetry 3) have an internal coordinate system and the * axes are denoted as u, v and w.
Currently the non-spherical mapping is hardcoded and * the axis w is orthogonal to u and v. * * \return bead orientation w */ const vec &getW() const; /** * direct access (read/write) to the position of the bead * \return reference to position */ vec &Pos() { return _pos; } /** * direct access (read/write) to the velocity of the bead * \return reference to velocity */ vec &Vel() { return _vel; } /** * direct access (read/write) to orientation u of the bead * \return reference to u */ vec &U() { return _u; } /** * direct access (read/write) to the orientation v of the bead * \return reference to v */ vec &V() { return _v; } /** * direct access (read/write) to the orientation w of the bead * \return reference to w */ vec &W() { return _w; } /** * direct access (read/write) to the force of the bead * \return reference to force */ vec &F() { return _f; } /** * set force acting on bead * @param F force */ void setF(const vec &F); /** * \brief get the force acting on the bead * * Forces have to be provided by the trajectory. If beads are mapped, forces * of coarse-grained beads are also calculated. * * \return force on bead */ const vec &getF() const; /** does this configuration store positions? */ bool HasPos() {return _bPos; } /** does this configuration store velocities? */ bool HasVel() {return _bVel; } /** does this configuration store forces? */ bool HasF() {return _bF; } /** does this configuration store u-orientations? */ bool HasU() {return _bU; } /** does this configuration store v-orientations? */ bool HasV() {return _bV; } /** does this configuration store w-orientations? */ bool HasW() {return _bW; } /** set whether the bead stores a position */ void HasPos(bool b); /** set whether the bead stores a velocity */ void HasVel(bool b); /** set whether the bead stores a force */ void HasF(bool b); /** set whether the bead stores an orientation u */ void HasU(bool b); /** set whether the bead stores an orientation v */ void HasV(bool b); /** set whether the bead stores an orientation w */ void HasW(bool b); /** * molecule the bead belongs to * \return Molecule object */ Molecule *getMolecule() { return _mol; } /** * If this is a mapped bead, returns the ids of the beads the cg bead was created from * \return vector of bead ids of reference atoms */ vector<int> &ParentBeads() { return _parent_beads; }; /** * \brief Function to add arbitrary user data to bead * * The user can attach pointers to their own objects to beads. Currently * the user is responsible for deleting these objects at the end. * * \todo change this to shared_pointer * * \param userdata userdata */ template<typename T> void setUserData(T *userdata) { _userdata = (void*)userdata; } /** * get userdata attached to bead * @return pointer to userdata */ template<typename T> T *getUserData() { return (T *)_userdata; } /** * \brief Additional options of bead * * The options object stores additional options which can be attached to * the bead. For mapped beads, it contains all the values which were specified * in the xml mapping file. This allows adding user-defined options to the xml * which are automatically read in on creation of the coarse-grained bead. * * \return Property object containing options */ Property &Options() { return *_options; } /** * update pointer to options object of bead * \param options pointer to options object of bead */ void setOptions(Property &options) { _options = &options; } protected: int _id; vector<int> _parent_beads; BeadType *_type; Molecule *_mol; // TODO: this is so far a pointer; this should change! Each bead should have its own options.
Property *_options; byte_t _symmetry; string _name; int _resnr; double _m; double _q; vec _pos, _vel, _f, _u, _v, _w; bool _bPos; bool _bVel; bool _bU; bool _bV; bool _bW; bool _bF; /// constructur Bead(Topology *owner, int id, BeadType *type, byte_t symmetry, string name, int resnr, double m, double q) : TopologyItem(owner), _id(id), _type(type), _symmetry(symmetry), _name(name), _resnr(resnr), _m(m), _q(q) {_bPos=false; _bVel=false; _bU=false; _bV=false; _bW=false; _bF=false;} void *_userdata; friend class Topology; friend class Molecule; }; inline void Bead::setPos(const vec &r) { _bPos=true; _pos = r; } inline const vec &Bead::getPos() const { assert(_bPos); return _pos; } inline void Bead::setVel(const vec &r) { _bVel=true; _vel = r; } inline const vec &Bead::getVel() const { assert(_bVel); return _vel; } inline void Bead::setU(const vec &u) { _bU=true; _u = u; } inline const vec &Bead::getU() const { assert(_bU); return _u; } inline void Bead::setV(const vec &v) { _bV=true; _v = v; } inline const vec &Bead::getV() const { assert(_bV); return _v; } inline void Bead::setW(const vec &w) { _bW=true; _w = w; } inline const vec &Bead::getW() const { assert(_bW); return _w; } inline void Bead::setF(const vec &F) { _bF=true; _f = F; } inline const vec &Bead::getF() const { assert(_bF); return _f; } inline void Bead::HasPos(bool b) { _bPos=b; } inline void Bead::HasVel(bool b) { _bVel=b; } inline void Bead::HasF(bool b) { _bF=b; } inline void Bead::HasU(bool b) { _bU=b; } inline void Bead::HasV(bool b) { _bV=b; } inline void Bead::HasW(bool b) { _bW=b; } }} #endif /* _beadinfo_H */ csg-1.4.1/include/votca/csg/beadlist.h000066400000000000000000000027361315264121600175550ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _BEADLIST_H #define _BEADLIST_H #include #include #include "topology.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief Generate lists of beads This class generates a list of beads based on some criteria, currently only the bead type. */ class BeadList : public list { public: BeadList() {}; ~BeadList() {} /// \brief Select all beads of type withn a radius of reference vector int GenerateInSphericalSubvolume(Topology &top, const string &select, vec ref, double radius); Topology *getTopology() {return _topology; } private: Topology *_topology; }; }} #endif /* _BEADLIST_H */ csg-1.4.1/include/votca/csg/beadpair.h000066400000000000000000000026061315264121600175310ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _BEADPAIR_H #define _BEADPAIR_H namespace votca { namespace csg { using namespace votca::tools; /** \brief A particle pair This class defines a particle pair. The future plan is, that the Pair class can be overloaded and Particle list creates these inherited pairs. */ class BeadPair : public std::pair { public: BeadPair() {} BeadPair(Bead *bead1, Bead *bead2, vec r) : std::pair(bead1, bead2), _r(r), _dist(abs(r)) {} virtual ~BeadPair() {} /// \brief the vector connecting two beads vec &r() { return _r; } /// \brief the distance of the beads double &dist() { return _dist; } protected: vec _r; double _dist; }; }} #endif /* _PAIR_H */ csg-1.4.1/include/votca/csg/beadtriple.h000066400000000000000000000034001315264121600200660ustar00rootroot00000000000000/* * Copyright 2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef BEADTRIPLE_H #define BEADTRIPLE_H #include namespace votca { namespace csg { using namespace votca::tools; /** \brief A particle pair This class defines a particle pair. The future plan is, that the Pair class can be overloaded and Particle list creates these inherited pairs. */ class BeadTriple : public std::tuple { public: BeadTriple() {} BeadTriple(Bead *bead1, Bead *bead2, Bead *bead3, vec r12, vec r13, vec r23) : std::tuple(bead1, bead2, bead3), _r12(r12), _r13(r13), _r23(r23), _dist12(abs(r12)), _dist13(abs(r13)), _dist23(abs(r23)) {} virtual ~BeadTriple() {} /// \brief the vector connecting two beads vec &r12() { return _r12; } vec &r13() { return _r13; } vec &r23() { return _r23; } /// \brief the distance of the beads double &dist12() { return _dist12; } double &dist13() { return _dist13; } double &dist23() { return _dist23; } protected: vec _r12; vec _r13; vec _r23; double _dist12; double _dist13; double _dist23; }; }} #endif /* BEADTRIPLE_H */ csg-1.4.1/include/votca/csg/beadtype.h000066400000000000000000000025551315264121600175620ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #ifndef _BEADTYPE_H #define _BEADTYPE_H #include #include "topologyitem.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief Bead Type informaton Each bead has a type. While the bead name should be unique, several beads can share the same type. */ class BeadType : public TopologyItem { public: const int &getId() const { return _id; } const string &getName() const { return _name; } void setName(const string &name) { _name=name; } private: int _id; string _name; BeadType(Topology *parent, int id, const string &name) : TopologyItem(parent), _id(id), _name(name) {} friend class Topology; }; }} #endif /* _BEADTYPE_H */ csg-1.4.1/include/votca/csg/boundarycondition.h000066400000000000000000000033221315264121600215100ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef __VOTCA_BOUNDARYCONDITION_H #define __VOTCA_BOUNDARYCONDITION_H #include namespace votca { namespace csg { using namespace std; using namespace votca::tools; class BoundaryCondition { public: virtual ~BoundaryCondition() {}; /** * set the simulation box * \param box triclinic box matrix */ void setBox(const matrix &box) { _box = box; }; /** * get the simulation box * \return triclinic box matrix */ const matrix &getBox() { return _box; }; /** * get the volume of the box * \return box volume as double */ virtual double BoxVolume(); /** * get shortest connection vector between r_i and r_j with respect to the (periodic) box * \return shortest distance vector */ virtual vec BCShortestConnection(const vec &r_i, const vec &r_j) const = 0; enum eBoxtype { typeAuto = 0, typeTriclinic, typeOrthorhombic, typeOpen }; virtual eBoxtype getBoxType() = 0; protected: matrix _box; }; }} #endif /* BOUNDARYCONDITION_H */ csg-1.4.1/include/votca/csg/cgengine.h000066400000000000000000000056731315264121600175500ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #ifndef _cgengine_H #define _cgengine_H #include #include #include #include "topology.h" #include "cgmoleculedef.h" #include #include "topologymap.h" #include "cgobserver.h" #include #include "cgmoleculedef.h" #include "cgengine.h" #include "molecule.h" #include "topologyreader.h" #include "trajectorywriter.h" #include "trajectoryreader.h" #include #include #include "nematicorder.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief coarse graining engine This class manages the coarse graining, at the moment it does the measurement stuff TODO: split this into an additional VotcaApplication object */ class CGEngine { public: CGEngine(); ~CGEngine(); /** create a coarse grained topolgy based on a given topology */ TopologyMap *CreateCGTopology(Topology &in, Topology &out); /** load molecule type from file */ void LoadMoleculeType(string filename); CGMoleculeDef *getMoleculeDef(string name); /** * \brief ignores molecule in mapping process * \param pattern glob pattern for molecule ident */ void AddIgnore(string pattern) { _ignores.push_back(pattern); } /** * \brief checks whether molecule is ignored * \param ident identifyier of molecule * \return true if is ignored */ bool IsIgnored(string ident); private: map _molecule_defs; std::list _ignores; }; inline CGMoleculeDef *CGEngine::getMoleculeDef(string name) { map::iterator iter; // if there is only 1 molecule definition, don't care about the name if(_molecule_defs.size()==1 && name == "unnamed") { return (*(_molecule_defs.begin())).second; } iter = _molecule_defs.find(name); if(iter == _molecule_defs.end()) return NULL; return (*iter).second; } inline bool CGEngine::IsIgnored(string ident) { for(std::list::iterator iter=_ignores.begin(); iter!=_ignores.end(); ++iter) { if(wildcmp(iter->c_str(), ident.c_str())) return true; } return false; } }} #endif /* _cgengine_H */ csg-1.4.1/include/votca/csg/cgmoleculedef.h000066400000000000000000000046621315264121600205640ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _cgmoleculedef_H #define _cgmoleculedef_H #include #include #include #include #include #include "map.h" #include #include "exclusionlist.h" #include "molecule.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief definition of a coarse grained molecule This class is to define a coarse grained molecule, which includes the topology, mapping, ... \todo clean up this class, do the bonded interactions right!!!! \todo check for consistency of xml file, seperate xml parser and class!! 
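A minimal usage sketch (the xml file name is hypothetical; Load() and CreateMolecule() are the members declared below, and top_cg stands for the coarse-grained Topology being built):

\code
CGMoleculeDef def;
def.Load("propane.xml");                         // hypothetical mapping/topology definition file
Molecule *cg_mol = def.CreateMolecule(top_cg);   // create the cg molecule in the cg topology
\endcode

CreateMap() can then be used to connect an atomistic molecule with the coarse-grained molecule created this way.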
*/ class CGMoleculeDef { public: CGMoleculeDef() {} ~CGMoleculeDef(); Molecule *CreateMolecule(Topology & top); Map *CreateMap(Molecule &in, Molecule &out); void Load(string filename); const string &getName() { return _name; } const string &getIdent() { return _ident; } private: Property _options; struct beaddef_t { string _name; string _type; byte_t _symmetry; string _mapping; vector<string> _subbeads; Property *_options; }; // name of the coarse grained molecule string _name; // name of the molecule to coarse grain string _ident; // beads of the cg molecule vector<beaddef_t *> _beads; map<string, beaddef_t *> _beads_by_name; // mapping schemes map<string, Property *> _maps; list<Property *> _bonded; void ParseTopology(Property &options); void ParseBeads(Property &options); void ParseBonded(Property &options); void ParseMapping(Property &options); beaddef_t *getBeadByName(const string &name); Property *getMapByName(const string &name); }; }} #endif /* _cgmoleculedef_H */ csg-1.4.1/include/votca/csg/cgobserver.h000066400000000000000000000027551315264121600201260ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _CGOBSERVER_H #define _CGOBSERVER_H #include "topology.h" namespace votca { namespace csg { using namespace votca::tools; /** \brief Observer class for analysis hook Each application which performs analysis operations should use CGEngine. It offers a hook (callback class) during the coarse-graining process to evaluate each frame. The user does not have to take care of mapping and other details. Just overload this class and analyze the properties of interest. */ class CGObserver { public: /// \brief called before the first frame virtual void BeginCG(Topology *top, Topology *top_atom = 0) = 0; /// \brief called after the last frame virtual void EndCG() = 0; /// \brief called for each frame which is mapped virtual void EvalConfiguration(Topology *top, Topology *top_atom = 0) = 0; }; }} #endif /* _CGOBSERVER_H */ csg-1.4.1/include/votca/csg/csgapplication.h000066400000000000000000000143101315264121600207570ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
* */ #ifndef __VOTCA_CSGAPPLICATION_H #define __VOTCA_CSGAPPLICATION_H #include #include "topology.h" #include "topologymap.h" #include "cgobserver.h" #include #include #include "trajectoryreader.h" namespace votca { namespace csg { using namespace votca::tools; class CsgApplication : public Application { public: CsgApplication(); ~CsgApplication(); void Initialize(); bool EvaluateOptions(); void Run(void); void ShowHelpText(std::ostream &out); /// \brief overload and return true to enable mapping command line options virtual bool DoMapping(void) { return false; } /// \brief if DoMapping is true, will by default require mapping or not virtual bool DoMappingDefault(void) { return true; } /// \brief overload and return true to enable trajectory command line options virtual bool DoTrajectory(void) { return false; } /* \brief overload and return true to enable threaded calculations */ virtual bool DoThreaded(void) { return false; } /* \brief overload and return false to disable synchronized (while threaded) calculations */ virtual bool SynchronizeThreads(void) { if (DoThreaded()) return true; else return false; } /// \brief if topology is always needed virtual bool NeedsTopology(void) { return true; } /// \brief called after topology was loaded virtual bool EvaluateTopology(Topology *top, Topology *top_ref = 0) { return true; } void AddObserver(CGObserver *observer); /// \brief called before the first frame virtual void BeginEvaluate(Topology *top, Topology *top_ref = 0); /// \brief called after the last frame virtual void EndEvaluate(); // \brief called for each frame which is mapped virtual void EvalConfiguration(Topology *top, Topology *top_ref = 0); // thread related stuff follows /** \brief Worker, derived from Thread, does the work. * * Worker holds the information about the current frame, either in its * own copy (e.g. Topology), or, by reference, from the parent CsgApplication. * The computation is shifted from Run() into EvalConfiguration. The * user is required to overload ForkWorker and Mergeworker and thereby * define the initialization and merging of workers. By default, workers * will be executed in correct order according to the frames. Also, * output will follow the same order. * Mutexes handle the locking of input/output and are also used to impose * the correct order of frames for in/output. * */ class Worker : public Thread { public: Worker(); ~Worker(); /// \brief overload with the actual computation virtual void EvalConfiguration(Topology *top, Topology *top_ref = 0) = 0; /// \brief returns worker id int getId() { return _id; } protected: CsgApplication *_app; Topology _top, _top_cg; TopologyMap * _map; int _id; void Run(void); void setApplication(CsgApplication *app) { _app = app; } void setId(int id) { _id = id; } friend class CsgApplication; }; /** * \brief Gets frames from TrajectoryReader in an ordered way and, if successful, * calls Worker::EvalConfiguration for that frame. * * @param worker * @return True if frames left for calculation, else False */ bool ProcessData(Worker * worker); /** * * User is required to overload ForkWorker and initialize workers. * @return worker */ virtual Worker *ForkWorker(void); /** * User is required to overload MergeWorker and merge data from each worker. 
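 * A minimal sketch of the two overloads in a derived application (all names
 * are made up for the example; MyWorker is assumed to derive from
 * CsgApplication::Worker, to implement EvalConfiguration and to accumulate a
 * per-thread result in a public member _local):
 * \code
 * class MyApp : public CsgApplication {
 * public:
 *     bool DoThreaded() { return true; }
 *     Worker *ForkWorker() { return new MyWorker(); }      // one worker per thread
 *     void MergeWorker(Worker *worker) {
 *         MyWorker *w = dynamic_cast<MyWorker *>(worker);
 *         _total += w->_local;                             // merge per-thread results
 *     }
 * private:
 *     double _total;
 * };
 * \endcode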
* @param worker */ virtual void MergeWorker(Worker *worker); protected: list _observers; bool _do_mapping; std::vector _myWorkers; int _nframes; bool _is_first_frame; int _nthreads; Mutex _nframesMutex; Mutex _traj_readerMutex; /// \brief stores Mutexes used to impose order for input std::vector _threadsMutexesIn; /// \brief stores Mutexes used to impose order for output std::vector _threadsMutexesOut; TrajectoryReader * _traj_reader; }; inline void CsgApplication::AddObserver(CGObserver *observer) { _observers.push_back(observer); } inline CsgApplication::Worker::Worker() : _app(NULL), _map(NULL) , _id(-1) { } } } #endif /* __VOTCA_CSGAPPLICATION_H */ csg-1.4.1/include/votca/csg/exclusionlist.h000066400000000000000000000103531315264121600206650ustar00rootroot00000000000000/* * Copyright 2009-2015 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _exclusionlist_H #define _exclusionlist_H #include #include #include #include "bead.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /// \todo fill _excl_by_bead /// \todo no ids but pointers, use PairList class Topology; class Bead; class ExclusionList { public: ExclusionList() {} ~ExclusionList() { Clear(); } void Clear(void); template void Remove(iteratable &l); template void ExcludeList(iteratable &l); struct exclusion_t { Bead *_atom; list _exclude; }; void CreateExclusions(Topology *top); exclusion_t *GetExclusions(Bead *bead); typedef list< exclusion_t * >::iterator iterator; iterator begin() { return _exclusions.begin(); } iterator end() { return _exclusions.end(); } bool IsExcluded(Bead *bead1, Bead *bead2); template void InsertExclusion(Bead *bead, iteratable &excluded); void InsertExclusion(Bead *bead1, Bead *bead2); void RemoveExclusion(Bead *bead1, Bead *bead2); private: list< exclusion_t * > _exclusions; map _excl_by_bead; friend std::ostream &operator<<(std::ostream &out, ExclusionList& exl); }; inline ExclusionList::exclusion_t * ExclusionList::GetExclusions(Bead *bead) { map::iterator iter = _excl_by_bead.find(bead); if(iter == _excl_by_bead.end()) return NULL; return (*iter).second; } template inline void ExclusionList::Remove(iteratable &l) { typename iteratable::iterator i, j; for ( i = l.begin(); i != l.end(); ++i ) { for ( j = i; j != l.end(); ++j ) { RemoveExclusion(*i, *j); } } } template inline void ExclusionList::ExcludeList( iteratable &l ) { typename iteratable::iterator i, j; for ( i = l.begin(); i != l.end(); ++i ) { for ( j = i; j != l.end(); ++j ) { InsertExclusion(*i, *j); } } } template inline void ExclusionList::InsertExclusion(Bead *bead1_, iteratable &l) { for(typename iteratable::iterator i=l.begin(); i!=l.end(); ++i) { Bead *bead1 = bead1_; ;Bead *bead2 = *i; if (bead2->getId() < bead1->getId()) swap(bead1, bead2); if(bead1==bead2) continue; if(IsExcluded(bead1, bead2)) continue; exclusion_t *e; if((e = GetExclusions(bead1)) == NULL) { e = new exclusion_t; e->_atom = bead1; _exclusions.push_back(e); _excl_by_bead[ bead1 ] = e; 
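// at this point the lower-id bead (bead1) owns a (possibly new) exclusion record; the partner bead2 is appended to its exclusion list right below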
} e->_exclude.push_back(bead2); } } //template<> inline void ExclusionList::InsertExclusion(Bead *bead1, Bead *bead2) { if (bead2->getId() < bead1->getId()) swap(bead1, bead2); if(bead1==bead2) return; if(IsExcluded(bead1, bead2)) return; exclusion_t *e; if((e = GetExclusions(bead1)) == NULL) { e = new exclusion_t; e->_atom = bead1; _exclusions.push_back(e); _excl_by_bead[ bead1 ] = e; } e->_exclude.push_back(bead2); } inline void ExclusionList::RemoveExclusion(Bead *bead1, Bead *bead2) { if (bead2->getId() < bead1->getId()) swap(bead1, bead2); if(bead1==bead2) return; if(!IsExcluded(bead1, bead2)) return; list::iterator ex; for(ex=_exclusions.begin(); ex!=_exclusions.end(); ++ex) if((*ex)->_atom == bead1) break; if(ex==_exclusions.end()) return; (*ex)->_exclude.remove(bead2); if((*ex)->_exclude.empty()) { (*ex)=NULL; _exclusions.erase(ex); } _exclusions.remove(NULL); } std::ostream &operator<<(std::ostream &out,ExclusionList& ex); }} #endif /* _exclusionlist_H */ csg-1.4.1/include/votca/csg/fileformatfactory.h000066400000000000000000000026711315264121600215040ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _FILEFORMATFACTORY_H #define _FILEFORMATFACTORY_H #include #include #include #include namespace votca { namespace csg { using namespace votca::tools; using namespace std; template class FileFormatFactory : public ObjectFactory { public: FileFormatFactory() {} T *Create(const string &file); }; template T *FileFormatFactory::Create(const string &file) { string filetype = ""; Tokenizer tok(file, "."); for(Tokenizer::iterator iter=tok.begin();iter!=tok.end();iter++) filetype = *iter; try { return ObjectFactory::Create(filetype); } catch(std::exception &error) {} return NULL; } }} #endif /* _FILEFORMATFACTORY_H */ csg-1.4.1/include/votca/csg/imcio.h000066400000000000000000000030221315264121600170530ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #ifndef _IMCIO_H #define _IMCIO_H #include #include #include #include #include #include namespace votca { namespace csg { using namespace votca::tools; using namespace std; void imcio_write_dS(const string &file, ub::vector &r, ub::vector &dS, std::list *list=NULL); void imcio_write_matrix(const string &file, ub::matrix &gmc, std::list *list=NULL); void imcio_write_index(const string &file, vector &names, vector &ranges); void imcio_read_dS(const string &file, ub::vector &r, ub::vector &dS); void imcio_read_matrix(const string &file, ub::matrix &gmc); void imcio_read_index(const string &file, vector &names, vector &ranges); }} #endif /* _IMCIO_H */ csg-1.4.1/include/votca/csg/interaction.h000066400000000000000000000244631315264121600203060ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _interaction_H #define _interaction_H #include #include #include "topology.h" #include "bead.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief base calss for all interactions This is the base class for all interactions. \todo double names/groups right, add molecules!! */ class Interaction { public: virtual ~Interaction() { } virtual double EvaluateVar(const Topology &top) = 0; void setName(const string &name) { _name = name; } string getName() const { return _name; } void setGroup(const string &group) { _group = group; RebuildName(); } const string &getGroup() const { return _group; } // the group id is set by topology, when interaction is added to it // \todo if the group name is changed later, group id should be updated by topology int getGroupId() { return _group_id; } void setGroupId(int id) { _group_id = id; } void setIndex(const int &index) { _index = index; RebuildName(); } const int &getIndex() const { return _index; } void setMolecule(const int &mol) { _mol = mol; RebuildName(); } const int &getMolecule() const { return _mol; } virtual vec Grad(const Topology &top, int bead) = 0; int BeadCount() { return _beads.size(); } int getBeadId(int bead) { return _beads[bead]; } protected: int _index; string _group; int _group_id; string _name; int _mol; vector _beads; void RebuildName(); }; inline void Interaction::RebuildName() { stringstream s; s << _mol+1 << ":" << _group << ":" << _index+1; _name = s.str(); } /** \brief bond interaction */ class IBond : public Interaction { public: IBond(int bead1, int bead2) { _beads.resize(2); _beads[0] = bead1; _beads[1] = bead2; } IBond(list &beads) { assert(beads.size()>=2); _beads.resize(2); for(int i=0; i<2; ++i) { _beads[i] = beads.front(); beads.pop_front(); }} double EvaluateVar(const Topology &top); vec Grad(const Topology &top, int bead); private: }; /** \brief angle interaction */ class IAngle : public Interaction { public: IAngle(int bead1, int bead2, int bead3) { _beads.resize(3); _beads[0] = bead1; _beads[1] = bead2; _beads[2] = bead3;} IAngle(list &beads) { assert(beads.size()>=3); _beads.resize(3); 
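// consume the first three bead ids from the front of the list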
for(int i=0; i<3; ++i) { _beads[i] = beads.front(); beads.pop_front(); }} double EvaluateVar(const Topology &top); vec Grad(const Topology &top, int bead); private: }; /** \brief dihedral interaction */ class IDihedral : public Interaction { public: IDihedral(int bead1, int bead2, int bead3, int bead4) { _beads.resize(4); _beads[0] = bead1; _beads[1] = bead2; _beads[2] = bead3; _beads[3] = bead4;} IDihedral(list &beads) { assert(beads.size()>=4); _beads.resize(4); for(int i=0; i<4; ++i) { _beads[i] = beads.front(); beads.pop_front(); }} double EvaluateVar(const Topology &top); vec Grad(const Topology &top, int bead); private: }; inline double IBond::EvaluateVar(const Topology &top) { return abs(top.getDist(_beads[0], _beads[1])); } inline vec IBond::Grad(const Topology &top, int bead) { vec r = top.getDist(_beads[0], _beads[1]); r.normalize(); return (bead == 0) ? -r : r; } inline double IAngle::EvaluateVar(const Topology &top) { vec v1(top.getDist(_beads[1], _beads[0])); vec v2(top.getDist(_beads[1], _beads[2])); return acos(v1*v2/sqrt((v1*v1) * (v2*v2))); } inline vec IAngle::Grad(const Topology &top, int bead) { /*vec v1(top.getDist(_beads[1], _beads[0])); vec v2(top.getDist(_beads[1], _beads[2])); double av1 = abs(v1); double av2 = abs(v2); double cosphi = v1*v2 / (av1*av2); double acos_prime = -1.0 / (sqrt(1 - (cosphi*cosphi) )); switch (bead) { case (0): return acos_prime * (v2 / (av1*av2) - v1*cosphi/(av1*av1)); break; case (1): return -acos_prime * ( (v1+v2)/(av1 * av2)) - cosphi * ( v1/(av1*av1) + v2/(av2*av2) ); break; case (2): return acos_prime * (v1 / (av1*av2) - v2*cosphi/(av2*av2)); break; } return 0; */ vec v1(top.getDist(_beads[1], _beads[0])); vec v2(top.getDist(_beads[1], _beads[2])); double acos_prime = 1.0 / (sqrt(1 - (v1*v2) * (v1*v2)/( abs(v1) * abs(v2) * abs(v1) * abs(v2) ) )); switch (bead) { case (0): return acos_prime * (-v2 / ( abs(v1)*abs(v2) ) + (v1*v2) * v1 / ( abs(v2)*abs(v1)*abs(v1)*abs(v1) ) ); break; case (1): return acos_prime * ( (v1+v2)/(abs(v1) * abs(v2)) - (v1 * v2) * ((v2*v2) * v1 + (v1*v1) * v2 ) / ( abs(v1)*abs(v1)*abs(v1)*abs(v2)*abs(v2)*abs(v2) ) ); break; case (2): return acos_prime * (-v1 / ( abs(v1)*abs(v2) ) + (v1*v2) * v2 / ( abs(v1)*abs(v2)*abs(v2)*abs(v2) ) ); break; } // should never reach this assert(false); return vec(0,0,0); } inline double IDihedral::EvaluateVar(const Topology &top) { vec v1(top.getDist(_beads[0], _beads[1])); vec v2(top.getDist(_beads[1], _beads[2])); vec v3(top.getDist(_beads[2], _beads[3])); vec n1, n2; n1 = v1^v2; // calculate the normal vector n2 = v2^v3; // calculate the normal vector double sign = (v1*n2 < 0) ? 
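/* sign convention: the dihedral is negative when v1 points against the plane normal n2 = v2 x v3 */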
-1 : 1; return sign*acos(n1*n2/sqrt((n1*n1) * (n2*n2))); //return sign*acos(n1*n2/sqrt((n1*n1) * (n2*n2))) + 1; //return pow(acos(n1*n2/sqrt((n1*n1) * (n2*n2))), 2); } inline vec IDihedral::Grad(const Topology &top, int bead) { vec v1(top.getDist(_beads[0], _beads[1])); vec v2(top.getDist(_beads[1], _beads[2])); vec v3(top.getDist(_beads[2], _beads[3])); vec n1, n2; //cout << "v1: " << v1 << " , v2: " << v2 << " , v3: " << v3 < #include #include #include #include "molecule.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; class BeadMap; /******************************************************* Mapper class, collection of maps *******************************************************/ class Map { public: Map(Molecule &in, Molecule &out) : _in(in), _out(out) {} ~Map(); void AddBeadMap(BeadMap *bmap) { _maps.push_back(bmap); } void Apply(); protected: Molecule _in, _out; vector _maps; }; /******************************************************* Interface for all maps *******************************************************/ class BeadMap { public: virtual ~BeadMap() {}; virtual void Apply() = 0; virtual void Initialize(Molecule *in, Bead *out, Property *opts_map, Property *opts_bead); protected: Molecule *_in; Bead *_out; Property *_opts_map; Property *_opts_bead; }; inline void BeadMap::Initialize(Molecule *in, Bead *out, Property *opts_bead, Property *opts_map) { _in = in; _out = out; _opts_map = opts_map; _opts_bead = opts_bead; } /******************************************************* Linear map for spherical beads *******************************************************/ class Map_Sphere : public BeadMap { public: Map_Sphere() {} void Apply(); void Initialize(Molecule *in, Bead *out, Property *opts_bead, Property *opts_map); protected: void AddElem(Bead *in, double weight, double force_weight); struct element_t { Bead *_in; double _weight; double _force_weight; }; vector _matrix; }; inline void Map_Sphere::AddElem(Bead *in, double weight, double force_weight) { element_t el; el._in = in; el._weight = weight; el._force_weight = force_weight; _matrix.push_back(el); } /******************************************************* Linear map for ellipsoidal bead *******************************************************/ class Map_Ellipsoid : public Map_Sphere { public: Map_Ellipsoid() { } void Apply(); protected: }; }} #endif /* _map_H */ csg-1.4.1/include/votca/csg/molecule.h000066400000000000000000000057261315264121600175750ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _MOLECULE_H #define _MOLECULE_H #include #include #include #include #include "topologyitem.h" #include "bead.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; class Interaction; /** \brief information about molecules The Molecule class stores which beads belong to a molecule. The organization of beads into molecules is needed for the CG mapping. 
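A short usage sketch (assuming a Topology `top` that already contains molecules and exposes them via MoleculeByIndex(), as in this library version):

\code
Molecule *mol = top.MoleculeByIndex(0);      // first molecule in the topology
for (int i = 0; i < mol->BeadCount(); ++i) {
    Bead *b = mol->getBead(i);               // bead i of the molecule
    // ... use b->getPos(), mol->getBeadName(i), ...
}
\endcode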
\todo sort atoms in molecule */ class Molecule : public TopologyItem { public: /// get the molecule ID int getId() const { return _id; } /// get the name of the molecule const string &getName() const { return _name; } /// set the name of the molecule void setName(const string &name) { _name=name; } /// Add a bead to the molecule void AddBead(Bead *bead, const string &name); /// get the id of a bead in the molecule Bead *getBead(int bead) { return _beads[bead]; } int getBeadId(int bead) { return _beads[bead]->getId(); } int getBeadIdByName(const string &name); /// get the number of beads in the molecule int BeadCount() const { return _beads.size(); } /// find a bead by it's name int getBeadByName(const string &name); string getBeadName(int bead) {return _bead_names[bead]; } /// Add an interaction to the molecule void AddInteraction(Interaction *ic) { _interactions.push_back(ic); } vector Interactions() { return _interactions; } template void setUserData(T *userdata) { _userdata = (void*)userdata; } template T *getUserData() { return (T *)_userdata; } private: // maps a name to a bead id map _beadmap; vector _interactions; // id of the molecules int _id; // name of the molecule string _name; // the beads in the molecule vector _beads; vector _bead_names; void *_userdata; /// constructor Molecule(Topology *parent, int id, string name) : TopologyItem(parent), _id(id), _name(name) {} friend class Topology; }; inline int Molecule::getBeadIdByName(const string &name) { int i = getBeadByName(name); if(i<0) return i; return _beads[i]->getId(); } }} #endif /* _Molecule_H */ csg-1.4.1/include/votca/csg/nblist.h000066400000000000000000000125321315264121600172540ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _NBLIST_H #define _NBLIST_H #include "beadlist.h" #include "beadpair.h" #include "beadtriple.h" #include "pairlist.h" #include "exclusionlist.h" namespace votca { namespace csg { using namespace votca::tools; /** * \brief Neighbour list class * * Implements a simple N^2 neighbour search and stores neigbourlist with pair * structure. User defined criteria can be added by SetMatchFunction. To only * get every pair listed once, the SetMatchFunction can be used and always return * that the pair is not stored. * */ class NBList : public PairList { public: NBList(); virtual ~NBList(); /// Generate the neighbour list based on two bead lists (e.g. 
bead types) virtual void Generate(BeadList &list1, BeadList &list2, bool do_exclusions = true); /// Generate the neighbour list based on a single bead list virtual void Generate(BeadList &list, bool do_exclusions = true) { Generate(list, list, do_exclusions); } /// set the cutoff for the neighbour search void setCutoff(double cutoff) { _cutoff = cutoff; } /// get the cutoff for the neighbour search double getCutoff() { return _cutoff; } /** * \brief match function for class member functions * * SetMatchFunction can be used to specify additional criteria for whether two * beads are added to the list of pairs or not. The function gets the two * beads and the distance vector as arguments. If a pair should be added, * the function should return true, otherwise false. * * This function can also be used in a situation where each pair only needs * to be processed once, but the total number of pairs is too big to be stored * in memory, e.g. to calculate the rdf for huge systems. In this case, set a * match function which always returns false (-> no pair is added), and do * the processing in the match function. A usage sketch is appended at the * end of this header. */ template<typename T> void SetMatchFunction(T *object, bool (T::*fkt)(Bead *, Bead *, const vec &, const double dist)); /// \brief match function for static member functions or plain functions void SetMatchFunction(bool (*fkt)(Bead *, Bead *, const vec &, const double dist)); /// standard match function static bool match_always(Bead *b1, Bead *b2, const vec &r, const double dist) { return true; } /// function to use a user defined pair type template<typename pair_type> void setPairType(); protected: /// cutoff double _cutoff; /// take into account exclusions from topology bool _do_exclusions; /// policy function to create new bead pairs template<typename pair_type> static BeadPair *beadpair_create_policy(Bead *bead1, Bead *bead2, const vec &r) { return dynamic_cast<BeadPair*>(new pair_type(bead1, bead2, r)); } typedef BeadPair* (*pair_creator_t)(Bead *bead1, Bead *bead2, const vec &r); /// the current bead pair creator function pair_creator_t _pair_creator; protected: /// Functor for match function to be able to set member and non-member functions class Functor { public: Functor() {} virtual bool operator()(Bead *, Bead *, const vec &, const double dist) = 0; virtual ~Functor() {}; }; /// Functor for member functions template<typename T> class FunctorMember : public Functor { public: typedef bool (T::*fkt_t)(Bead *, Bead *, const vec &, const double dist); FunctorMember(T* cls, fkt_t fkt) : _cls(cls), _fkt(fkt) {} bool operator()(Bead *b1, Bead *b2, const vec &r, const double dist) { return (_cls->*_fkt)(b1, b2, r, dist); } private: T* _cls; fkt_t _fkt; }; /// Functor for non-member functions class FunctorNonMember : public Functor { public: typedef bool (*fkt_t)(Bead *, Bead *, const vec &, const double dist); FunctorNonMember(fkt_t fkt) : _fkt(fkt) {} bool operator()(Bead *b1, Bead *b2, const vec &r, const double dist) { return (*_fkt)(b1, b2, r, dist); } private: fkt_t _fkt; }; Functor * _match_function; }; template<typename pair_type> void NBList::setPairType() { _pair_creator = NBList::beadpair_create_policy<pair_type>; } template<typename T> inline void NBList::SetMatchFunction(T *object, bool (T::*fkt)(Bead *, Bead *, const vec &, const double)) { if(_match_function) delete _match_function; _match_function = dynamic_cast<Functor*>(new FunctorMember<T>(object, fkt)); } inline void NBList::SetMatchFunction(bool (*fkt)(Bead *, Bead *, const vec &, const double)) { if(_match_function) delete _match_function; _match_function = dynamic_cast<Functor*>(new FunctorNonMember(fkt)); } }} #endif /* _NBLIST_H */
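/*
  Usage sketch (not part of the library): generate a neighbour list for all
  beads of one type and process each pair through a custom match function.
  The bead type name "A" and the 0.9 nm cutoff are made up for the example,
  and BeadList::Generate(Topology&, const string&) is assumed to be available
  for selecting beads by type.

    #include <votca/csg/beadlist.h>
    #include <votca/csg/nblist.h>

    using namespace votca::csg;

    class PairCounter {
    public:
        int Process(Topology &top) {
            _count = 0;
            BeadList beads;
            beads.Generate(top, "A");          // select all beads of type "A"
            NBList nb;
            nb.setCutoff(0.9);                 // cutoff in nm
            // count pairs instead of storing them; returning false keeps the list empty
            nb.SetMatchFunction(this, &PairCounter::CountPair);
            nb.Generate(beads);
            return _count;
        }
        bool CountPair(Bead *b1, Bead *b2, const vec &r, const double dist) {
            ++_count;                          // e.g. histogram 'dist' here for an rdf
            return false;
        }
    private:
        int _count;
    };
*/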
csg-1.4.1/include/votca/csg/nblistgrid.h000066400000000000000000000032751315264121600201260ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _NBLISTGRID_H #define _NBLISTGRID_H #include #include #include "nblist.h" #include namespace votca { namespace csg { using namespace votca::tools; class NBListGrid : public NBList { public: void Generate(BeadList &list1, BeadList &list2, bool do_exclusions = true); void Generate(BeadList &list, bool do_exclusions = true); protected: struct cell_t { BeadList _beads; std::vector _neighbours; }; vec _box_a, _box_b, _box_c; vec _norm_a, _norm_b, _norm_c; int _box_Na, _box_Nb, _box_Nc; std::vector _grid; Topology *_top; void InitializeGrid(const matrix &box); cell_t &getCell(const vec &r); cell_t &getCell(const int &a, const int &b, const int &c); void TestBead(cell_t &cell, Bead *bead); void TestCell(cell_t &cell, Bead *bead); }; inline NBListGrid::cell_t &NBListGrid::getCell(const int &a, const int &b, const int &c) { return _grid[a + _box_Na*b + _box_Na*_box_Nb*c]; } }} #endif /* _NBLISTGRID_H */ csg-1.4.1/include/votca/csg/nematicorder.h000066400000000000000000000024011315264121600204270ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _NEMATICORDER_H #define _NEMATICORDER_H #include "topology.h" #include "topology.h" #include namespace votca { namespace csg { using namespace votca::tools; class NematicOrder { public: NematicOrder() {} ~NematicOrder() {} void Process(Topology &top, const string &filter = "*"); matrix::eigensystem_t &NematicU() {return _nemat_u; } matrix::eigensystem_t &NematicV() {return _nemat_v; } matrix::eigensystem_t &NematicW() {return _nemat_w; } private: matrix _mu,_mv,_mw; matrix::eigensystem_t _nemat_u, _nemat_v, _nemat_w; }; }} #endif /* _NEMATICORDER_H */ csg-1.4.1/include/votca/csg/openbox.h000066400000000000000000000020011315264121600174210ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef __VOTCA_OPENBOX_H #define __VOTCA_OPENBOX_H #include "boundarycondition.h" namespace votca { namespace csg { using namespace std; using namespace votca::tools; class OpenBox : public BoundaryCondition { public: vec BCShortestConnection(const vec &r_i, const vec &r_j) const; eBoxtype getBoxType() { return typeOpen; } }; }} #endif /* OPENBOX_H */ csg-1.4.1/include/votca/csg/orthorhombicbox.h000066400000000000000000000020651315264121600211710ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef __VOTCA_ORTHORHOMBICBOX_H #define __VOTCA_ORTHORHOMBICBOX_H #include "boundarycondition.h" namespace votca { namespace csg { using namespace std; using namespace votca::tools; class OrthorhombicBox : public BoundaryCondition { public: vec BCShortestConnection(const vec &r_i, const vec &r_j) const; eBoxtype getBoxType() { return typeOrthorhombic; } protected: }; }} #endif /* ORTHORHOMBICBOX_H */ csg-1.4.1/include/votca/csg/pairlist.h000066400000000000000000000065211315264121600176110ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #ifndef _PAIRLIST_H #define _PAIRLIST_H #include #include namespace votca { namespace csg { template class PairList { public: PairList() {} virtual ~PairList() { Cleanup(); } //void Generate(BeadList *list1, BeadList *list2 = NULL); void AddPair(pair_type *p); typedef typename std::vector::iterator iterator; typedef typename std::map partners; iterator begin() { return _pairs.begin(); } iterator end() { return _pairs.end(); } typename std::vector::size_type size() { return _pairs.size(); } pair_type *front() { return _pairs.front(); } pair_type *back() { return _pairs.back(); } bool empty() { return _pairs.empty(); } void Cleanup(); pair_type *FindPair(element_type e1, element_type e2); partners *FindPartners(element_type e1); typedef element_type element_t; typedef pair_type pair_t; protected: std::vector _pairs; std::map< element_type , std::map > _pair_map; }; template inline void PairList::AddPair(pair_type *p) { /// \todo be careful, same pair object is used, some values might change (e.g. sign of distance vector) _pair_map[ p->first ][ p->second ] = p; _pair_map[ p->second ][ p->first ] = p; /// \todo check if unique _pairs.push_back(p); } template inline void PairList::Cleanup() { for(iterator iter = _pairs.begin(); iter!=_pairs.end(); ++iter) delete *iter; _pairs.clear(); _pair_map.clear(); } template inline pair_type *PairList::FindPair(element_type e1, element_type e2) { typename std::map< element_type , std::map< element_type, pair_type * > >::iterator iter1; iter1 = _pair_map.find(e1); if(iter1==_pair_map.end()) return NULL; //typename map::iterator iter2; typename partners::iterator iter2; iter2 = iter1->second.find(e2); if(iter2 == iter1->second.end()) return NULL; return iter2->second; } template typename PairList::partners *PairList::FindPartners(element_type e1) { typename std::map< element_type , std::map >::iterator iter; if((iter=_pair_map.find(e1)) == _pair_map.end()) return NULL; return &(iter->second); } }} #endif /* _PAIRLIST_H */ csg-1.4.1/include/votca/csg/potentialfunctions/000077500000000000000000000000001315264121600215355ustar00rootroot00000000000000csg-1.4.1/include/votca/csg/potentialfunctions/potentialfunction.h000066400000000000000000000062021315264121600254530ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #ifndef POTENTIALFUNCTION_H #define POTENTIALFUNCTION_H #include #include #include #include #include #include #include #include #include using namespace std; using namespace votca::tools; class PotentialFunction { public: virtual ~PotentialFunction() {} // read parameters from the input file virtual void setParam(string filename); // save parameters to the file virtual void SaveParam(const string& filename); // write potential table virtual void SavePotTab(const string& filename, const double step); // write potential table for specified interval virtual void SavePotTab(const string& filename, const double step, const double rmin, const double rcut); // set all parameters void setParam(const ub::vector param){ _lam = param; } // set ith parameter void setParam(const int i, const double val) { _lam(i) = val; } // set ith parameter among those to be optimized virtual void setOptParam(const int i, const double val) { setParam(i,val); } // set minimum r value to avoid large values void setMinDist(const double min) { _min = min; } // set cut-off value void setCutOffDist(const double cutoff) { _cut_off = cutoff; } // calculate function virtual double CalculateF (const double r) const = 0; // calculate first derivative w.r.t. ith parameter virtual double CalculateDF(const int i, const double r) const = 0; // calculate second derivative w.r.t. ith parameter virtual double CalculateD2F(const int i, const int j, const double r) const = 0; // return parameter ub::vector& Params() { return _lam; } // return ith parameter double getParam(const int i) const { return _lam(i); } // return ith parameter among those to be optimized virtual double getOptParam(const int i) const { return getParam(i); } // return size of parameters int getParamSize() const { return _lam.size(); } // return size of parameters to be optimized virtual int getOptParamSize() const { return getParamSize();} // return cut-off value double getCutOff() const { return _cut_off; } double getMinDist() const { return _min; } protected: PotentialFunction(const string& name_,const int nlam_,const double min_,const double max_); string _name; ub::vector _lam; double _cut_off; double _min; }; #endif csg-1.4.1/include/votca/csg/potentialfunctions/potentialfunctioncbspl.h000066400000000000000000000044071315264121600265040ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef POTENTIALFUNCTIONCBSPL_H #define POTENTIALFUNCTIONCBSPL_H #include #include #include #include #include "potentialfunction.h" using namespace std; using namespace votca::tools; class PotentialFunctionCBSPL : public PotentialFunction { public: PotentialFunctionCBSPL(const string& name_,const int nlam_, const double min_=0.0, const double max_=10.0); ~PotentialFunctionCBSPL(){} // calculate function value for given r double CalculateF (const double r) const; // calculate first derivative w.r.t. 
ith parameter double CalculateDF(const int i, const double r) const; // calculate second derivative w.r.t. ith parameter double CalculateD2F(const int i, const int j, const double r) const; int getOptParamSize() const ; void setParam(string filename); void SaveParam(const string& filename); void SavePotTab(const string& filename, const double step); void SavePotTab(const string& filename, const double step, const double rmin, const double rcut); void setOptParam(const int i, const double val); double getOptParam(const int i) const; void extrapolExclParam(); protected: // exclude these many first coefficients from optimization // since the region relevant to these coefficients is not sampled // the value of _nexcl is determined from rmin int _nexcl; // fix these many coeff near the cut-off to zero to ensure // zero potential and force values near cut-off int _ncutcoeff; int _nbreak; double _dr; ub::vector _rbreak; ub::matrix _M; }; #endif csg-1.4.1/include/votca/csg/potentialfunctions/potentialfunctionlj126.h000066400000000000000000000027061315264121600262370ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef POTENTIALFUNCTIONLJ126_H #define POTENTIALFUNCTIONLJ126_H #include #include #include #include "potentialfunction.h" using namespace std; using namespace votca::tools; // LJ 12-6 potential class // with c12,c6 parameters class PotentialFunctionLJ126 : public PotentialFunction { public: PotentialFunctionLJ126(const string& name_,const double min_=0.0, const double max_=10.0); ~PotentialFunctionLJ126(){}; // calculate function value for given r double CalculateF (const double r) const; // calculate first derivative w.r.t. ith parameter double CalculateDF(const int i, const double r) const; // calculate second derivative w.r.t. ith parameter double CalculateD2F(const int i, const int j, const double r) const; }; #endif csg-1.4.1/include/votca/csg/potentialfunctions/potentialfunctionljg.h000066400000000000000000000025271315264121600261560ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
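 *
 * A small sketch of how the PotentialFunction interface declared earlier is
 * typically driven, using the LJ 12-6 implementation above. The parameter
 * ordering (index 0 -> c12, index 1 -> c6), the interaction name and the file
 * names are assumptions for illustration, not fixed by this header.
 *
 * \code
 * PotentialFunctionLJ126 pot("CG-CG", 0.2, 1.2);   // name, r_min, r_cutoff
 * pot.setParam(0, 1.0e-6);                         // assumed: index 0 -> c12
 * pot.setParam(1, 1.0e-3);                         // assumed: index 1 -> c6
 *
 * double u  = pot.CalculateF(0.5);                 // U(r) at r = 0.5 nm
 * double du = pot.CalculateDF(0, 0.5);             // dU/d(c12) at the same r
 *
 * pot.SaveParam("CG-CG.param.cur");                // write current parameters
 * pot.SavePotTab("CG-CG.pot.tab", 0.01);           // tabulate U(r) on a 0.01 nm grid
 * \endcode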
* */ #ifndef POTENTIALFUNCTIONLJG_H #define POTENTIALFUNCTIONLJG_H #include "potentialfunction.h" // LJ 12-6 potential class // with c12,c6 parameters class PotentialFunctionLJG : public PotentialFunction { public: PotentialFunctionLJG(const string& name_,const double min_ = 0.0, const double max_ = 10.0); ~PotentialFunctionLJG() {}; // calculate function value for given r double CalculateF (const double r) const; // calculate first derivative w.r.t. ith parameter double CalculateDF(const int i, const double r) const; // calculate second derivative w.r.t. ith parameter double CalculateD2F(const int i, const int j, const double r) const; }; #endif /* POTFUNCTION_LJG_H */ csg-1.4.1/include/votca/csg/residue.h000066400000000000000000000030301315264121600174120ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _residue_H #define _residue_H #include #include "topologyitem.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief class for a residue The Residue class describes a residue. When reading in the atoms, all beads belong to a residue. Later on, the molecules can be organized into molecules based on their residue. */ class Residue : public TopologyItem { public: /// get the name of the residue const string &getName(); /// get the name of the residue const int &getId() const { return _id; } private: int _id; string _name; private: /// constructor Residue(Topology *parent, int id, const string &name) : TopologyItem(parent), _id(id), _name(name) {} friend class Topology; }; inline const string &Residue::getName() { return _name; } }} #endif /* _residue_H */ csg-1.4.1/include/votca/csg/topology.h000066400000000000000000000275041315264121600176420ustar00rootroot00000000000000/* * Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #ifndef _topology_H #define _topology_H #include #include #include #include #include #include #include #include "exclusionlist.h" #include "bead.h" #include "molecule.h" #include "residue.h" #include "beadtype.h" #include "boundarycondition.h" #include "triclinicbox.h" #include "orthorhombicbox.h" #include "openbox.h" namespace votca { namespace csg { using namespace votca::tools; class Interaction; class ExclusionList; typedef vector MoleculeContainer; typedef vector BeadContainer; typedef vector BeadTypeContainer; typedef vector ResidueContainer; typedef vector InteractionContainer; using namespace std; /** \brief topology of the whole system The Topology class stores the topology of the system like the beads, bonds, molecules and residues. \todo internal management for ids and indices **/ class Topology { public: /// constructor Topology() { _bc = new OpenBox(); _has_vel=false; } virtual ~Topology(); /** * \brief cleans up all the stored data */ virtual void Cleanup(); /** * \brief creates a new Bead * * \param symmetry symmetry of the bead, 1: spherical 3: ellipsoidal * \param name name of the bead * \param type bead type * \param resnr residue number * \param m mass * \param q charge * \return pointer to created bead * * The function creates a new bead and adds it to the list of beads. */ virtual Bead *CreateBead(byte_t symmetry, string name, BeadType *type, int resnr, double m, double q); /** * \brief get bead type or create it * \param name typename * \return pointer to bead type * * Returns an existing bead type or creates one if it doesn't exist yet */ virtual BeadType *GetOrCreateBeadType(string name); /** * \brief creates a new molecule * \param name name of the molecule * \return pointer to created molecule */ virtual Molecule *CreateMolecule(string name); /** * \brief checks weather molecules with the same name really contain the same number of beads */ void CheckMoleculeNaming(void); /** * \brief create a new resiude * @param name residue name * @return created residue */ virtual Residue *CreateResidue(string name); virtual Residue *CreateResidue(string name, int id); /** * \brief create molecules based on the residue * * This function scans the topology and creates molecules based on the resiude id. * All beads with the same resid are put int one molecule. */ void CreateMoleculesByResidue(); /** * \brief put the whole topology in one molecule * \param name name of the new molecule * * This function creates one big molecule for all beads in the topology. 
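 *
 * A minimal sketch of assembling a topology by hand with the creation calls
 * documented above; bead, residue and molecule names and the numeric values are
 * placeholders. CreateBead() only registers the bead with the topology; adding
 * it to a molecule is a separate step through the Molecule interface (assumed
 * here as AddBead(), which is not declared in this header).
 *
 * \code
 * Topology top;
 * Residue  *res  = top.CreateResidue("SOL");
 * BeadType *type = top.GetOrCreateBeadType("CG");
 * Bead     *b    = top.CreateBead(1, "W", type, res->getId(), 18.0, 0.0);
 * Molecule *mol  = top.CreateMolecule("water");
 * mol->AddBead(b, "W");    // assumed Molecule API, shown for illustration only
 * \endcode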
*/ void CreateOneBigMolecule(string name); /** * \brief create molecules based on blocks of atoms * \param name molecule name * \param first first bead * \param nbeads number of beads per molecule * \param nmolecules number of molecules */ void CreateMoleculesByRange(string name, int first, int nbeads, int nmolecules); /** * \brief number of molecules in the system * @return number of molecule in topology */ int MoleculeCount() { return _molecules.size(); } /** * number of beads in the system * @return number of beads in the system */ int BeadCount() { return _beads.size(); } /** * number of residues in the system * \return number of residues */ int ResidueCount() { return _residues.size(); } /** * get molecule by index * @param index molecule number * @return pointer to molecule */ Molecule *MoleculeByIndex(int index); /** * access containter with all beads * @return bead container */ BeadContainer &Beads() { return _beads; } /** * access containter with all residues * @return bead container */ ResidueContainer &Residues() { return _residues; } /** * access containter with all molecules * @return molecule container */ MoleculeContainer &Molecules() { return _molecules; } /** * access containter with all bonded interactions * @return bonded interaction container */ InteractionContainer &BondedInteractions() { return _interactions; } void AddBondedInteraction(Interaction *ic); std::list InteractionsInGroup(const string &group); BeadType *getBeadType(const int i) const { return _beadtypes[i]; } Bead *getBead(const int i) const { return _beads[i]; } Residue *getResidue(const int i) const { return _residues[i]; } Molecule *getMolecule(const int i) const { return _molecules[i]; } /** * delete all molecule information */ void ClearMoleculeList(){ _molecules.clear(); } /** * \brief adds all the beads+molecules+residues from other topology * \param top topology to add */ void Add(Topology *top); /** * \brief copy topology data of different topology * \param top topology to copy from */ void CopyTopologyData(Topology *top); /** * \brief rename all the molecules in range * \param range range string of type 1:2:10 = 1, 3, 5, 7, ... 
* \param name new name of molecule * range is a string which is parsed by RangeParser, */ void RenameMolecules(string range, string name); /** * \brief rename all the bead types * \param name current rame of the bead type * \param newname new name of bead type */ void RenameBeadType(string name, string newname); /** * \brief set the mass of all the beads of a certain type * \param name the bead type * \param value mass value */ void SetBeadTypeMass(string name, double value); /** * set the simulation box * \param box triclinic box matrix */ void setBox(const matrix &box, BoundaryCondition::eBoxtype boxtype=BoundaryCondition::typeAuto) { // determine box type automatically in case boxtype==typeAuto if(boxtype==BoundaryCondition::typeAuto) { boxtype = autoDetectBoxType(box); } if(_bc) { delete (_bc); } switch(boxtype) { case BoundaryCondition::typeTriclinic: _bc = new TriclinicBox(); break; case BoundaryCondition::typeOrthorhombic: _bc = new OrthorhombicBox(); break; default: _bc = new OpenBox(); break; } _bc->setBox(box); }; /** * get the simulation box * \return triclinic box matrix */ const matrix &getBox() { return _bc->getBox(); }; /** * set the time of current frame * \param t simulation time in ns */ void setTime(double t) { _time = t; }; /** * get the time of current frame * \return simulation time in ns */ double getTime() { return _time; }; /** * set the step number of current frame * \param s step number */ void setStep(int s) { _step = s; }; /** * get the step number of current frame * \return step number */ int getStep() { return _step; }; /** * Sets the particle group. (For the H5MD file format) * \param particle_group The name of a particle group. */ void setParticleGroup(string particle_group) { _particle_group = particle_group; }; /** * Gets the particle group. * \return The name of a particle group. 
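 *
 * A short sketch of the per-frame bookkeeping described above, as a trajectory
 * reader would perform it; the matrix "box" is assumed to have been filled
 * elsewhere (e.g. from the current trajectory frame).
 *
 * \code
 * void StoreFrame(Topology &top, const matrix &box, double t, int step) {
 *     top.setBox(box);     // box type (open/orthorhombic/triclinic) auto-detected
 *     top.setTime(t);      // simulation time in ns
 *     top.setStep(step);   // MD step number of this frame
 * }
 * \endcode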
*/ string getParticleGroup() { return _particle_group; }; /** * \brief pbc correct distance of two beads * \param bead1 index of first bead * \param bead2 index of second bead * \return distance vector * * calculates the smallest distance between two beads with correct treatment * of pbc */ vec getDist(int bead1, int bead2) const; /** * \brief calculate shortest vector connecting two points * \param r1 first point * \param r2 second point * \return distance vector * * calculates the smallest distance between two points with correct treatment * of pbc */ vec BCShortestConnection(const vec &r1, const vec &r2) const; /** * \brief return the shortest box size * \return shortest size * * Calculates the shortest length to connect two sides of the box */ double ShortestBoxSize(); /** * calculates the box volume * \return box volume */ double BoxVolume(); /** * rebuild exclusion list */ void RebuildExclusions(); /** * access exclusion list * \return exclusion list */ ExclusionList &getExclusions() { return _exclusions; } BoundaryCondition::eBoxtype getBoxType() { return _bc->getBoxType(); } template void InsertExclusion(Bead *bead1, iteratable &l); bool HasVel(){return _has_vel;} void SetHasVel(const bool v){ _has_vel=v;} bool HasForce(){return _has_force;} void SetHasForce(const bool v){ _has_force=v;} protected: BoundaryCondition *_bc; BoundaryCondition::eBoxtype autoDetectBoxType(const matrix &box); /// bead types in the topology BeadTypeContainer _beadtypes; /// beads in the topology BeadContainer _beads; /// molecules in the topology MoleculeContainer _molecules; /// residues in the topology ResidueContainer _residues; /// bonded interactions in the topology InteractionContainer _interactions; ExclusionList _exclusions; map _interaction_groups; map _beadtype_map; map > _interactions_by_group; double _time; int _step; bool _has_vel; bool _has_force; /// The particle group (For H5MD file format) string _particle_group; }; inline Bead *Topology::CreateBead(byte_t symmetry, string name, BeadType *type, int resnr, double m, double q) { Bead *b = new Bead(this, _beads.size(), type, symmetry, name, resnr, m, q); _beads.push_back(b); return b; } inline Molecule *Topology::CreateMolecule(string name) { Molecule *mol = new Molecule(this, _molecules.size(), name); _molecules.push_back(mol); return mol; } inline Residue *Topology::CreateResidue(string name, int id) { Residue *res = new Residue(this, id, name); _residues.push_back(res); return res; } inline Residue *Topology::CreateResidue(string name) { Residue *res = new Residue(this, _molecules.size(), name); _residues.push_back(res); return res; } inline Molecule *Topology::MoleculeByIndex(int index) { return _molecules[index]; } template inline void Topology::InsertExclusion(Bead *bead1, iteratable &l) { _exclusions.InsertExclusion(bead1, l); } }} #include "interaction.h" #endif /* _topology_H */ csg-1.4.1/include/votca/csg/topologyitem.h000066400000000000000000000020031315264121600205040ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
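 *
 * A usage sketch for the PBC helpers declared above. The bead indices are
 * placeholders; the returned vector respects the boundary condition selected
 * via setBox().
 *
 * \code
 * vec    d = top.getDist(0, 1);     // minimum-image vector between beads 0 and 1
 * double V = top.BoxVolume();       // volume of the current box
 * double L = top.ShortestBoxSize(); // shortest length connecting two box sides
 * \endcode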
* See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _TOPOLOGYITEM_H #define _TOPOLOGYITEM_H namespace votca { namespace csg { class Topology; class TopologyItem { public: virtual ~TopologyItem() {} Topology *getParent() { return _parent; } protected: TopologyItem(Topology *parent) : _parent(parent) {} Topology *_parent; friend class Topology; }; }} #endif /* _TOPOLOGYITEM_H */ csg-1.4.1/include/votca/csg/topologymap.h000066400000000000000000000024641315264121600203360ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _topologymap_H #define _topologymap_H #include "map.h" #include "topology.h" #include namespace votca { namespace csg { using namespace votca::tools; using namespace std; class TopologyMap { public: ~TopologyMap(); TopologyMap(Topology *in, Topology *out); void AddMoleculeMap(Map *map); void Apply(); private: Topology *_in; Topology *_out; typedef vector MapContainer; MapContainer _maps; }; inline TopologyMap::TopologyMap(Topology *in, Topology *out) : _in(in), _out(out) {} inline void TopologyMap::AddMoleculeMap(Map *map) { _maps.push_back(map); } }} #endif /* _topologymap_H */ csg-1.4.1/include/votca/csg/topologyreader.h000066400000000000000000000024741315264121600210240ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _TOPOLOGYREADER_H #define _TOPOLOGYREADER_H #include #include "topology.h" #include "fileformatfactory.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; class TopologyReader { public: virtual ~TopologyReader() {} /// open a trejectory file virtual bool ReadTopology(string file, Topology &top) = 0; static void RegisterPlugins(void); }; // important - singleton pattern, make sure factory is created before accessed inline FileFormatFactory &TopReaderFactory() { static FileFormatFactory _TopReaderFactory; return _TopReaderFactory; } }} #endif /* _TOPOLOGYREADER_H */ csg-1.4.1/include/votca/csg/trajectoryreader.h000066400000000000000000000031741315264121600213340ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
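 *
 * A hedged sketch of reading a topology through the reader factory declared
 * above. Selecting the concrete reader by file name/extension via Create() is
 * an assumption about FileFormatFactory (whose definition is not part of this
 * header), and "topol.tpr" is only a placeholder file name.
 *
 * \code
 * TopologyReader::RegisterPlugins();                        // register available formats
 * TopologyReader *reader = TopReaderFactory().Create("topol.tpr");  // assumed lookup by extension
 * Topology top;
 * if (reader && reader->ReadTopology("topol.tpr", top)) {
 *     // top now holds the beads, residues and molecules from the file
 * }
 * \endcode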
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _trajectoryreader_H #define _trajectoryreader_H #include #include "topology.h" #include "fileformatfactory.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; /** \brief trajectoryreader interface This class defines the interface a trajectory reader has to implement */ class TrajectoryReader { public: virtual ~TrajectoryReader() {} /// open a trejectory file virtual bool Open(const string &file) = 0; virtual void Close() {}; /// read in the first frame virtual bool FirstFrame(Topology &top) = 0; /// read in the next frame virtual bool NextFrame(Topology &top) = 0; static void RegisterPlugins(void); }; // important - singleton pattern, make sure factory is created before accessed inline FileFormatFactory &TrjReaderFactory() { static FileFormatFactory _TrjReaderFactory; return _TrjReaderFactory; } }} #endif /* _trajectoryreader_H */ csg-1.4.1/include/votca/csg/trajectorywriter.h000066400000000000000000000026651315264121600214120ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _trajectorywriter_H #define _trajectorywriter_H #include #include #include #include "fileformatfactory.h" #include "topology.h" namespace votca { namespace csg { using namespace votca::tools; using namespace std; class TrajectoryWriter { public: TrajectoryWriter() {} virtual ~TrajectoryWriter() {} virtual void Open(string file, bool bAppend = false) {} virtual void Close() {}; virtual void Write(Topology *top) {} static void RegisterPlugins(void); }; // important - singleton pattern, make sure factory is created before accessed inline FileFormatFactory &TrjWriterFactory() { static FileFormatFactory _TrjWriterFactory; return _TrjWriterFactory; } }} #endif /* _trajectorywriter_H */ csg-1.4.1/include/votca/csg/triclinicbox.h000066400000000000000000000020461315264121600204510ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
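 *
 * A minimal frame-reading loop against the TrajectoryReader interface above.
 * Obtaining the concrete reader from TrjReaderFactory() by file name is an
 * assumption, and "traj.trr" is a placeholder; the topology is usually filled
 * by a TopologyReader beforehand.
 *
 * \code
 * TrajectoryReader::RegisterPlugins();
 * TrajectoryReader *trj = TrjReaderFactory().Create("traj.trr");  // assumed lookup by extension
 * Topology top;                                    // normally already populated
 * if (trj && trj->Open("traj.trr")) {
 *     bool ok = trj->FirstFrame(top);
 *     while (ok) {
 *         // analyse the current frame here
 *         ok = trj->NextFrame(top);
 *     }
 *     trj->Close();
 * }
 * \endcode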
* */ #ifndef __VOTCA_TRICLINICBOX_H #define __VOTCA_TRICLINICBOX_H #include "boundarycondition.h" namespace votca { namespace csg { using namespace std; using namespace votca::tools; class TriclinicBox : public BoundaryCondition { public: vec BCShortestConnection(const vec &r_i, const vec &r_j) const; eBoxtype getBoxType() { return typeTriclinic; } protected: }; }} #endif /* TRICLINICBOX_H */ csg-1.4.1/include/votca/csg/triplelist.h000066400000000000000000000062611315264121600201560ustar00rootroot00000000000000/* * Copyright 2016 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _TRIPLELIST_H #define _TRIPLELIST_H #include #include namespace votca { namespace csg { using namespace std; template class TripleList { public: TripleList() {} virtual ~TripleList() { Cleanup(); } void AddTriple(triple_type *t); typedef typename std::list::iterator iterator; iterator begin() { return _triples.begin(); } iterator end() { return _triples.end(); } typename list::size_type size() { return _triples.size(); } triple_type *front() { return _triples.front(); } triple_type *back() { return _triples.back(); } bool empty() { return _triples.empty(); } void Cleanup(); triple_type *FindTriple(element_type e1, element_type e2, element_type e3); typedef element_type element_t; typedef triple_type triple_t; protected: list _triples; map< element_type , map > > _triple_map; }; template inline void TripleList::AddTriple(triple_type *t) { /// \todo be careful, same triple object is used, some values might change (e.g. sign of distance vectors) //experimental: So far only mapping '123' and '321' to the same triple _triple_map[ (*t)[0] ][ (*t)[1] ][ (*t)[2] ] = t; _triple_map[ (*t)[2] ][ (*t)[1] ][ (*t)[0] ] = t; /// \todo check if unique _triples.push_back(t); } template inline void TripleList::Cleanup() { for(iterator iter = _triples.begin(); iter!=_triples.end(); ++iter) delete *iter; _triples.clear(); _triple_map.clear(); } template inline triple_type *TripleList::FindTriple(element_type e1, element_type e2, element_type e3) { typename std::map< element_type , map > > ::iterator iter1; iter1 = _triple_map.find(e1); if(iter1==_triple_map.end()) return NULL; typename std::map >::iterator iter2; iter2 = iter1->second.find(e2); if(iter2 == iter1->second.end()) return NULL; typename std::map::iterator iter3; iter3 = iter2->second.find(e3); if(iter3 == iter2->second.end()) return NULL; return iter3->second; } }} #endif /* _TRIPLELIST_H */ csg-1.4.1/include/votca/csg/version.h000066400000000000000000000046721315264121600174540ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
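 *
 * A usage sketch for the TripleList container above, analogous to PairList.
 * The triple type must be indexable with operator[] returning its three
 * elements (as AddTriple() requires); BeadTriple and its constructor are
 * hypothetical names used only for illustration.
 *
 * \code
 * TripleList<Bead*, BeadTriple> tlist;
 * BeadTriple *t = new BeadTriple(b1, b2, b3);     // hypothetical triple type
 * tlist.AddTriple(t);                             // stored under '123' and '321'
 * BeadTriple *s = tlist.FindTriple(b3, b2, b1);   // reversed order, s == t
 * \endcode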
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** \mainpage VOTCA C++ reference \section intro_sec Introduction This page is the C++ code documentation of the VOTCA package (http://www.votca.org). The main target of VOTCA is the implementation of systematic coarse-graining techniques. However, it offers a powerful, object-oriented framework to develop analysis tools for particle based molecular simulations. \section started_sec Getting started To start developing custom analysis tools, a good place to start is the csgapps repository: https://github.com/votca/csgapps It contains several small analysis tools which were implemented based upon the VOTCA framework. We highly recomment to use an IDE such as Netbeans for development since it offers lots of guides to get started with new code (code completion, code documentation popups, navigation thourh code, ...). The main container for the whole structure is the Topology, so it is a good advise to get comfortable with this class. Also the standard applications in csg/src/tools might help. \section beginner_sec For beginners: how to avoid frustration For those not familiar with object oriented code: don't try to dig into every single function in order to understand what exactly is going on. This strategy only works for very small projects and is not intended for oject oriented programs. Think about the code in layers of abstraction! Your main focus should be on the global structure and understand how objects relate to each other. The code was designed that you don't have to redo and understand all the nasty details! */ #ifndef _csg_version_H #define _csg_version_H #include namespace votca { namespace csg { const std::string & CsgVersionStr(); void HelpTextHeader(const std::string &tool_name); }} #endif /* _tools_version_H */ csg-1.4.1/netbeans/000077500000000000000000000000001315264121600140715ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_fmatch/000077500000000000000000000000001315264121600161675ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_fmatch/Makefile000066400000000000000000000043351315264121600176340ustar00rootroot00000000000000# # There exist several targets which are by default empty and which can be # used for execution of your targets. These targets are usually executed # before and after some main targets. They are: # # .build-pre: called before 'build' target # .build-post: called after 'build' target # .clean-pre: called before 'clean' target # .clean-post: called after 'clean' target # .clobber-pre: called before 'clobber' target # .clobber-post: called after 'clobber' target # .all-pre: called before 'all' target # .all-post: called after 'all' target # .help-pre: called before 'help' target # .help-post: called after 'help' target # # Targets beginning with '.' are not intended to be called on their own. 
# # Main targets can be executed directly, and they are: # # build build a specific configuration # clean remove built files from a configuration # clobber remove all built files # all build all configurations # help print help mesage # # Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and # .help-impl are implemented in nbproject/makefile-impl.mk. # # NOCDDL # Environment MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib # build build: .build-pre .build-impl .build-post .build-pre: # Add your pre 'build' code here... .build-post: # Add your post 'build' code here... # clean clean: .clean-pre .clean-impl .clean-post .clean-pre: # Add your pre 'clean' code here... .clean-post: # Add your post 'clean' code here... # clobber clobber: .clobber-pre .clobber-impl .clobber-post .clobber-pre: # Add your pre 'clobber' code here... .clobber-post: # Add your post 'clobber' code here... # all all: .all-pre .all-impl .all-post .all-pre: # Add your pre 'all' code here... .all-post: # Add your post 'all' code here... # help help: .help-pre .help-impl .help-post .help-pre: # Add your pre 'help' code here... .help-post: # Add your post 'help' code here... # include project implementation makefile include nbproject/Makefile-impl.mk csg-1.4.1/netbeans/csg_fmatch/nbproject/000077500000000000000000000000001315264121600201555ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_fmatch/nbproject/Makefile-Debug.mk000066400000000000000000000042671315264121600232600ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC= AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist # Include project Makefile include Makefile # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_fmatch.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=-L/people/thnfs/homes/ruehle/gmx/lib ../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lboost_program_options -lgmx -lexpat -lgsl -lgslcblas -lm -lpthread # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Debug.mk ../../src/tools/csg_fmatch ../../src/tools/csg_fmatch: ../libcsg/../../src/libcsg/libcsg.a ../../src/tools/csg_fmatch: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ../../src/tools/csg_fmatch: ${OBJECTFILES} ${MKDIR} -p ../../src/tools ${LINK.cc} -o ../../src/tools/csg_fmatch ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_fmatch.o: ../../src/tools/csg_fmatch.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -O -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_fmatch.o ../../src/tools/csg_fmatch.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Debug ${RM} ../../src/tools/csg_fmatch # Subprojects .clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug clean cd 
../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_fmatch/nbproject/Makefile-Release.mk000066400000000000000000000031461315264121600236050ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC= AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist # Include project Makefile include Makefile # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_fmatch.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Release.mk dist/Release/GNU-Linux-x86/csg_fmatch dist/Release/GNU-Linux-x86/csg_fmatch: ${OBJECTFILES} ${MKDIR} -p dist/Release/GNU-Linux-x86 ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_fmatch ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_fmatch.o: ../../src/tools/csg_fmatch.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_fmatch.o ../../src/tools/csg_fmatch.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Release ${RM} dist/Release/GNU-Linux-x86/csg_fmatch # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_fmatch/nbproject/Makefile-impl.mk000066400000000000000000000101561315264121600231650ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. # # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=csg_fmatch # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." 
for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." @echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." 
@echo "" csg-1.4.1/netbeans/csg_fmatch/nbproject/configurations.xml000066400000000000000000000074451315264121600237430ustar00rootroot00000000000000 ../../src/tools/csg_fmatch.h ../../src/tools/csg_fmatch.cc Makefile ../../src/tools Makefile localhost GNU|GNU 2 2 ../../include ../../../tools/include ../../src/tools/csg_fmatch /people/thnfs/homes/ruehle/gmx/lib boost_program_options gmx expat gsl gslcblas Mathematics PosixThreads localhost GNU|GNU 2 5 5 5 csg-1.4.1/netbeans/csg_fmatch/nbproject/project.properties000066400000000000000000000000001315264121600237270ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_fmatch/nbproject/project.xml000066400000000000000000000017771315264121600223610ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject csg_fmatch 0 cc h UTF-8 ../libcsg ../../../tools/netbeans/libtools ../../src/tools Debug Release csg-1.4.1/netbeans/csg_imcrepack/000077500000000000000000000000001315264121600166635ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_imcrepack/.dep.inc000066400000000000000000000002201315264121600201760ustar00rootroot00000000000000# This code depends on make tool being used DEPFILES=$(wildcard $(addsuffix .d, ${OBJECTFILES})) ifneq (${DEPFILES},) include ${DEPFILES} endif csg-1.4.1/netbeans/csg_imcrepack/Makefile000066400000000000000000000042471315264121600203320ustar00rootroot00000000000000# # There exist several targets which are by default empty and which can be # used for execution of your targets. These targets are usually executed # before and after some main targets. They are: # # .build-pre: called before 'build' target # .build-post: called after 'build' target # .clean-pre: called before 'clean' target # .clean-post: called after 'clean' target # .clobber-pre: called before 'clobber' target # .clobber-post: called after 'clobber' target # .all-pre: called before 'all' target # .all-post: called after 'all' target # .help-pre: called before 'help' target # .help-post: called after 'help' target # # Targets beginning with '.' are not intended to be called on their own. # # Main targets can be executed directly, and they are: # # build build a specific configuration # clean remove built files from a configuration # clobber remove all built files # all build all configurations # help print help mesage # # Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and # .help-impl are implemented in nbproject/makefile-impl.mk. # # NOCDDL # Environment MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib # build build: .build-post .build-pre: # Add your pre 'build' code here... .build-post: .build-impl # Add your post 'build' code here... # clean clean: .clean-post .clean-pre: # Add your pre 'clean' code here... .clean-post: .clean-impl # Add your post 'clean' code here... # clobber clobber: .clobber-post .clobber-pre: # Add your pre 'clobber' code here... .clobber-post: .clobber-impl # Add your post 'clobber' code here... # all all: .all-post .all-pre: # Add your pre 'all' code here... .all-post: .all-impl # Add your post 'all' code here... # help help: .help-post .help-pre: # Add your pre 'help' code here... .help-post: .help-impl # Add your post 'help' code here... # include project implementation makefile include nbproject/Makefile-impl.mk csg-1.4.1/netbeans/csg_imcrepack/nbproject/000077500000000000000000000000001315264121600206515ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_imcrepack/nbproject/Makefile-Debug.mk000066400000000000000000000042071315264121600237460ustar00rootroot00000000000000# # Generated Makefile - do not edit! 
# # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC= AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist # Include project Makefile include Makefile # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_imcrepack.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lboost_program_options -lm # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Debug.mk ../../src/tools/csg_imcrepack ../../src/tools/csg_imcrepack: ../libcsg/../../src/libcsg/libcsg.a ../../src/tools/csg_imcrepack: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ../../src/tools/csg_imcrepack: ${OBJECTFILES} ${MKDIR} -p ../../src/tools ${LINK.cc} -o ../../src/tools/csg_imcrepack ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_imcrepack.o: ../../src/tools/csg_imcrepack.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -I../../../tools/include -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_imcrepack.o ../../src/tools/csg_imcrepack.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Debug ${RM} ../../src/tools/csg_imcrepack # Subprojects .clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug clean cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_imcrepack/nbproject/Makefile-Release.mk000066400000000000000000000032011315264121600242710ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. 
# Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC= AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist # Include project Makefile include Makefile # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_imcrepack.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Release.mk dist/Release/GNU-Linux-x86/csg_imcrepack dist/Release/GNU-Linux-x86/csg_imcrepack: ${OBJECTFILES} ${MKDIR} -p dist/Release/GNU-Linux-x86 ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_imcrepack ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_imcrepack.o: ../../src/tools/csg_imcrepack.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_imcrepack.o ../../src/tools/csg_imcrepack.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Release ${RM} dist/Release/GNU-Linux-x86/csg_imcrepack # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_imcrepack/nbproject/Makefile-impl.mk000066400000000000000000000101611315264121600236550ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. # # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=csg_imcrepack # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... 
Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." @echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." @echo "" csg-1.4.1/netbeans/csg_imcrepack/nbproject/Makefile-variables.mk000066400000000000000000000015161315264121600246700ustar00rootroot00000000000000# # Generated - do not edit! # # NOCDDL # CND_BASEDIR=`pwd` CND_BUILDDIR=build CND_DISTDIR=dist # Debug configuration CND_PLATFORM_Debug=GNU-Linux-x86 CND_ARTIFACT_DIR_Debug=../../src/tools CND_ARTIFACT_NAME_Debug=csg_imcrepack CND_ARTIFACT_PATH_Debug=../../src/tools/csg_imcrepack CND_PACKAGE_DIR_Debug=dist/Debug/GNU-Linux-x86/package CND_PACKAGE_NAME_Debug=csgimcrepack.tar CND_PACKAGE_PATH_Debug=dist/Debug/GNU-Linux-x86/package/csgimcrepack.tar # Release configuration CND_PLATFORM_Release=GNU-Linux-x86 CND_ARTIFACT_DIR_Release=dist/Release/GNU-Linux-x86 CND_ARTIFACT_NAME_Release=csg_imcrepack CND_ARTIFACT_PATH_Release=dist/Release/GNU-Linux-x86/csg_imcrepack CND_PACKAGE_DIR_Release=dist/Release/GNU-Linux-x86/package CND_PACKAGE_NAME_Release=csgimcrepack.tar CND_PACKAGE_PATH_Release=dist/Release/GNU-Linux-x86/package/csgimcrepack.tar csg-1.4.1/netbeans/csg_imcrepack/nbproject/Package-Debug.bash000066400000000000000000000025731315264121600240760ustar00rootroot00000000000000#!/bin/bash -x # # Generated - do not edit! # # Macros TOP=`pwd` CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist NBTMPDIR=build/${CND_CONF}/${CND_PLATFORM}/tmp-packaging TMPDIRNAME=tmp-packaging OUTPUT_PATH=../../src/tools/csg_imcrepack OUTPUT_BASENAME=csg_imcrepack PACKAGE_TOP_DIR=csgimcrepack/ # Functions function checkReturnCode { rc=$? 
if [ $rc != 0 ] then exit $rc fi } function makeDirectory # $1 directory path # $2 permission (optional) { mkdir -p "$1" checkReturnCode if [ "$2" != "" ] then chmod $2 "$1" checkReturnCode fi } function copyFileToTmpDir # $1 from-file path # $2 to-file path # $3 permission { cp "$1" "$2" checkReturnCode if [ "$3" != "" ] then chmod $3 "$2" checkReturnCode fi } # Setup cd "${TOP}" mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package rm -rf ${NBTMPDIR} mkdir -p ${NBTMPDIR} # Copy files and create directories and links cd "${TOP}" makeDirectory "${NBTMPDIR}/csgimcrepack/bin" copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755 # Generate tar file cd "${TOP}" rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/csgimcrepack.tar cd ${NBTMPDIR} tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/csgimcrepack.tar * checkReturnCode # Cleanup cd "${TOP}" rm -rf ${NBTMPDIR} csg-1.4.1/netbeans/csg_imcrepack/nbproject/Package-Release.bash000066400000000000000000000026301315264121600244220ustar00rootroot00000000000000#!/bin/bash -x # # Generated - do not edit! # # Macros TOP=`pwd` CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist NBTMPDIR=build/${CND_CONF}/${CND_PLATFORM}/tmp-packaging TMPDIRNAME=tmp-packaging OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_imcrepack OUTPUT_BASENAME=csg_imcrepack PACKAGE_TOP_DIR=csgimcrepack/ # Functions function checkReturnCode { rc=$? if [ $rc != 0 ] then exit $rc fi } function makeDirectory # $1 directory path # $2 permission (optional) { mkdir -p "$1" checkReturnCode if [ "$2" != "" ] then chmod $2 "$1" checkReturnCode fi } function copyFileToTmpDir # $1 from-file path # $2 to-file path # $3 permission { cp "$1" "$2" checkReturnCode if [ "$3" != "" ] then chmod $3 "$2" checkReturnCode fi } # Setup cd "${TOP}" mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package rm -rf ${NBTMPDIR} mkdir -p ${NBTMPDIR} # Copy files and create directories and links cd "${TOP}" makeDirectory "${NBTMPDIR}/csgimcrepack/bin" copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755 # Generate tar file cd "${TOP}" rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/csgimcrepack.tar cd ${NBTMPDIR} tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/csgimcrepack.tar * checkReturnCode # Cleanup cd "${TOP}" rm -rf ${NBTMPDIR} csg-1.4.1/netbeans/csg_imcrepack/nbproject/configurations.xml000066400000000000000000000064641315264121600244370ustar00rootroot00000000000000 ../../src/tools/csg_imcrepack.cc Makefile ../../src/tools Makefile localhost GNU|GNU 2 ../../../tools/include ../../include ../../src/tools/csg_imcrepack boost_program_options Mathematics localhost GNU|GNU 2 5 5 5 csg-1.4.1/netbeans/csg_imcrepack/nbproject/project.properties000066400000000000000000000000001315264121600244230ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_imcrepack/nbproject/project.xml000066400000000000000000000017561315264121600230520ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject csg_imcrepack 0 cc UTF-8 ../libcsg ../../../tools/netbeans/libtools ../../src/tools Debug Release csg-1.4.1/netbeans/csg_resample/000077500000000000000000000000001315264121600165355ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_resample/Makefile_nb000066400000000000000000000043351315264121600206610ustar00rootroot00000000000000# # There exist several targets which are by default empty and which can be # used for execution of your targets. 
These targets are usually executed # before and after some main targets. They are: # # .build-pre: called before 'build' target # .build-post: called after 'build' target # .clean-pre: called before 'clean' target # .clean-post: called after 'clean' target # .clobber-pre: called before 'clobber' target # .clobber-post: called after 'clobber' target # .all-pre: called before 'all' target # .all-post: called after 'all' target # .help-pre: called before 'help' target # .help-post: called after 'help' target # # Targets beginning with '.' are not intended to be called on their own. # # Main targets can be executed directly, and they are: # # build build a specific configuration # clean remove built files from a configuration # clobber remove all built files # all build all configurations # help print help mesage # # Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and # .help-impl are implemented in nbproject/makefile-impl.mk. # # NOCDDL # Environment MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib # build build: .build-pre .build-impl .build-post .build-pre: # Add your pre 'build' code here... .build-post: # Add your post 'build' code here... # clean clean: .clean-pre .clean-impl .clean-post .clean-pre: # Add your pre 'clean' code here... .clean-post: # Add your post 'clean' code here... # clobber clobber: .clobber-pre .clobber-impl .clobber-post .clobber-pre: # Add your pre 'clobber' code here... .clobber-post: # Add your post 'clobber' code here... # all all: .all-pre .all-impl .all-post .all-pre: # Add your pre 'all' code here... .all-post: # Add your post 'all' code here... # help help: .help-pre .help-impl .help-post .help-pre: # Add your pre 'help' code here... .help-post: # Add your post 'help' code here... # include project implementation makefile include nbproject/Makefile-impl.mk csg-1.4.1/netbeans/csg_resample/nbproject/000077500000000000000000000000001315264121600205235ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_resample/nbproject/Makefile-Debug.mk000066400000000000000000000042201315264121600236130ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. 
# Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC= AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_resample.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lboost_program_options -lgsl -lgslcblas -lm # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Debug.mk ../../src/tools/csg_resample ../../src/tools/csg_resample: ../libcsg/../../src/libcsg/libcsg.a ../../src/tools/csg_resample: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ../../src/tools/csg_resample: ${OBJECTFILES} ${MKDIR} -p ../../src/tools ${LINK.cc} -o ../../src/tools/csg_resample ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_resample.o: ../../src/tools/csg_resample.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -I../../../tools/include -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_resample.o ../../src/tools/csg_resample.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Debug ${RM} ../../src/tools/csg_resample # Subprojects .clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug clean cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_resample/nbproject/Makefile-Release.mk000066400000000000000000000031731315264121600241530ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. 
# Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC= AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_resample.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Release.mk dist/Release/GNU-Linux-x86/csg_resample dist/Release/GNU-Linux-x86/csg_resample: ${OBJECTFILES} ${MKDIR} -p dist/Release/GNU-Linux-x86 ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_resample ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_resample.o: ../../src/tools/csg_resample.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_resample.o ../../src/tools/csg_resample.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Release ${RM} dist/Release/GNU-Linux-x86/csg_resample # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_resample/nbproject/Makefile-impl.mk000066400000000000000000000101601315264121600235260ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. # # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=csg_resample # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... 
Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." @echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." @echo "" csg-1.4.1/netbeans/csg_resample/nbproject/configurations.xml000066400000000000000000000066471315264121600243140ustar00rootroot00000000000000 ../../src/tools/csg_resample.cc Makefile_nb ../../src/tools Makefile_nb localhost GNU|GNU 2 ../../../tools/include ../../include ../../src/tools/csg_resample boost_program_options gsl gslcblas Mathematics localhost GNU|GNU 2 5 5 5 csg-1.4.1/netbeans/csg_resample/nbproject/project.properties000066400000000000000000000000001315264121600242750ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_resample/nbproject/project.xml000066400000000000000000000017551315264121600227230ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject csg_resample 0 cc UTF-8 ../libcsg ../../../tools/netbeans/libtools ../../src/tools Debug Release csg-1.4.1/netbeans/csg_reupdate/000077500000000000000000000000001315264121600165365ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_reupdate/nbproject/000077500000000000000000000000001315264121600205245ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_reupdate/nbproject/Makefile-Debug.mk000066400000000000000000000100121315264121600236100ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. 
# Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_reupdate.o \ ${OBJECTDIR}/_ext/312014500/potentialfunctioncbspl.o \ ${OBJECTDIR}/_ext/312014500/potentialfunction.o \ ${OBJECTDIR}/_ext/312014500/potentialfunctionlj126.o \ ${OBJECTDIR}/_ext/312014500/potentialfunctionljg.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lboost_program_options -lgmx -lexpat -lgsl -lgslcblas -lm -lpthread # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate: ../libcsg/../../src/libcsg/libcsg.a ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate: ${OBJECTFILES} ${MKDIR} -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM} ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_reupdate.o: ../../src/tools/csg_reupdate.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_reupdate.o ../../src/tools/csg_reupdate.cc ${OBJECTDIR}/_ext/312014500/potentialfunctioncbspl.o: ../../src/tools/potentialfunctions/potentialfunctioncbspl.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunctioncbspl.o ../../src/tools/potentialfunctions/potentialfunctioncbspl.cc ${OBJECTDIR}/_ext/312014500/potentialfunction.o: ../../src/tools/potentialfunctions/potentialfunction.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunction.o ../../src/tools/potentialfunctions/potentialfunction.cc ${OBJECTDIR}/_ext/312014500/potentialfunctionlj126.o: ../../src/tools/potentialfunctions/potentialfunctionlj126.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunctionlj126.o ../../src/tools/potentialfunctions/potentialfunctionlj126.cc ${OBJECTDIR}/_ext/312014500/potentialfunctionljg.o: ../../src/tools/potentialfunctions/potentialfunctionljg.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunctionljg.o ../../src/tools/potentialfunctions/potentialfunctionljg.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate # Subprojects 
.clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug clean cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_reupdate/nbproject/Makefile-Release.mk000066400000000000000000000062741315264121600241610ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_reupdate.o \ ${OBJECTDIR}/_ext/312014500/potentialfunctioncbspl.o \ ${OBJECTDIR}/_ext/312014500/potentialfunction.o \ ${OBJECTDIR}/_ext/312014500/potentialfunctionlj126.o \ ${OBJECTDIR}/_ext/312014500/potentialfunctionljg.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate: ${OBJECTFILES} ${MKDIR} -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM} ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_reupdate.o: ../../src/tools/csg_reupdate.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_reupdate.o ../../src/tools/csg_reupdate.cc ${OBJECTDIR}/_ext/312014500/potentialfunctioncbspl.o: ../../src/tools/potentialfunctions/potentialfunctioncbspl.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunctioncbspl.o ../../src/tools/potentialfunctions/potentialfunctioncbspl.cc ${OBJECTDIR}/_ext/312014500/potentialfunction.o: ../../src/tools/potentialfunctions/potentialfunction.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunction.o ../../src/tools/potentialfunctions/potentialfunction.cc ${OBJECTDIR}/_ext/312014500/potentialfunctionlj126.o: ../../src/tools/potentialfunctions/potentialfunctionlj126.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunctionlj126.o ../../src/tools/potentialfunctions/potentialfunctionlj126.cc ${OBJECTDIR}/_ext/312014500/potentialfunctionljg.o: ../../src/tools/potentialfunctions/potentialfunctionljg.cc ${MKDIR} -p ${OBJECTDIR}/_ext/312014500 ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/312014500/potentialfunctionljg.o ../../src/tools/potentialfunctions/potentialfunctionljg.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/csg_reupdate # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl 
include .dep.inc csg-1.4.1/netbeans/csg_reupdate/nbproject/Makefile-impl.mk000066400000000000000000000101601315264121600235270ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. # # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=csg_reupdate # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! 
-f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." @echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." @echo "" csg-1.4.1/netbeans/csg_reupdate/nbproject/configurations.xml000066400000000000000000000111221315264121600242750ustar00rootroot00000000000000 ../../src/tools/csg_reupdate.h ../../src/tools/potentialfunctions/potentialfunction.h ../../src/tools/potentialfunctions/potentialfunctioncbspl.h ../../src/tools/potentialfunctions/potentialfunctionlj126.h ../../src/tools/potentialfunctions/potentialfunctionljg.h ../../src/tools/csg_reupdate.cc ../../src/tools/potentialfunctions/potentialfunction.cc ../../src/tools/potentialfunctions/potentialfunctioncbspl.cc ../../src/tools/potentialfunctions/potentialfunctionlj126.cc ../../src/tools/potentialfunctions/potentialfunctionljg.cc Makefile ../../include ../../src/tools/potentialfunctions Makefile LOCAL_SOURCES GNU|GNU ../../include ../../../tools/include boost_program_options gmx expat gsl gslcblas Mathematics PosixThreads LOCAL_SOURCES GNU|GNU 5 5 5 5 csg-1.4.1/netbeans/csg_reupdate/nbproject/project.properties000066400000000000000000000000001315264121600242760ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_reupdate/nbproject/project.xml000066400000000000000000000023771315264121600227250ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject csg_reupdate 0 cc h UTF-8 ../libcsg ../../../tools/netbeans/libtools ../../include ../../src/tools/potentialfunctions Debug 1 Release 1 csg-1.4.1/netbeans/csg_stat/000077500000000000000000000000001315264121600157005ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_stat/Makefile000066400000000000000000000043351315264121600173450ustar00rootroot00000000000000# # There exist several targets which are by default empty and which can be # used for execution of your targets. These targets are usually executed # before and after some main targets. They are: # # .build-pre: called before 'build' target # .build-post: called after 'build' target # .clean-pre: called before 'clean' target # .clean-post: called after 'clean' target # .clobber-pre: called before 'clobber' target # .clobber-post: called after 'clobber' target # .all-pre: called before 'all' target # .all-post: called after 'all' target # .help-pre: called before 'help' target # .help-post: called after 'help' target # # Targets beginning with '.' are not intended to be called on their own. 
# # Main targets can be executed directly, and they are: # # build build a specific configuration # clean remove built files from a configuration # clobber remove all built files # all build all configurations # help print help mesage # # Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and # .help-impl are implemented in nbproject/makefile-impl.mk. # # NOCDDL # Environment MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib # build build: .build-pre .build-impl .build-post .build-pre: # Add your pre 'build' code here... .build-post: # Add your post 'build' code here... # clean clean: .clean-pre .clean-impl .clean-post .clean-pre: # Add your pre 'clean' code here... .clean-post: # Add your post 'clean' code here... # clobber clobber: .clobber-pre .clobber-impl .clobber-post .clobber-pre: # Add your pre 'clobber' code here... .clobber-post: # Add your post 'clobber' code here... # all all: .all-pre .all-impl .all-post .all-pre: # Add your pre 'all' code here... .all-post: # Add your post 'all' code here... # help help: .help-pre .help-impl .help-post .help-pre: # Add your pre 'help' code here... .help-post: # Add your post 'help' code here... # include project implementation makefile include nbproject/Makefile-impl.mk csg-1.4.1/netbeans/csg_stat/nbproject/000077500000000000000000000000001315264121600176665ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_stat/nbproject/Makefile-Debug.mk000066400000000000000000000047571315264121600227750ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o \ ${OBJECTDIR}/_ext/715944016/csg_stat.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lgmx -lboost_program_options -lexpat -lm -lpthread # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ../../src/tools/csg_stat ../../src/tools/csg_stat: ../libcsg/../../src/libcsg/libcsg.a ../../src/tools/csg_stat: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ../../src/tools/csg_stat: ${OBJECTFILES} ${MKDIR} -p ../../src/tools ${LINK.cc} -o ../../src/tools/csg_stat ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o: ../../src/tools/csg_stat_imc.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o ../../src/tools/csg_stat_imc.cc ${OBJECTDIR}/_ext/715944016/csg_stat.o: ../../src/tools/csg_stat.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -I../../include -I../../../tools/include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_stat.o ../../src/tools/csg_stat.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb 
CONF=Debug cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ../../src/tools/csg_stat # Subprojects .clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Debug clean cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_stat/nbproject/Makefile-Release.mk000066400000000000000000000047501315264121600233200ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o \ ${OBJECTDIR}/_ext/715944016/csg_stat.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lgmx -lboost_program_options -lxml2 -lm # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ../../src/tools/csg_stat ../../src/tools/csg_stat: ../libcsg/../../src/libcsg/libcsg.a ../../src/tools/csg_stat: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ../../src/tools/csg_stat: ${OBJECTFILES} ${MKDIR} -p ../../src/tools ${LINK.cc} -o ../../src/tools/csg_stat ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o: ../../src/tools/csg_stat_imc.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -O -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o ../../src/tools/csg_stat_imc.cc ${OBJECTDIR}/_ext/715944016/csg_stat.o: ../../src/tools/csg_stat.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -O -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_stat.o ../../src/tools/csg_stat.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Release cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Release # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ../../src/tools/csg_stat # Subprojects .clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=Release clean cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Release clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_stat/nbproject/Makefile-impl.mk000066400000000000000000000101741315264121600226760ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. 
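#
# --- Editor's illustrative note (not part of the generated file) ---
# The .depcheck-impl target further down writes a helper file, .dep.inc, which
# the per-configuration makefiles include at their end. With GNU make
# (MAKE_VERSION set) the escape-laden echo lines below produce a file that
# reads roughly:
#
#   # This code depends on make tool being used
#   DEPFILES=$(wildcard $(addsuffix .d, ${OBJECTFILES}))
#   ifneq (${DEPFILES},)
#   include ${DEPFILES}
#   endif
#
# so the per-object .d files generated by the -MMD -MP compile flags are pulled
# in automatically on subsequent runs; with other make implementations it falls
# back to .KEEP_STATE instead.
#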
# # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=csg_stat # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release profile_release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." 
@echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." @echo "" csg-1.4.1/netbeans/csg_stat/nbproject/Makefile-profile_release.mk000066400000000000000000000045521315264121600251000ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=profile_release CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o \ ${OBJECTDIR}/_ext/715944016/csg_stat.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS=-pg CXXFLAGS=-pg # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=../libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lgmx -lboost_program_options -lxml2 -lm # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ../../src/tools/csg_stat ../../src/tools/csg_stat: ../libcsg/../../src/libcsg/libcsg.a ../../src/tools/csg_stat: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a ../../src/tools/csg_stat: ${OBJECTFILES} ${MKDIR} -p ../../src/tools ${LINK.cc} -pg -o ../../src/tools/csg_stat ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o: ../../src/tools/csg_stat_imc.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -O -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_stat_imc.o ../../src/tools/csg_stat_imc.cc ${OBJECTDIR}/_ext/715944016/csg_stat.o: ../../src/tools/csg_stat.cc ${MKDIR} -p ${OBJECTDIR}/_ext/715944016 ${RM} $@.d $(COMPILE.cc) -g -O -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/715944016/csg_stat.o ../../src/tools/csg_stat.cc # Subprojects .build-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=profile_release # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ../../src/tools/csg_stat # Subprojects .clean-subprojects: cd ../libcsg && ${MAKE} -f Makefile_nb CONF=profile_release clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/csg_stat/nbproject/configurations.xml000066400000000000000000000167271315264121600234570ustar00rootroot00000000000000 ../../src/tools/imc.h ../../src/tools/csg_stat.cc ../../src/tools/csg_stat_imc.cc Makefile ../../src/tools Makefile localhost GNU|GNU 2 ../../include ../../../tools/include ../../src/tools/csg_stat gmx boost_program_options expat Mathematics PosixThreads localhost GNU|GNU 2 5 2 ../../include ../../../include 5 ../../src/tools/csg_stat gmx boost_program_options xml2 Mathematics localhost GNU|GNU 2 5 2 ../../include ../../../include -pg 5 ../../src/tools/csg_stat gmx boost_program_options xml2 Mathematics -pg 
csg-1.4.1/netbeans/csg_stat/nbproject/project.properties000066400000000000000000000000001315264121600234400ustar00rootroot00000000000000csg-1.4.1/netbeans/csg_stat/nbproject/project.xml000066400000000000000000000020621315264121600220560ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject csg_stat 0 cc h UTF-8 ../libcsg ../../../tools/netbeans/libtools ../../src/tools Debug Release profile_release csg-1.4.1/netbeans/libcsg/000077500000000000000000000000001315264121600153345ustar00rootroot00000000000000csg-1.4.1/netbeans/libcsg/Makefile_nb000066400000000000000000000043351315264121600174600ustar00rootroot00000000000000# # There exist several targets which are by default empty and which can be # used for execution of your targets. These targets are usually executed # before and after some main targets. They are: # # .build-pre: called before 'build' target # .build-post: called after 'build' target # .clean-pre: called before 'clean' target # .clean-post: called after 'clean' target # .clobber-pre: called before 'clobber' target # .clobber-post: called after 'clobber' target # .all-pre: called before 'all' target # .all-post: called after 'all' target # .help-pre: called before 'help' target # .help-post: called after 'help' target # # Targets beginning with '.' are not intended to be called on their own. # # Main targets can be executed directly, and they are: # # build build a specific configuration # clean remove built files from a configuration # clobber remove all built files # all build all configurations # help print help mesage # # Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and # .help-impl are implemented in nbproject/makefile-impl.mk. # # NOCDDL # Environment MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib # build build: .build-pre .build-impl .build-post .build-pre: # Add your pre 'build' code here... .build-post: # Add your post 'build' code here... # clean clean: .clean-pre .clean-impl .clean-post .clean-pre: # Add your pre 'clean' code here... .clean-post: # Add your post 'clean' code here... # clobber clobber: .clobber-pre .clobber-impl .clobber-post .clobber-pre: # Add your pre 'clobber' code here... .clobber-post: # Add your post 'clobber' code here... # all all: .all-pre .all-impl .all-post .all-pre: # Add your pre 'all' code here... .all-post: # Add your post 'all' code here... # help help: .help-pre .help-impl .help-post .help-pre: # Add your pre 'help' code here... .help-post: # Add your post 'help' code here... # include project implementation makefile include nbproject/Makefile-impl.mk csg-1.4.1/netbeans/libcsg/nbproject/000077500000000000000000000000001315264121600173225ustar00rootroot00000000000000csg-1.4.1/netbeans/libcsg/nbproject/Makefile-Debug.mk000066400000000000000000000413011315264121600224130ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. 
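#
# --- Editor's illustrative note (not part of the generated file) ---
# The rules below compile every libcsg source into build/Debug/GNU-Linux-x86
# and then archive the objects into the static library ../../src/libcsg/libcsg.a
# (${AR} -rv followed by ranlib). The tool projects in this archive link that
# library directly rather than through -l flags; csg_resample's Debug link
# line, for example, amounts to roughly:
#
#   g++ -o ../../src/tools/csg_resample <objects> \
#       ../../src/libcsg/libcsg.a <path to libtools.a> \
#       -lboost_program_options -lgsl -lgslcblas -lm
#
# (paths shortened; see the csg_resample Makefile-Debug.mk earlier in this archive)
#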
# Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_DLIB_EXT=so CND_CONF=Debug CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/484457853/beadlist.o \ ${OBJECTDIR}/_ext/484457853/boundarycondition.o \ ${OBJECTDIR}/_ext/484457853/cgengine.o \ ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o \ ${OBJECTDIR}/_ext/484457853/csgapplication.o \ ${OBJECTDIR}/_ext/484457853/exclusionlist.o \ ${OBJECTDIR}/_ext/484457853/imcio.o \ ${OBJECTDIR}/_ext/484457853/map.o \ ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o \ ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o \ ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o \ ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o \ ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o \ ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o \ ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/growriter.o \ ${OBJECTDIR}/_ext/1332856960/lammpsreader.o \ ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/pdbwriter.o \ ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/xyzreader.o \ ${OBJECTDIR}/_ext/1332856960/xyzwriter.o \ ${OBJECTDIR}/_ext/484457853/molecule.o \ ${OBJECTDIR}/_ext/484457853/nblist.o \ ${OBJECTDIR}/_ext/484457853/nblistgrid.o \ ${OBJECTDIR}/_ext/484457853/nematicorder.o \ ${OBJECTDIR}/_ext/484457853/openbox.o \ ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o \ ${OBJECTDIR}/_ext/484457853/topology.o \ ${OBJECTDIR}/_ext/484457853/topologymap.o \ ${OBJECTDIR}/_ext/484457853/topologyreader.o \ ${OBJECTDIR}/_ext/484457853/trajectoryreader.o \ ${OBJECTDIR}/_ext/484457853/trajectorywriter.o \ ${OBJECTDIR}/_ext/484457853/triclinicbox.o \ ${OBJECTDIR}/_ext/484457853/version_nb.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/libcsg.a ../../src/libcsg/libcsg.a: ${OBJECTFILES} ${MKDIR} -p ../../src/libcsg ${RM} ../../src/libcsg/libcsg.a ${AR} -rv ../../src/libcsg/libcsg.a ${OBJECTFILES} $(RANLIB) ../../src/libcsg/libcsg.a ${OBJECTDIR}/_ext/484457853/beadlist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/beadlist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/beadlist.o ../../src/libcsg/beadlist.cc ${OBJECTDIR}/_ext/484457853/boundarycondition.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/boundarycondition.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/boundarycondition.o ../../src/libcsg/boundarycondition.cc ${OBJECTDIR}/_ext/484457853/cgengine.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/cgengine.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o 
${OBJECTDIR}/_ext/484457853/cgengine.o ../../src/libcsg/cgengine.cc ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/cgmoleculedef.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o ../../src/libcsg/cgmoleculedef.cc ${OBJECTDIR}/_ext/484457853/csgapplication.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/csgapplication.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/csgapplication.o ../../src/libcsg/csgapplication.cc ${OBJECTDIR}/_ext/484457853/exclusionlist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/exclusionlist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/exclusionlist.o ../../src/libcsg/exclusionlist.cc ${OBJECTDIR}/_ext/484457853/imcio.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/imcio.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/imcio.o ../../src/libcsg/imcio.cc ${OBJECTDIR}/_ext/484457853/map.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/map.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/map.o ../../src/libcsg/map.cc ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/esptopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o ../../src/libcsg/modules/io/esptopologyreader.cc ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/esptrajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o ../../src/libcsg/modules/io/esptrajectoryreader.cc ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_print_version.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o ../../src/libcsg/modules/io/gmx_print_version.cc ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_version_check.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o ../../src/libcsg/modules/io/gmx_version_check.cc ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o: nbproject/Makefile-${CND_CONF}.mk 
../../src/libcsg/modules/io/gmx_version_nb.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o ../../src/libcsg/modules/io/gmx_version_nb.cc ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o ../../src/libcsg/modules/io/gmxtopologyreader.cc ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/grotopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o ../../src/libcsg/modules/io/grotopologyreader.cc ${OBJECTDIR}/_ext/1332856960/growriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/growriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/growriter.o ../../src/libcsg/modules/io/growriter.cc ${OBJECTDIR}/_ext/1332856960/lammpsreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/lammpsreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/lammpsreader.o ../../src/libcsg/modules/io/lammpsreader.cc ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/pdbtopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o ../../src/libcsg/modules/io/pdbtopologyreader.cc ${OBJECTDIR}/_ext/1332856960/pdbwriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/pdbwriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/pdbwriter.o ../../src/libcsg/modules/io/pdbwriter.cc ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o: 
nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xmltopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o ../../src/libcsg/modules/io/xmltopologyreader.cc ${OBJECTDIR}/_ext/1332856960/xyzreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xyzreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xyzreader.o ../../src/libcsg/modules/io/xyzreader.cc ${OBJECTDIR}/_ext/1332856960/xyzwriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xyzwriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xyzwriter.o ../../src/libcsg/modules/io/xyzwriter.cc ${OBJECTDIR}/_ext/484457853/molecule.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/molecule.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/molecule.o ../../src/libcsg/molecule.cc ${OBJECTDIR}/_ext/484457853/nblist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nblist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nblist.o ../../src/libcsg/nblist.cc ${OBJECTDIR}/_ext/484457853/nblistgrid.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nblistgrid.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nblistgrid.o ../../src/libcsg/nblistgrid.cc ${OBJECTDIR}/_ext/484457853/nematicorder.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nematicorder.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nematicorder.o ../../src/libcsg/nematicorder.cc ${OBJECTDIR}/_ext/484457853/openbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/openbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/openbox.o ../../src/libcsg/openbox.cc ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/orthorhombicbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o ../../src/libcsg/orthorhombicbox.cc ${OBJECTDIR}/_ext/484457853/topology.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topology.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topology.o ../../src/libcsg/topology.cc 
${OBJECTDIR}/_ext/484457853/topologymap.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topologymap.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topologymap.o ../../src/libcsg/topologymap.cc ${OBJECTDIR}/_ext/484457853/topologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topologyreader.o ../../src/libcsg/topologyreader.cc ${OBJECTDIR}/_ext/484457853/trajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/trajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/trajectoryreader.o ../../src/libcsg/trajectoryreader.cc ${OBJECTDIR}/_ext/484457853/trajectorywriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/trajectorywriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/trajectorywriter.o ../../src/libcsg/trajectorywriter.cc ${OBJECTDIR}/_ext/484457853/triclinicbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/triclinicbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/triclinicbox.o ../../src/libcsg/triclinicbox.cc ${OBJECTDIR}/_ext/484457853/version_nb.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/version_nb.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -g -I../../src/libcsg -I../../../tools/include -I/usr/include/libxml2 -I../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/version_nb.o ../../src/libcsg/version_nb.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ../../src/libcsg/libcsg.a # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/libcsg/nbproject/Makefile-Release.mk000066400000000000000000000426471315264121600227630ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. 
# Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_DLIB_EXT=so CND_CONF=Release CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/484457853/beadlist.o \ ${OBJECTDIR}/_ext/484457853/boundarycondition.o \ ${OBJECTDIR}/_ext/484457853/cgengine.o \ ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o \ ${OBJECTDIR}/_ext/484457853/csgapplication.o \ ${OBJECTDIR}/_ext/484457853/exclusionlist.o \ ${OBJECTDIR}/_ext/484457853/imcio.o \ ${OBJECTDIR}/_ext/484457853/map.o \ ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o \ ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o \ ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o \ ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o \ ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o \ ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o \ ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/growriter.o \ ${OBJECTDIR}/_ext/1332856960/lammpsreader.o \ ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/pdbwriter.o \ ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/xyzreader.o \ ${OBJECTDIR}/_ext/1332856960/xyzwriter.o \ ${OBJECTDIR}/_ext/484457853/molecule.o \ ${OBJECTDIR}/_ext/484457853/nblist.o \ ${OBJECTDIR}/_ext/484457853/nblistgrid.o \ ${OBJECTDIR}/_ext/484457853/nematicorder.o \ ${OBJECTDIR}/_ext/484457853/openbox.o \ ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o \ ${OBJECTDIR}/_ext/484457853/topology.o \ ${OBJECTDIR}/_ext/484457853/topologymap.o \ ${OBJECTDIR}/_ext/484457853/topologyreader.o \ ${OBJECTDIR}/_ext/484457853/trajectoryreader.o \ ${OBJECTDIR}/_ext/484457853/trajectorywriter.o \ ${OBJECTDIR}/_ext/484457853/triclinicbox.o \ ${OBJECTDIR}/_ext/484457853/version_nb.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/libcsg.a ../../src/libcsg/libcsg.a: ${OBJECTFILES} ${MKDIR} -p ../../src/libcsg ${RM} ../../src/libcsg/libcsg.a ${AR} -rv ../../src/libcsg/libcsg.a ${OBJECTFILES} $(RANLIB) ../../src/libcsg/libcsg.a ${OBJECTDIR}/_ext/484457853/beadlist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/beadlist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/beadlist.o ../../src/libcsg/beadlist.cc ${OBJECTDIR}/_ext/484457853/boundarycondition.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/boundarycondition.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/boundarycondition.o ../../src/libcsg/boundarycondition.cc ${OBJECTDIR}/_ext/484457853/cgengine.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/cgengine.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 
-I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/cgengine.o ../../src/libcsg/cgengine.cc ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/cgmoleculedef.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o ../../src/libcsg/cgmoleculedef.cc ${OBJECTDIR}/_ext/484457853/csgapplication.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/csgapplication.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/csgapplication.o ../../src/libcsg/csgapplication.cc ${OBJECTDIR}/_ext/484457853/exclusionlist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/exclusionlist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/exclusionlist.o ../../src/libcsg/exclusionlist.cc ${OBJECTDIR}/_ext/484457853/imcio.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/imcio.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/imcio.o ../../src/libcsg/imcio.cc ${OBJECTDIR}/_ext/484457853/map.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/map.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/map.o ../../src/libcsg/map.cc ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/esptopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o ../../src/libcsg/modules/io/esptopologyreader.cc ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/esptrajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o ../../src/libcsg/modules/io/esptrajectoryreader.cc ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_print_version.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o ../../src/libcsg/modules/io/gmx_print_version.cc ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_version_check.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 
-I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o ../../src/libcsg/modules/io/gmx_version_check.cc ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_version_nb.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o ../../src/libcsg/modules/io/gmx_version_nb.cc ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o ../../src/libcsg/modules/io/gmxtopologyreader.cc ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/grotopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o ../../src/libcsg/modules/io/grotopologyreader.cc ${OBJECTDIR}/_ext/1332856960/growriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/growriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/growriter.o ../../src/libcsg/modules/io/growriter.cc ${OBJECTDIR}/_ext/1332856960/lammpsreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/lammpsreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/lammpsreader.o ../../src/libcsg/modules/io/lammpsreader.cc ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/pdbtopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o ../../src/libcsg/modules/io/pdbtopologyreader.cc ${OBJECTDIR}/_ext/1332856960/pdbwriter.o: 
nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/pdbwriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/pdbwriter.o ../../src/libcsg/modules/io/pdbwriter.cc ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xmltopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o ../../src/libcsg/modules/io/xmltopologyreader.cc ${OBJECTDIR}/_ext/1332856960/xyzreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xyzreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xyzreader.o ../../src/libcsg/modules/io/xyzreader.cc ${OBJECTDIR}/_ext/1332856960/xyzwriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xyzwriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xyzwriter.o ../../src/libcsg/modules/io/xyzwriter.cc ${OBJECTDIR}/_ext/484457853/molecule.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/molecule.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/molecule.o ../../src/libcsg/molecule.cc ${OBJECTDIR}/_ext/484457853/nblist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nblist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nblist.o ../../src/libcsg/nblist.cc ${OBJECTDIR}/_ext/484457853/nblistgrid.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nblistgrid.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nblistgrid.o ../../src/libcsg/nblistgrid.cc ${OBJECTDIR}/_ext/484457853/nematicorder.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nematicorder.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nematicorder.o ../../src/libcsg/nematicorder.cc ${OBJECTDIR}/_ext/484457853/openbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/openbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/openbox.o ../../src/libcsg/openbox.cc ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/orthorhombicbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d 
$(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o ../../src/libcsg/orthorhombicbox.cc ${OBJECTDIR}/_ext/484457853/topology.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topology.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topology.o ../../src/libcsg/topology.cc ${OBJECTDIR}/_ext/484457853/topologymap.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topologymap.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topologymap.o ../../src/libcsg/topologymap.cc ${OBJECTDIR}/_ext/484457853/topologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topologyreader.o ../../src/libcsg/topologyreader.cc ${OBJECTDIR}/_ext/484457853/trajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/trajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/trajectoryreader.o ../../src/libcsg/trajectoryreader.cc ${OBJECTDIR}/_ext/484457853/trajectorywriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/trajectorywriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/trajectorywriter.o ../../src/libcsg/trajectorywriter.cc ${OBJECTDIR}/_ext/484457853/triclinicbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/triclinicbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/triclinicbox.o ../../src/libcsg/triclinicbox.cc ${OBJECTDIR}/_ext/484457853/version_nb.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/version_nb.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/version_nb.o ../../src/libcsg/version_nb.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ../../src/libcsg/libcsg.a # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/libcsg/nbproject/Makefile-impl.mk000066400000000000000000000101721315264121600223300ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. 
# # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=libcsg # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release profile_release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." 
@echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." @echo "" csg-1.4.1/netbeans/libcsg/nbproject/Makefile-profile_release.mk000066400000000000000000000426651315264121600245430ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_DLIB_EXT=so CND_CONF=profile_release CND_DISTDIR=dist CND_BUILDDIR=build # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/_ext/484457853/beadlist.o \ ${OBJECTDIR}/_ext/484457853/boundarycondition.o \ ${OBJECTDIR}/_ext/484457853/cgengine.o \ ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o \ ${OBJECTDIR}/_ext/484457853/csgapplication.o \ ${OBJECTDIR}/_ext/484457853/exclusionlist.o \ ${OBJECTDIR}/_ext/484457853/imcio.o \ ${OBJECTDIR}/_ext/484457853/map.o \ ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o \ ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o \ ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o \ ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o \ ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o \ ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o \ ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/growriter.o \ ${OBJECTDIR}/_ext/1332856960/lammpsreader.o \ ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/pdbwriter.o \ ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o \ ${OBJECTDIR}/_ext/1332856960/xyzreader.o \ ${OBJECTDIR}/_ext/1332856960/xyzwriter.o \ ${OBJECTDIR}/_ext/484457853/molecule.o \ ${OBJECTDIR}/_ext/484457853/nblist.o \ ${OBJECTDIR}/_ext/484457853/nblistgrid.o \ ${OBJECTDIR}/_ext/484457853/nematicorder.o \ ${OBJECTDIR}/_ext/484457853/openbox.o \ ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o \ ${OBJECTDIR}/_ext/484457853/topology.o \ ${OBJECTDIR}/_ext/484457853/topologymap.o \ ${OBJECTDIR}/_ext/484457853/topologyreader.o \ ${OBJECTDIR}/_ext/484457853/trajectoryreader.o \ ${OBJECTDIR}/_ext/484457853/trajectorywriter.o \ ${OBJECTDIR}/_ext/484457853/triclinicbox.o \ ${OBJECTDIR}/_ext/484457853/version_nb.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS=-pg CXXFLAGS=-pg # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/libcsg.a ../../src/libcsg/libcsg.a: ${OBJECTFILES} ${MKDIR} -p ../../src/libcsg ${RM} ../../src/libcsg/libcsg.a ${AR} -rv ../../src/libcsg/libcsg.a ${OBJECTFILES} $(RANLIB) ../../src/libcsg/libcsg.a ${OBJECTDIR}/_ext/484457853/beadlist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/beadlist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP 
-MF $@.d -o ${OBJECTDIR}/_ext/484457853/beadlist.o ../../src/libcsg/beadlist.cc ${OBJECTDIR}/_ext/484457853/boundarycondition.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/boundarycondition.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/boundarycondition.o ../../src/libcsg/boundarycondition.cc ${OBJECTDIR}/_ext/484457853/cgengine.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/cgengine.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/cgengine.o ../../src/libcsg/cgengine.cc ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/cgmoleculedef.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/cgmoleculedef.o ../../src/libcsg/cgmoleculedef.cc ${OBJECTDIR}/_ext/484457853/csgapplication.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/csgapplication.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/csgapplication.o ../../src/libcsg/csgapplication.cc ${OBJECTDIR}/_ext/484457853/exclusionlist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/exclusionlist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/exclusionlist.o ../../src/libcsg/exclusionlist.cc ${OBJECTDIR}/_ext/484457853/imcio.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/imcio.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/imcio.o ../../src/libcsg/imcio.cc ${OBJECTDIR}/_ext/484457853/map.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/map.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/map.o ../../src/libcsg/map.cc ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/esptopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/esptopologyreader.o ../../src/libcsg/modules/io/esptopologyreader.cc ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/esptrajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/esptrajectoryreader.o ../../src/libcsg/modules/io/esptrajectoryreader.cc 
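# Illustrative sketch, not part of the generated output: every per-object rule in this
# generated makefile repeats the same recipe - create the object directory, remove the
# stale dependency file, then run $(COMPILE.cc) with -MMD -MP -MF so the compiler emits
# both the object and its .d dependency file. A hand-maintained GNU make equivalent could
# express the modules/io objects with one pattern rule, roughly as sketched (commented
# out) below; CSG_INCLUDES is a hypothetical variable standing in for the repeated -I
# flags and is not defined anywhere in this file.
#
# CSG_INCLUDES=-I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs
#
# ${OBJECTDIR}/_ext/1332856960/%.o: ../../src/libcsg/modules/io/%.cc nbproject/Makefile-${CND_CONF}.mk
#	${MKDIR} -p ${@D}
#	${RM} $@.d
#	$(COMPILE.cc) -O3 $(CSG_INCLUDES) -MMD -MP -MF $@.d -o $@ $<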
${OBJECTDIR}/_ext/1332856960/gmx_print_version.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_print_version.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_print_version.o ../../src/libcsg/modules/io/gmx_print_version.cc ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_version_check.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_version_check.o ../../src/libcsg/modules/io/gmx_version_check.cc ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmx_version_nb.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmx_version_nb.o ../../src/libcsg/modules/io/gmx_version_nb.cc ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtopologyreader.o ../../src/libcsg/modules/io/gmxtopologyreader.cc ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtrajectoryreader.o ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/gmxtrajectorywriter.o ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/grotopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/grotopologyreader.o ../../src/libcsg/modules/io/grotopologyreader.cc ${OBJECTDIR}/_ext/1332856960/growriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/growriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/growriter.o ../../src/libcsg/modules/io/growriter.cc ${OBJECTDIR}/_ext/1332856960/lammpsreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/lammpsreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) 
-O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/lammpsreader.o ../../src/libcsg/modules/io/lammpsreader.cc ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/pdbtopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/pdbtopologyreader.o ../../src/libcsg/modules/io/pdbtopologyreader.cc ${OBJECTDIR}/_ext/1332856960/pdbwriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/pdbwriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/pdbwriter.o ../../src/libcsg/modules/io/pdbwriter.cc ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xmltopologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xmltopologyreader.o ../../src/libcsg/modules/io/xmltopologyreader.cc ${OBJECTDIR}/_ext/1332856960/xyzreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xyzreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xyzreader.o ../../src/libcsg/modules/io/xyzreader.cc ${OBJECTDIR}/_ext/1332856960/xyzwriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/modules/io/xyzwriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/1332856960 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/1332856960/xyzwriter.o ../../src/libcsg/modules/io/xyzwriter.cc ${OBJECTDIR}/_ext/484457853/molecule.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/molecule.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/molecule.o ../../src/libcsg/molecule.cc ${OBJECTDIR}/_ext/484457853/nblist.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nblist.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nblist.o ../../src/libcsg/nblist.cc ${OBJECTDIR}/_ext/484457853/nblistgrid.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nblistgrid.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nblistgrid.o ../../src/libcsg/nblistgrid.cc ${OBJECTDIR}/_ext/484457853/nematicorder.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/nematicorder.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include 
-I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/nematicorder.o ../../src/libcsg/nematicorder.cc ${OBJECTDIR}/_ext/484457853/openbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/openbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/openbox.o ../../src/libcsg/openbox.cc ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/orthorhombicbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/orthorhombicbox.o ../../src/libcsg/orthorhombicbox.cc ${OBJECTDIR}/_ext/484457853/topology.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topology.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topology.o ../../src/libcsg/topology.cc ${OBJECTDIR}/_ext/484457853/topologymap.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topologymap.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topologymap.o ../../src/libcsg/topologymap.cc ${OBJECTDIR}/_ext/484457853/topologyreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/topologyreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/topologyreader.o ../../src/libcsg/topologyreader.cc ${OBJECTDIR}/_ext/484457853/trajectoryreader.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/trajectoryreader.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/trajectoryreader.o ../../src/libcsg/trajectoryreader.cc ${OBJECTDIR}/_ext/484457853/trajectorywriter.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/trajectorywriter.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/trajectorywriter.o ../../src/libcsg/trajectorywriter.cc ${OBJECTDIR}/_ext/484457853/triclinicbox.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/triclinicbox.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/triclinicbox.o ../../src/libcsg/triclinicbox.cc ${OBJECTDIR}/_ext/484457853/version_nb.o: nbproject/Makefile-${CND_CONF}.mk ../../src/libcsg/version_nb.cc ${MKDIR} -p ${OBJECTDIR}/_ext/484457853 ${RM} $@.d $(COMPILE.cc) -O3 -I../../include -I../../../include -I/usr/include/libxml2 -I../../../../../ruehle/gmx/include/gromacs -MMD -MP -MF $@.d -o ${OBJECTDIR}/_ext/484457853/version_nb.o 
../../src/libcsg/version_nb.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r ${CND_BUILDDIR}/${CND_CONF} ${RM} ../../src/libcsg/libcsg.a # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/netbeans/libcsg/nbproject/configurations.xml000066400000000000000000000217731315264121600231100ustar00rootroot00000000000000 ../../include/votca/csg/bead.h ../../include/votca/csg/beadlist.h ../../include/votca/csg/beadpair.h ../../include/votca/csg/beadtype.h ../../include/votca/csg/boundarycondition.h ../../include/votca/csg/cgengine.h ../../include/votca/csg/cgmoleculedef.h ../../include/votca/csg/cgobserver.h ../../include/votca/csg/csgapplication.h ../../include/votca/csg/exclusionlist.h ../../include/votca/csg/fileformatfactory.h ../../include/votca/csg/imcio.h ../../include/votca/csg/interaction.h ../../include/votca/csg/map.h ../../include/votca/csg/molecule.h ../../include/votca/csg/nblist.h ../../include/votca/csg/nblistgrid.h ../../include/votca/csg/nematicorder.h ../../include/votca/csg/openbox.h ../../include/votca/csg/orthorhombicbox.h ../../include/votca/csg/pairlist.h ../../include/votca/csg/residue.h ../../include/votca/csg/topology.h ../../include/votca/csg/topologyitem.h ../../include/votca/csg/topologymap.h ../../include/votca/csg/topologyreader.h ../../include/votca/csg/trajectoryreader.h ../../include/votca/csg/trajectorywriter.h ../../include/votca/csg/triclinicbox.h ../../include/votca/csg/version.h ../../src/libcsg/modules/io/esptopologyreader.cc ../../src/libcsg/modules/io/esptopologyreader.h ../../src/libcsg/modules/io/esptrajectoryreader.cc ../../src/libcsg/modules/io/esptrajectoryreader.h ../../src/libcsg/modules/io/gmx_print_version.cc ../../src/libcsg/modules/io/gmx_version_check.cc ../../src/libcsg/modules/io/gmx_version_check.h ../../src/libcsg/modules/io/gmx_version_nb.cc ../../src/libcsg/modules/io/gmxtopologyreader.cc ../../src/libcsg/modules/io/gmxtopologyreader.h ../../src/libcsg/modules/io/gmxtrajectoryreader.cc ../../src/libcsg/modules/io/gmxtrajectoryreader.h ../../src/libcsg/modules/io/gmxtrajectorywriter.cc ../../src/libcsg/modules/io/gmxtrajectorywriter.h ../../src/libcsg/modules/io/grotopologyreader.cc ../../src/libcsg/modules/io/grotopologyreader.h ../../src/libcsg/modules/io/growriter.cc ../../src/libcsg/modules/io/growriter.h ../../src/libcsg/modules/io/lammpsreader.cc ../../src/libcsg/modules/io/lammpsreader.h ../../src/libcsg/modules/io/pdbtopologyreader.cc ../../src/libcsg/modules/io/pdbtopologyreader.h ../../src/libcsg/modules/io/pdbwriter.cc ../../src/libcsg/modules/io/pdbwriter.h ../../src/libcsg/modules/io/xmltopologyreader.cc ../../src/libcsg/modules/io/xmltopologyreader.h ../../src/libcsg/modules/io/xyzreader.cc ../../src/libcsg/modules/io/xyzwriter.cc ../../src/libcsg/modules/io/xyzwriter.h ../../src/libcsg/beadlist.cc ../../src/libcsg/boundarycondition.cc ../../src/libcsg/cgengine.cc ../../src/libcsg/cgmoleculedef.cc ../../src/libcsg/csgapplication.cc ../../src/libcsg/exclusionlist.cc ../../src/libcsg/imcio.cc ../../src/libcsg/map.cc ../../src/libcsg/molecule.cc ../../src/libcsg/nblist.cc ../../src/libcsg/nblistgrid.cc ../../src/libcsg/nematicorder.cc ../../src/libcsg/openbox.cc ../../src/libcsg/orthorhombicbox.cc ../../src/libcsg/topology.cc ../../src/libcsg/topologymap.cc ../../src/libcsg/topologyreader.cc ../../src/libcsg/trajectoryreader.cc ../../src/libcsg/trajectorywriter.cc ../../src/libcsg/triclinicbox.cc ../../include/version.h 
../../src/libcsg/version_nb.cc Makefile_nb ../../src/libcsg ../../include Makefile_nb localhost GNU|GNU 2 ../../src/libcsg ../../../tools/include /usr/include/libxml2 ../../include ../../src/libcsg/libcsg.a localhost GNU|GNU 2 5 6 ../../include ../../../include /usr/include/libxml2 ../../../../../ruehle/gmx/include/gromacs 5 ../../src/libcsg/libcsg.a localhost GNU|GNU 2 5 6 ../../include ../../../include /usr/include/libxml2 ../../../../../ruehle/gmx/include/gromacs -pg 5 ../../src/libcsg/libcsg.a csg-1.4.1/netbeans/libcsg/nbproject/project.properties000066400000000000000000000000001315264121600230740ustar00rootroot00000000000000csg-1.4.1/netbeans/libcsg/nbproject/project.xml000066400000000000000000000016731315264121600215210ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject libcsg 0 cc h UTF-8 ../../src/libcsg ../../include Debug Release profile_release csg-1.4.1/scripts/000077500000000000000000000000001315264121600137615ustar00rootroot00000000000000csg-1.4.1/scripts/CMakeLists.txt000066400000000000000000000040531315264121600165230ustar00rootroot00000000000000configure_file(help2t2t.in ${CMAKE_CURRENT_BINARY_DIR}/help2t2t.out @ONLY) add_custom_target(help2t2t_build DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/help2t2t) add_custom_command(OUTPUT help2t2t ALL COMMAND ${CMAKE_COMMAND} -DINPUT="help2t2t.out" -DOUTPUT="help2t2t" -DGIT_EXECUTABLE="${GIT_EXECUTABLE}" -DMERCURIAL_EXECUTABLE="${MERCURIAL_EXECUTABLE}" -DTOP_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -P ${CMAKE_MODULE_PATH}/gitscript.cmake) set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES help2t2t) foreach(SCRIPT csg_call csg_inverse) configure_file(${SCRIPT}.in ${CMAKE_CURRENT_BINARY_DIR}/${SCRIPT}.out @ONLY) add_custom_target(${SCRIPT}_build ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${SCRIPT}) add_custom_command(OUTPUT ${SCRIPT} COMMAND ${CMAKE_COMMAND} -DINPUT="${SCRIPT}.out" -DOUTPUT="${SCRIPT}" -DGIT_EXECUTABLE="${GIT_EXECUTABLE}" -DMERCURIAL_EXECUTABLE="${MERCURIAL_EXECUTABLE}" -DTOP_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -P ${CMAKE_MODULE_PATH}/gitscript.cmake) set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${SCRIPT}) install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${SCRIPT} DESTINATION bin) if (TXT2TAGS_FOUND AND BASH) add_custom_command(OUTPUT ${SCRIPT}.man COMMAND VOTCASHARE=${CMAKE_SOURCE_DIR}/share ${BASH} ${CMAKE_CURRENT_BINARY_DIR}/${SCRIPT} --help > ${SCRIPT}.help COMMAND ${BASH} ${CMAKE_CURRENT_BINARY_DIR}/help2t2t ${SCRIPT}.help > ${SCRIPT}.t2t COMMAND ${TXT2TAGS_EXECUTABLE} -q -t man -i ${SCRIPT}.t2t -o ${SCRIPT}.man DEPENDS help2t2t_build ${SCRIPT}) add_custom_target(${SCRIPT}_manpage DEPENDS ${SCRIPT}.man) add_dependencies(manpages ${SCRIPT}_manpage) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${SCRIPT}.man DESTINATION ${MAN}/man1 RENAME ${SCRIPT}.1) set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${SCRIPT}.help ${SCRIPT}.t2t) endif(TXT2TAGS_FOUND AND BASH) endforeach(SCRIPT) option(WITH_RC_FILES "Install votca rc files, no need when installing under /usr" ON) install(FILES csg-completion.bash DESTINATION ${DATA}/rc) csg-1.4.1/scripts/csg-completion.bash000066400000000000000000000022371315264121600175470ustar00rootroot00000000000000# # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #complete with commands filename and options _votca_csg_common_opts() { local cur myopt cur=${COMP_WORDS[COMP_CWORD]} if [[ "$cur" == --* ]]; then myopt=$( $1 --help 2> /dev/null | sed -e '/--/!d' \ -e 's/.*?\(--[A-Za-z0-9]\+\).*/\1/' | sort -u ) COMPREPLY=( $( compgen -W '$myopt' -- $cur ) ) fi } complete -F _votca_csg_common_opts -f \ csg_boltzmann csg_dump csg_gmxtopol csg_inverse csg_resample \ csg_call csg_fmatch csg_imcrepack csg_map csg_property csg_stat \ csg_density csg_reupdate csg_dlptopol csg-1.4.1/scripts/csg_call.in000066400000000000000000000132061315264121600160620ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # version='@PROJECT_VERSION@ #CSG_GIT_ID#' ext_cmd="" simprog="" log="" show_share="no" #unset stuff from enviorment unset CSGXMLFILE CSGDEBUG bondtype bondname #failback define die() { echo -e "$*" >&2 exit 1 } show_help () { cat << eof ================================================== ======== VOTCA (http://www.votca.org) ======== ================================================== please submit bugs to bugs@votca.org ${0##*/}, version ${version} This script calls scripts and functions for the iterative framework. Function can be executed or shows if key1='function'. 
Usage: ${0##*/} [OPTIONS] key1 key2 [SCRIPT OPTIONS] Allowed options: -l, --list Show list of all script --cat Show the content of the script --show Show the path to the script --show-share Shows the used VOTCASHARE dir and exits --scriptdir DIR Set the user script dir (Used if no options xml file is given) Default: empty --options FILE Specify the options xml file to use --log FILE Specify the log file to use Default: stdout --ia-type type Specify the interaction type to use --ia-name name Specify the interaction name to use --nocolor Disable colors --sloppy-tables Allow tables without flags --debug Enable debug mode with a lot of information -h, --help Show this help Examples: * ${0##*/} table smooth [ARGUMENTS] * ${0##*/} --show run gromacs eof } while [[ ${1#-} != $1 ]]; do if [[ ${1#--} = $1 && -n ${1:2} ]]; then #short opt with arguments here: fc if [[ ${1#-[fc]} != ${1} ]]; then set -- "${1:0:2}" "${1:2}" "${@:2}" else set -- "${1:0:2}" "-${1:2}" "${@:2}" fi fi case $1 in -l | --list) ext_cmd="show_csg_tables" shift ;; --scriptdir) die "'--scriptdir' is obsolete, please specify the script path in the xml file (cg.inverse.scriptpath)" shift 2;; --simprog) die "'--simprog' is obsolete, please specify the simprog in the xml file (cg.inverse.program)" shift 2;; --options) export CSGXMLFILE="$2" [ -f "$CSGXMLFILE" ] || die "options xml file '$CSGXMLFILE' not found" shift 2;; --sloppy-tables) export VOTCA_TABLES_WITHOUT_FLAG="yes" shift ;; --log) log="$2" shift 2;; --ia-type) export bondtype="$2" shift 2;; --ia-name) export bondname="$2" shift 2;; --cat) ext_cmd="cat_external" shift;; --show) ext_cmd="source_wrapper" shift;; --show-share) show_share="yes" shift;; --nocolor) export CSGNOCOLOR="yes" shift;; -h | --help) show_help exit 0;; --debug) export CSGDEBUG="yes" shift;; -v | --version) echo "${0##*/}, version $version" exit 0;; *) die "Unknown option '$1'";; esac done if [[ -z ${VOTCASHARE} ]]; then if [ -f "${0%/*}/../share/votca/scripts/inverse/inverse.sh" ]; then #transform it to a global path export VOTCASHARE="$(cd ${0%/*}/../share/votca;pwd)" elif [ -f "@CMAKE_INSTALL_PREFIX@/@DATA@/scripts/inverse/inverse.sh" ]; then export VOTCASHARE="@CMAKE_INSTALL_PREFIX@/@DATA@" else echo "Error: Environment value VOTCASHARE is not defined and could not be guessed" >&2 echo "Export VOTCASHARE or source VOTCARC.bash or VOTCARC.csh" >&2 exit 1 fi else if [[ ! -f ${VOTCASHARE}/scripts/inverse/inverse.sh ]]; then echo "Error: Environment value VOTCASHARE seems to be wrong" >&2 echo "Could not find \${VOTCASHARE}/scripts/inverse/inverse.sh" >&2 echo "Export VOTCASHARE or source VOTCARC.bash or VOTCARC.csh" >&2 exit 1 fi fi if [ "$show_share" = "yes" ]; then echo "${VOTCASHARE}" exit 0 fi if [[ -f ${VOTCASHARE}/scripts/inverse/start_framework.sh ]]; then source ${VOTCASHARE}/scripts/inverse/start_framework.sh || die "Could not source start_framework.sh" else die "Could not find start_framework.sh" fi [[ -n ${CSGXMLFILE} ]] && scriptpath="$(csg_get_property --allow-empty cg.inverse.scriptpath)" > /dev/null [[ -n ${scriptpath} ]] && echo "Adding '$scriptpath to csgshare" && add_to_csgshare "$scriptpath" [[ -n ${CSGXMLFILE} ]] && simprog="$(csg_get_property --allow-empty cg.inverse.program "$simprog")" [[ -n ${simprog} ]] && echo "We are using Sim Program: $simprog" && source_function $simprog if [[ $ext_cmd = show_csg_tables ]]; then $ext_cmd exit $? fi [[ -z $1 || -z $2 ]] && die "${0##*/}: Missing argument" if [[ -n $ext_cmd ]]; then $ext_cmd $1 $2 exit $? 
fi #help of scripts should always work and be quiet if [[ $3 = --help ]]; then cat < /dev/null)" ]; then #in case we have an old version of csg_call without --show-share tmp="$(csg_call --show-share 2> /dev/null)" && export VOTCASHARE="$tmp" unset tmp fi #we leave --help here to have it even when #VOTCASHARE is not defined if [[ $1 = "--help" || $1 = "-h" ]]; then cat << eof ================================================== ======== VOTCA (http://www.votca.org) ======== ================================================== please submit bugs to bugs@votca.org eof if [[ -f ${VOTCASHARE}/scripts/inverse/inverse.sh ]]; then exec ${VOTCASHARE}/scripts/inverse/inverse.sh --help | \ sed -e "s/inverse\.sh/${0##*/}/g" -e "s/%version%/${version}/" exit 0 fi cat << eof ${0##*/}, version ${version} Start the script (inverse.sh) to run IBM, IMC, etc. Usage: ${0##*/} [OPTIONS] --options settings.xml Allowed options: -h, --help Show this help NOTE: this is a short help, please source VOTCARC.bash or VOTCARC.csh or export VOTCASHARE to get the full help. eof exit 0 fi #we don't know if it was done above if [[ -n "$(type -p csg_call)" ]]; then VOTCASHARE="$(csg_call --show-share)" || exit 1 export VOTCASHARE else echo "Could not find csg_call" >&2 exit 1 fi if [[ -f ${VOTCASHARE}/scripts/inverse/inverse.sh ]]; then pre="$*" [[ -n $pre && -z ${pre//*--debug*} ]] && pre="bash -x " || pre="" exec ${pre} ${VOTCASHARE}/scripts/inverse/inverse.sh "$@" exit 0 else echo "${0##*/}: Could not run \${VOTCASHARE}/scripts/inverse/inverse.sh $@" >&2 exit 1 fi csg-1.4.1/scripts/help2t2t.in000066400000000000000000000076601315264121600157660ustar00rootroot00000000000000#! /bin/bash -e # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# version='@PROJECT_VERSION@ #CSG_GIT_ID#' die() { echo -e "$*" >&2 exit 1 } assert() { local x pipestatus=${PIPESTATUS[*]} for x in $pipestatus; do [[ $x -eq 0 ]] || die "$@" done } [ -z "$1" ] && die "Usage: ${0##*/} helpfile" prog="${1%.*}" [ -f "$1" ] || die "${0##*/}: $1 not found" #trash the header helpmsg="$(sed -e '1,10d' $1)" || die "$prog --help failed" #first block is descprition desc="$(echo "$helpmsg" | sed -n '1,/^[[:space:]]*$/p')" assert "parse of desc failed" [ -z "$desc" ] && die "Failed to fetch desc" helpmsg="$(echo "$helpmsg" | sed '1,/^[[:space:]]*$/d')" assert "cut of help msg failed" #second block can be usage line usage="$(echo "$helpmsg" | sed -n '1s/Usage:[[:space:]]*\(.*\)$/\1/p')" assert "parse of usage failed" if [ -z "$usage" ]; then usage="**$prog** \"\"[\"\"//OPTION//\"\"]\"\" \"\"[\"\"//OPTIONPARAMETERS//\"\"]\"\"" else usage="$(echo "$usage" | sed -e 's/^/**/' -e 's/ /** /' -e 's/\([][]\)/""\1""/g')" assert "parse part 2 of usage failed" helpmsg="$(echo "$helpmsg" | sed '1,/^[[:space:]]*$/d')" assert "cut of help msg failed" fi #try to find examples block exam="$(echo "$helpmsg" | sed -n '/^Examples:/,/^[[:space:]]*$/p')" || die "parse of exam failed" if [ -n "$exam" ]; then exam="$(echo "$exam" | \ sed -e '1d' \ -e '/^\* /s/\( \{2\}\|$\)/``/' \ -e '/^\*.*``/s/^\*[[:space:]]*/- ``/')" assert "parse part 2 of exam failed" helpmsg="$(echo "$helpmsg" | sed '/^Examples:/,/^[[:space:]]*$/d')" assert "cut of help msg failed" fi #write t2t file cat < - ``--option`` text #-usageline -> usage: ``code`` #-extra empty line before new section to close itemize #-examplelines (^*) -> - ``line`` #-remove NEEDS and OPTINAL, ... line #-added newline before new option block echo -e "$helpmsg" | sed \ -e 's/^[[:space:]]*//' \ -e 's/[[:space:]]*$//' \ -e '/^-[^ ]/s/ \{2,\}/**\n/' \ -e '/^-[^ ].*\*\*/s/^/: **/' \ -e '/^\* /s/\( \{2\}\|$\)/``/' \ -e '/^\*.*``/s/^\*[[:space:]]*/- ``/' \ -e '/^\(NEEDS\|OPTIONAL\|USES\|PROVIDES\)/d' \ -e 's/^[A-Za-z]* options:/\n&/' assert "sed of options failed" if [ -n "$exam" ]; then cat < This Manual Page was converted from t2t format to the this format by [txt2tags http://txt2tags.org] ! The t2t file was extracted from '$prog --help' by ${0##*/} (version $version) = COPYRIGHT = Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
EOF csg-1.4.1/share/000077500000000000000000000000001315264121600133745ustar00rootroot00000000000000csg-1.4.1/share/CMakeLists.txt000066400000000000000000000001771315264121600161410ustar00rootroot00000000000000add_subdirectory(man) add_subdirectory(xml) add_subdirectory(doc) add_subdirectory(scripts/inverse) add_subdirectory(template) csg-1.4.1/share/doc/000077500000000000000000000000001315264121600141415ustar00rootroot00000000000000csg-1.4.1/share/doc/CMakeLists.txt000066400000000000000000000011561315264121600167040ustar00rootroot00000000000000find_package(Doxygen) if (DOXYGEN_FOUND) configure_file(Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/index.html COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile COMMENT "Build doxygen documentation") add_custom_target(html-csg DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/html/index.html) if(NOT TARGET html) add_custom_target(html) endif() add_dependencies(html html-csg) set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES html) endif (DOXYGEN_FOUND) csg-1.4.1/share/doc/Doxyfile.in000066400000000000000000001742511315264121600162660ustar00rootroot00000000000000# Doxyfile 1.5.9 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = @CMAKE_PROJECT_NAME@ # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = @PROJECT_VERSION@ # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. 
Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. 
This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. 
# The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. 
# If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. 
INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. 
MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. 
The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @CMAKE_SOURCE_DIR@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */.git/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. 
If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. 
USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. 
Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. 
# For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to FRAME, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. Other possible values # for this tag are: HIERARCHIES, which will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list; # ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which # disables this behavior completely. For backwards compatibility with previous # releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE # respectively. GENERATE_TREEVIEW = NONE # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. 
MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. 
RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. 
GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. 
You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. 
DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Options related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. 
SEARCHENGINE = NO csg-1.4.1/share/man/000077500000000000000000000000001315264121600141475ustar00rootroot00000000000000csg-1.4.1/share/man/CMakeLists.txt000066400000000000000000000010211315264121600167010ustar00rootroot00000000000000if (TXT2TAGS_FOUND) add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/votca-csg.man COMMAND ${TXT2TAGS_EXECUTABLE} -q -t man -i ${CMAKE_CURRENT_SOURCE_DIR}/votca-csg.t2t -o ${CMAKE_CURRENT_BINARY_DIR}/votca-csg.man COMMENT "Building votca-csg manpage") add_custom_target(votca-csg_manpage DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/votca-csg.man) add_dependencies(manpages votca-csg_manpage) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/votca-csg.man DESTINATION ${MAN}/man7 RENAME votca-csg.7) endif (TXT2TAGS_FOUND) csg-1.4.1/share/man/votca-csg.t2t000066400000000000000000000052241315264121600164730ustar00rootroot00000000000000votca-csg VOTCA Development Team %%mtime(%d/%m/%Y) %make the manpage type 7, txt2tags does 1 by default %!postproc(man): "^(\.TH.*) 1 " "\1 7 " = NAME = votca-csg - The VOTCA coarse-graining engine = DESCRIPTION = Versatile Object-oriented Toolkit for Coarse-graining Applications (VOTCA) is a package intended to reduce the amount of routine work when doing systematic coarse-graining of various systems. The core is written in C++. Iterative methods are implemented using bash + perl. Please visit the program site at __http://www.votca.org__. = SYNOPSIS = The following commands make up the votca-csg suite. Please refer to their individual man pages for further details. : **csg_boltzmann** Performs tasks that are needed for simple Boltzmann inversion in an interactive environment. : **csg_call** Calls a script from the interactive framework for the user. : **csg_density** Calculates all kinds of density profiles. : **csg_dump** Prints atoms that are read from a topology file to help debug atom naming. : **csg_fmatch** Performs force matching (also called //multiscale coarse-graining//). : **csg_gmxtopol** Creates a skeleton for a gromacs topology based on an atomistic topology and a mapping file. : **csg_imcrepack** Is internally called by inversion scripts to kick out zero entries in the matrix for inverse Monte Carlo (imc). : **csg_inverse** Starts the script to run iterative Boltzmann inversion (ibi) or inverse Monte Carlo (imc), etc. : **csg_map** Maps a reference trajectory to a coarse-grained trajectory. : **csg_part_dist** Outputs the time-averaged number of particles, listed by particle types. : **csg_property** Helper program called by inverse scripts to parse xml files. : **csg_resample** Changes grid + interval and interpolates any sort of table file. : **csg_stat** Calculates all kinds of distributions (bonded + non-bonded). : **multi_g_rdf** A multiplexed version of g_rdf. = AUTHORS = Written and maintained by the VOTCA Development Team This Manual Page was converted from t2t format to this format by [txt2tags http://txt2tags.org] ! = COPYRIGHT = Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
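For reference, the txt2tags call wired into share/man/CMakeLists.txt above can also be run by hand. A minimal sketch (assuming txt2tags is installed and the working directory is the top of the csg source tree; the options mirror the add_custom_command, only the output path differs):

txt2tags -q -t man -i share/man/votca-csg.t2t -o votca-csg.man   # same -q -t man -i/-o options as the CMake rule
man ./votca-csg.man                                              # preview the generated page (man treats an argument containing a slash as a file path)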
csg-1.4.1/share/misc/000077500000000000000000000000001315264121600143275ustar00rootroot00000000000000csg-1.4.1/share/misc/do_errorplot.sh000077500000000000000000000020041315264121600173740ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # do_errors() { csg_imc --top topol.tpr --trj traj.xtc --cg ../water_cg.xml --options ../cg.xml \ --do-imc --begin 100 \ --write-every $1 --do-blocks --nframes $((16*$1)) ~/src/csg/share/scripts/inverse/errorbars.sh ibm cp CG-CG.dpot.err ibm.err.$i ~/src/csg/share/scripts/inverse/errorbars.sh imc cp CG-CG.dpot.err imc.err.$i } for i in $(seq 1 250); do do_errors $i done csg-1.4.1/share/scripts/000077500000000000000000000000001315264121600150635ustar00rootroot00000000000000csg-1.4.1/share/scripts/inverse/000077500000000000000000000000001315264121600165365ustar00rootroot00000000000000csg-1.4.1/share/scripts/inverse/CMakeLists.txt000066400000000000000000000003461315264121600213010ustar00rootroot00000000000000file(GLOB VOTCA_SCRIPTS *.sh *.pl *.py) file(GLOB VOTCA_FILES csg_table *.pm *.m *.octave) install(FILES ${VOTCA_FILES} DESTINATION ${DATA}/scripts/inverse) install(PROGRAMS ${VOTCA_SCRIPTS} DESTINATION ${DATA}/scripts/inverse) csg-1.4.1/share/scripts/inverse/CsgFunctions.pm000066400000000000000000000147531315264121600215130ustar00rootroot00000000000000package CsgFunctions; # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# use strict; require Exporter; use vars qw(@ISA @EXPORT); @ISA = qw(Exporter); @EXPORT = qw(csg_function_help csg_get_property csg_get_interaction_property readin_table readin_data saveto_table saveto_table_err readin_table_err); sub csg_function_help() { print <){ $line++; ${$_[4]}.=$_ if (defined($_[4]) and (/^[#@]/)); next if /^[#@]/; # remove leading spacees for split $_ =~ s/^\s*//; next if /^\s*$/; my @parts=split(/\s+/); if ( $sloppy eq "yes" ) { defined($parts[1]) || die "readin_table: Not enought columns in line $line in file $_[0]\n"; $parts[$#parts+1] = "i"; } else { defined($parts[2]) || die "readin_table: Not enought columns in line $line in file $_[0], if you don't have flags in your table add --sloppy-tables option to csg_call\n"; ($parts[$#parts] =~ /[iou]/) || die "readin_table: Wrong flag($parts[$#parts]) for r=$parts[0] in file $_[0], if you don't have flags in your table add --sloppy-tables option to csg_call\n"; } #very trick array dereference (@) of pointer to an array $_[.] stored in an array $_ push(@{$_[1]},$parts[0]); push(@{$_[2]},$parts[1]); push(@{$_[3]},$parts[$#parts]); } close(TAB) || die "readin_table: could not close file $_[0]\n"; die "readin_table: 0 lines were read from $_[0]\n" if ($line==0); return $line; } sub readin_table_err($\@\@\@\@;\$) { defined($_[4]) || die "readin_table_err: Missing argument\n"; open(TAB,"$_[0]") || die "readin_table_err: could not open file $_[0]\n"; my $sloppy= $ENV{'VOTCA_TABLES_WITHOUT_FLAG'}; $sloppy="no" unless defined($sloppy); my $line=0; while (){ $line++; ${$_[5]}.=$_ if (defined($_[4]) and (/^[#@]/)); # remove leading spacees for split $_ =~ s/^\s*//; next if /^[#@]/; next if /^\s*$/; my @parts=split(/\s+/); if ( $sloppy eq "yes" ) { defined($parts[2]) || die "readin_table_err: Not enought columns in line $line in file $_[0]\n"; $parts[$#parts+1] = "i"; }else{ defined($parts[3]) || die "readin_table_err: Not enought columns in line $line in file $_[0], if you don't have flags in your table add --sloppy-tables option to csg_call\n"; ($parts[$#parts] =~ /[iou]/) || die "readin_table_err: Wrong flag($parts[$#parts]) for r=$parts[0] in file $_[0], if you don't have flags in your table add --sloppy-tables option to csg_call\n"; } #very trick array dereference (@) of pointer to an array $_[.] stored in an array $_ push(@{$_[1]},$parts[0]); push(@{$_[2]},$parts[1]); push(@{$_[3]},$parts[2]); push(@{$_[4]},$parts[$#parts]); } close(TAB) || die "readin_table_err: could not close file $_[0]\n"; die "readin_table_err: 0 lines were read from $_[0]\n" if ($line==0); return $line; } sub readin_data($$\@\@) { defined($_[3]) || die "readin_data: Missing argument\n"; open(TAB,"$_[0]") || die "readin_table: could not open file $_[0]\n"; my $line=0; my $column=int($_[1]); while (){ $line++; # remove leading spacees for split $_ =~ s/^\s*//; next if /^[#@]/; next if /^\s*$/; my @parts=split(/\s+/); defined($parts[1]) || die "readin_table: Not enought columns in line $line in file $_[0]\n"; die "readin_data: Can't read column $column\n" unless (defined($parts[$column])); #very trick array dereference (@) of pointer to an array $_[.] 
stored in an array $_ push(@{$_[2]},$parts[0]); push(@{$_[3]},$parts[$column]); } close(TAB) || die "readin_table: could not close file $_[0]\n"; return $line; } sub saveto_table($\@\@\@;$) { defined($_[3]) || die "saveto_table: Missing argument\n"; open(OUTFILE,"> $_[0]") or die "saveto_table: could not open $_[0] \n"; print OUTFILE "$_[4]" if (defined $_[4]); for(my $i=0;$i<=$#{$_[1]};$i++){ print OUTFILE "${$_[1]}[$i] ${$_[2]}[$i] ${$_[3]}[$i]\n"; } close(OUTFILE) or die "Error at closing $_[0]\n"; return 1; } sub saveto_table_err($\@\@\@\@;$) { defined($_[3]) || die "saveto_table: Missing argument\n"; open(OUTFILE,"> $_[0]") or die "saveto_table: could not open $_[0] \n"; print OUTFILE "$_[5]" if (defined $_[5]); for(my $i=0;$i<=$#{$_[1]};$i++){ print OUTFILE "${$_[1]}[$i] ${$_[2]}[$i] ${$_[3]}[$i] ${$_[4]}[$i]\n"; } close(OUTFILE) or die "Error at closing $_[0]\n"; return 1; } #important 1; csg-1.4.1/share/scripts/inverse/CsgSimplex.pm000066400000000000000000000216561315264121600211640ustar00rootroot00000000000000package CsgSimplex; # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; require Exporter; use vars qw(@ISA @EXPORT); @ISA = qw(Exporter); @EXPORT = qw(csg_simplex_function_help readin_simplex_state saveto_simplex_state sort_simplex_table is_num replace_parameter_flag get_convergence_value remove_parameter_set calc_parameter_center linop_parameter); sub csg_simplex_function_help() { print <){ $line++; if (/^#State =\s*(\S*)/) { $state="$1"; next; } ${$_[3]}.=$_ if (defined($_[3]) and (/^[#@]/)); if (/^#Format\s*(.*)$/) { ${$_[4]}="$1" if defined($_[4]); } next if ($_ =~ /^[#@]/); $_ =~ s/^\s*//; # remove leading spacees for split next if /^\s*$/; my @parts=split(/\s+/); $parameters=$#parts unless ($parameters); die "readin_simplex_state: Number of parameters ($#parts) differ in line $line from previous lines ($parameters) of file $_[0]\n" if ($parameters != $#parts); push(@{$_[2]},\@parts); } close(TAB) || die "readin_simplex_state: could not close file $_[0]\n"; die "readin_simplex_state: 0 lines were read from $_[0]\n" if ($line==0); die "readin_simplex_state: could not read state from $_[0]\n" unless ($state); ${$_[1]}="$state"; return $line; } sub saveto_simplex_state($$\@;$) { defined($_[2]) || die "saveto_simplex_state: Missing argument\n"; open(OUTFILE,"> $_[0]") or die "saveto_simplex_state: could not open $_[0] \n"; print OUTFILE "$_[3]" if (defined $_[3]); print OUTFILE "#State = $_[1]\n"; my @simplex_table=@{$_[2]}; my $parameters=undef; #remember 2d arrays is a list of lists for (my $i=0;$i<=$#simplex_table;$i++){ $parameters=$#{$simplex_table[$i]} unless ($parameters); die "saveto_simplex_state: Number of parameters ($#{$simplex_table[$i]}) differ in set $i from previous lines ($parameters)" if ($parameters != $#{$simplex_table[$i]}); print OUTFILE "@{$simplex_table[$i]}\n"; } close(OUTFILE) or die "Error at closing $_[0]\n"; return 1; } sub sort_simplex_table(\@) { defined($_[0]) || die 
"sort_simplex_table: Missing argument\n"; my @simplex_table=@{$_[0]}; my $parameters=undef; my @index; #remember 2d arrays is a list of lists for (my $i=0;$i<=$#simplex_table;$i++){ $parameters=$#{$simplex_table[$i]} unless ($parameters); die "sort_simplex_table: Number of parameters ($#{$simplex_table[$i]}) differ in set $i from previous lines ($parameters)" if ($parameters != $#{$simplex_table[$i]}); push(@index,$i); is_num("$simplex_table[$i][-2]") || die "sort_simplex_table: second last value of parameter set $i is not a number\n"; die "sort_simplex_table: try set found!\n" if ($simplex_table[$i][-1] =~ /^try$/); } @index=sort { $simplex_table[$a][-2] <=> $simplex_table[$b][-2] } @index; my @sorted_table; for (my $i=0;$i<=$#index;$i++){ $sorted_table[$i]=$simplex_table[$index[$i]]; } @{$_[0]}=@sorted_table; return 1; } sub replace_parameter_flag(\@$$) { defined($_[2]) || die "replace_parameter_flag: Missing argument\n"; my @simplex_table=@{$_[0]}; for (my $i=0;$i<=$#simplex_table;$i++){ $simplex_table[$i][$#{$simplex_table[$i]}] =~ s/^$_[1]$/$_[2]/; } } sub is_num($) { defined($_[0]) || die "is_num: Missing argument\n"; use POSIX qw(strtod); my $str = shift; $str =~ s/^\s+//; $str =~ s/\s+$//; $! = 0; my($num, $unparsed) = strtod($str); if (($str eq '') || ($unparsed != 0) || $!) { return 0; } return 1; } sub get_convergence_value(\@$); sub get_convergence_value(\@$) { defined($_[1]) || die "get_convergence_value: Missing argument\n"; my @simplex_table=@{$_[0]}; if ($_[1] eq "lowest") { my $value=undef; for (my $i=0;$i<=$#simplex_table;$i++) { next if ($simplex_table[$i][-1] =~ /^try/); $value=$simplex_table[$i][-2] unless defined($value); $value=$simplex_table[$i][-2] if $value>$simplex_table[$i][-2] } return $value; } elsif ($_[1] eq "ihighest") { my $ivalue=undef; for (my $i=0;$i<=$#simplex_table;$i++) { next if ($simplex_table[$i][-1] =~ /^(try|tryold)$/); $ivalue=$i unless defined($ivalue); $ivalue=$i if $simplex_table[$ivalue][-2]<$simplex_table[$i][-2]; } return $ivalue; } elsif ($_[1] eq "highest") { my $i=get_convergence_value(@simplex_table,"ihighest"); return $simplex_table[$i][-2]; } elsif ($_[1] eq "second") { my $ivalue=get_convergence_value(@simplex_table,"ihighest"); # in case we do simplex on one parameter return $simplex_table[$ivalue][-2] if ($#simplex_table == 2); my $value=undef; for (my $i=0;$i<=$#simplex_table;$i++) { next if ($simplex_table[$i][-1] =~ /^(try|tryold)$/); next if ($i==$ivalue); $value=$simplex_table[$i][-2] unless defined($value); $value=$simplex_table[$i][-2] if $value<$simplex_table[$i][-2]; } return $value; } elsif ($_[1] =~ /^(try|tryold)$/) { my $value=undef; for (my $i=0;$i<=$#simplex_table;$i++) { if ( $simplex_table[$i][-1] =~ /^$_[1]$/ ) { die "get_convergence_value: Found two $_[1] value in parameter set\n" if defined($value); $value=$simplex_table[$i][-2]; } } die "get_convergence_value: Could not find any $_[1] value in paramter set\n" unless defined($value); return $value; } else { die "get_convergence_value: I don't know how to get value '$_[1]'\n"; } } sub remove_parameter_set(\@$) { defined($_[1]) || die "remove_parameter_set: Missing argument\n"; my @simplex_table=@{$_[0]}; my $value=undef; my @new_table; if ($_[1] =~ /^(try|tryold)$/) { for (my $i=0;$i<=$#simplex_table;$i++) { if ( $simplex_table[$i][-1] =~ /^$_[1]$/ ) { die "remove_parameter_set: Found two parameter set with flag '$_[1]'" if defined($value); $value=$i; } else { push(@new_table,$simplex_table[$i]); } } } elsif ($_[1] eq "highest") { 
$value=get_convergence_value(@simplex_table,"ihighest"); for (my $i=0;$i<=$#simplex_table;$i++) { push(@new_table,$simplex_table[$i]) unless ($i == $value); } } else { die "remove_parameter_set: I don't know how to remove value '$_[1]'\n"; } die "remove_parameter_set: Could not find a parameter set with flag '$_[1]'" unless defined($value); @{$_[0]}=@new_table; return @{$simplex_table[$value]}; } sub calc_parameter_center(\@){ defined($_[0]) || die "calc_parameter_center: Missing argument\n"; my @simplex_table=@{$_[0]}; my @center; sort_simplex_table(@simplex_table); for (my $j=0;$j<=$#{$simplex_table[0]};$j++) { if (is_num("$simplex_table[0][$j]")) { $center[$j]=0; } else { $center[$j]=$simplex_table[0][$j]; } } #mind the $i<$#simplex_table to skip the highest value for (my $i=0;$i<$#simplex_table;$i++) { die "calc_parameter_center: number of parameters (".($#{$simplex_table[$i]}-1).") of parameter set #".($i+1)." differs from the number of non-try sets - 1 (=".($#simplex_table)."). Expected $#{$simplex_table[$i]} non-try sets.\n" if (($#simplex_table+1) != $#{$simplex_table[$i]}); for (my $j=0;$j<=$#{$simplex_table[$i]};$j++) { if (is_num("$simplex_table[$i][$j]")) { $center[$j]+=$simplex_table[$i][$j]/$#simplex_table; } } } return @center; } sub linop_parameter(\@$\@\@) { defined($_[3]) || die "linop_parameter: Missing argument\n"; my @vec1=@{$_[0]}; my $scale=$_[1]; my @vec2=@{$_[2]}; die "linop_parameter: 1st ($#vec1) and 2nd vector ($#vec2) have different length\n" unless ($#vec1 = $#vec2); my @vec3=@{$_[3]}; my @vec4; die "linop_parameter: 1st ($#vec1) and 3rd vector ($#vec3) have different length\n" unless ($#vec1 = $#vec3); for (my $i=0;$i<=$#vec1;$i++) { if (is_num($vec1[$i])) { $vec4[$i]=$vec1[$i]+$scale*($vec2[$i]-$vec3[$i]); } else { $vec4[$i]=$vec1[$i]; } } $vec4[-1]="pending"; $vec4[-2]=0; return @vec4; } #important 1; csg-1.4.1/share/scripts/inverse/add_POT.pl000077500000000000000000000050541315264121600203540ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print <0.0001); die "Different start potential point $r_delta[0] $r_cur[0]\n" if (($r_delta[0]-$r_cur[0]) > 0.0001); die "Different end potential point \n" if ( $#r_cur != $#r_delta ); my $outfile="$ARGV[2]"; my @pot; my @flag; # TODO: think about addition rules # now I did it like that to always maintain interval of interest in all potentials # shount that just be a < instead of <= ?? 
for (my $i=0;$i<=$#r_cur;$i++){ if($flag_cur[$i] eq "u" || $flag_delta[$i] eq "u") { $pot[$i] = $pot_cur[$i]; # is already nan or we don't change $flag[$i] = "u"; } else { $pot[$i]=$pot_cur[$i]+$pot_delta[$i]; $flag[$i] = $flag_cur[$i]; } #if ($flag_cur[$i] eq "i"){ # if ($flag_delta[$i] eq "i"){ # $pot[$i]=$pot_cur[$i]+$pot_delta[$i]; # } else { # $pot[$i]=$pot_cur[$i]; # } # $flag[$i]="i"; #} else { # $pot[$i]="nan"; # $flag[$i]="u"; #} } saveto_table($outfile,@r_cur,@pot,@flag) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/add_pot_generic.sh000077500000000000000000000017331315264121600222070ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat <" "0"; then msg --color blue --to-stderr "Automatically setting begin time to 0, because CSG_RUNTEST was set" begin=0 fi echo "Running ${g_energy[@]}" #no critical here so that we can print the error output=$(echo Pressure | ${g_energy[@]} -b "${begin}" -s "${topol}" ${opts} 2>&1) ret="$?" echo "$output" | gromacs_log "${g_energy[@]} -b "${begin}" -s "${topol}" ${opts}" [[ $ret -eq 0 ]] || die "${0##*/}: '${g_energy[@]} -b "${begin}" -s "${topol}" ${opts}' failed" #the number pattern '-\?[0-9][^[:space:]]*[0-9]' is ugly, but it supports X X.X X.Xe+X Xe-X and so on #awk 'print $2' does not work for older version of g_energy as the format varies between #^Pressure XXX (bar) and ^Pressure (bar) XXX p_now=$(echo "$output" | sed -n 's/^Pressure[^-0-9]*\(\(-\?[0-9][^[:space:]]*[0-9]\|nan\)\)[[:space:]].*$/\1/p' ) || \ die "${0##*/}: sed grep of Pressure failed" [[ -z $p_now ]] && die "${0##*/}: Could not get pressure from simulation" [[ $p_now = nan && $(csg_get_property cg.inverse.gromacs.g_energy.pressure.allow_nan) = no ]] && \ die "${0##*/}: Pressure was nan, check your simulation (this usually means system has blow up -> use pre simulation)" echo "Pressure=${p_now}" > "$1" csg-1.4.1/share/scripts/inverse/calc_rdf_generic.sh000077500000000000000000000071141315264121600223310ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# if [[ $1 = "--help" ]]; then cat <" "0"; then msg --color blue --to-stderr "Automatically setting equi_time to 0, because CSG_RUNTEST was set" equi_time=0 fi first_frame="$(csg_get_property cg.inverse.$sim_prog.first_frame)" with_errors=$(csg_get_property cg.inverse.$sim_prog.rdf.with_errors) if [[ ${with_errors} = "yes" ]]; then suffix="_with_errors" block_length=$(csg_get_property cg.inverse.$sim_prog.rdf.block_length) if [[ ${CSG_RUNTEST} ]] && csg_calc "$block_length" ">" "2"; then msg --color blue --to-stderr "Automatically setting block_length to 2, because CSG_RUNTEST was set" block_length=2 fi error_opts="--block-length ${block_length} --ext dist.block" else suffix="" fi tasks=$(get_number_tasks) #rdf calculation is maybe done already in a different interaction if is_done "rdf_calculation${suffix}"; then echo "rdf calculation is already done" else msg "Calculating rdfs with csg_stat using $tasks tasks" critical csg_stat --nt $tasks --options "$CSGXMLFILE" --top "$topol" --trj "$traj" --begin $equi_time --first-frame $first_frame ${error_opts} ${maps:+--cg ${maps}} mark_done "rdf_calculation${suffix}" fi if [[ ${with_errors} = "yes" ]]; then if ! is_done "${name}_rdf_average"; then for i in ${name}_*.dist.block; do [[ -f $i ]] || die "${0##*/}: Could not find ${name}_*.dist.block after running csg_sat, that usually means the blocksize (cg.inverse.$sim_prog.rdf.block_length) is too big." done msg "Calculating average rdfs and its errors for interaction $name" do_external table average --output ${name}.dist.new ${name}_*.dist.block mark_done "${name}_rdf_average" fi fi csg-1.4.1/share/scripts/inverse/calc_target_rdf_generic.sh000077500000000000000000000040441315264121600236760ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat <= 2.6, namely 2.6, 2.7, 3.3, 3.4. CMA-ES searches for a minimizer (a solution x in :math:`R^n`) of an objective function f (cost function), such that f(x) is minimal. Regarding f, only a passably reliable ranking of the candidate solutions in each iteration is necessary. Neither the function values itself, nor the gradient of f need to be available or do matter (like in the downhill simplex Nelder-Mead algorithm). Some termination criteria however depend on actual f-values. Two interfaces are provided: - function `fmin(func, x0, sigma0,...)` runs a complete minimization of the objective function func with CMA-ES. - class `CMAEvolutionStrategy` allows for minimization such that the control of the iteration loop remains with the user. Used packages: - unavoidable: `numpy` (see `barecmaes2.py` if `numpy` is not available), - avoidable with small changes: `time`, `sys` - optional: `matplotlib.pyplot` (for `plot` etc., highly recommended), `pprint` (pretty print), `pickle` (in class `Sections`), `doctest`, `inspect`, `pygsl` (never by default) Install ------- The file ``cma.py`` only needs to be visible in the python path (e.g. 
in the current working directory). The preferred way of (system-wide) installation is calling pip install cma from the command line. The ``cma.py`` file can also be installed from the system shell terminal command line by:: python cma.py --install which solely calls the ``setup`` function from the standard ``distutils.core`` package for installation. If the ``setup.py`` file is been provided with ``cma.py``, the standard call is python setup.py cma Both calls need to see ``cma.py`` in the current working directory and might need to be preceded with ``sudo``. To upgrade the currently installed version from the Python Package Index, and also for first time installation, type in the system shell:: pip install --upgrade cma Testing ------- From the system shell:: python cma.py --test or from the Python shell ``ipython``:: run cma.py --test or from any python shell import cma cma.main('--test') runs ``doctest.testmod(cma)`` showing only exceptions (and not the tests that fail due to small differences in the output) and should run without complaints in about between 20 and 100 seconds. Example ------- From a python shell:: import cma help(cma) # "this" help message, use cma? in ipython help(cma.fmin) help(cma.CMAEvolutionStrategy) help(cma.CMAOptions) cma.CMAOptions('tol') # display 'tolerance' termination options cma.CMAOptions('verb') # display verbosity options res = cma.fmin(cma.Fcts.tablet, 15 * [1], 1) res[0] # best evaluated solution res[5] # mean solution, presumably better with noise :See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy` :Author: Nikolaus Hansen, 2008-2015 :Contributor: Petr Baudis, 2014 :License: BSD 3-Clause, see below. """ # The BSD 3-Clause License # Copyright (c) 2014 Inria # Author: Nikolaus Hansen, 2008-2015 # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright and # authors notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # and authors notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors nor the authors names may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY # WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
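# (editorial example, not part of the original module) The docstring above
# describes two interfaces: the all-in-one `fmin` call and the
# `CMAEvolutionStrategy` class, which leaves control of the iteration loop to
# the user.  A minimal sketch of that ask-and-tell loop follows; the helper
# name `_example_ask_and_tell` is hypothetical, while `ask`, `tell`, `stop`,
# `disp` and `result_pretty` are the methods referred to elsewhere in this
# module.
def _example_ask_and_tell():
    import cma
    es = cma.CMAEvolutionStrategy(8 * [0.5], 0.3)  # initial solution x0 and step-size sigma0
    while not es.stop():
        solutions = es.ask()                       # sample a new population of candidate solutions
        es.tell(solutions, [cma.fcts.elli(x) for x in solutions])  # pass back their objective values
        es.disp()                                  # optional one-line iteration output
    es.result_pretty()                             # pretty-print the final result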
# (note to self) for testing: # pyflakes cma.py # finds bugs by static analysis # pychecker --limit 60 cma.py # also executes, all 60 warnings checked # or python ~/Downloads/pychecker-0.8.19/pychecker/checker.py cma.py # python cma.py -t -quiet # executes implemented tests based on doctest # python -3 cma.py --test 2> out2to3warnings.txt # # to create a html documentation file: # pydoc -w cma # edit the header (remove local pointers) # epydoc cma.py # comes close to javadoc but does not find the # # links of function references etc # doxygen needs @package cma as first line in the module docstring # some things like class attributes are not interpreted correctly # sphinx: doc style of doc.python.org, could not make it work (yet) # TODO: implement a (deep enough) copy-constructor for class # CMAEvolutionStrategy to repeat the same step in different # configurations for online-adaptation of meta parameters # TODO: reconsider geno-pheno transformation. Can it be a completely # separate module that operates inbetween optimizer and objective? # Can we still propagate a repair of solutions to the optimizer? # How about gradients (should be fine)? # TODO: implement bipop in a separate algorithm as meta portfolio # algorithm of IPOP and a local restart option to be implemented # in fmin (e.g. option restart_mode in [IPOP, local]) # TODO: self.opts['mindx'] is checked without sigma_vec, which is wrong, # TODO: project sigma_vec on the smallest eigenvector? # TODO: class _CMAStopDict implementation looks way too complicated # TODO: separate display and logging options, those CMAEvolutionStrategy # instances don't use themselves (probably all?) # TODO: disp method is implemented in CMAEvolutionStrategy and in # CMADataLogger separately, OOOptimizer.disp_str should return a str # which can be used uniformly? Only logger can disp a history. # TODO: check scitools.easyviz and how big the adaptation would be # TODO: split tell into a variable transformation part and the "pure" # functionality # usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X]) # genotypic repair is not part of tell_geno # TODO: copy_always optional parameter does not make much sense, # as one can always copy the input argument first, # however some calls are simpler # TODO: generalize input logger in optimize() as after_iteration_handler # (which is logger.add by default)? One difficulty is that # the logger object is returned (not anymore when return of optimize # is change). Another difficulty is the obscure usage of modulo # for writing a final data line in optimize. # TODO: separate initialize==reset_state from __init__ # TODO: introduce Ypos == diffC which makes the code more consistent and # the active update "exact"? # TODO: dynamically read "signals" from a file, see import ConfigParser # or myproperties.py (to be called after tell()) # # typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun, # callback=None # maxfev, diag (A sequency of N positive entries that serve as # scale factors for the variables.) # full_output -- non-zero to return all optional outputs. # If xtol < 0.0, xtol is set to sqrt(machine_precision) # 'infot -- a dictionary of optional outputs with the keys: # 'nfev': the number of function calls... # # see eg fmin_powell # typical returns # x, f, dictionary d # (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, # ) # # TODO: keep best ten solutions # TODO: implement constraints handling # TODO: extend function unitdoctest, or use unittest? 
# TODO: apply style guide # TODO: eigh(): thorough testing would not hurt # changes: # 15/01/20: larger condition numbers for C realized by using tf_pheno # of GenoPheno attribute gp. # 15/01/19: injection method, first implementation, short injections # and long injections with good fitness need to be addressed yet # 15/01/xx: prepare_injection_directions to simplify/centralize injected # solutions from mirroring and TPA # 14/12/26: bug fix in correlation_matrix computation if np.diag is a view # 14/12/06: meta_parameters now only as annotations in ## comments # 14/12/03: unified use of base class constructor call, now always # super(ThisClass, self).__init__(args_for_base_class_constructor) # 14/11/29: termination via "stop now" in file cmaes_signals.par # 14/11/28: bug fix initialization of C took place before setting the # seed. Now in some dimensions (e.g. 10) results are (still) not # determistic due to np.linalg.eigh, in some dimensions (<9, 12) # they seem to be deterministic. # 14/11/23: bipop option integration, contributed by Petr Baudis # 14/09/30: initial_elitism option added to fmin # 14/08/1x: developing fitness wrappers in FFWrappers class # 14/08/xx: return value of OOOptimizer.optimize changed to self. # CMAOptions now need to uniquely match an *initial substring* # only (via method corrected_key). # Bug fix in CMAEvolutionStrategy.stop: termination conditions # are now recomputed iff check and self.countiter > 0. # Doc corrected that self.gp.geno _is_ applied to x0 # Vaste reorganization/modularization/improvements of plotting # 14/08/01: bug fix to guaranty pos. def. in active CMA # 14/06/04: gradient of f can now be used with fmin and/or ask # 14/05/11: global rcParams['font.size'] not permanently changed anymore, # a little nicer annotations for the plots # 14/05/07: added method result_pretty to pretty print optimization result # 14/05/06: associated show() everywhere with ion() which should solve the # blocked terminal problem # 14/05/05: all instances of "unicode" removed (was incompatible to 3.x) # 14/05/05: replaced type(x) == y with isinstance(x, y), reorganized the # comments before the code starts # 14/05/xx: change the order of kwargs of OOOptimizer.optimize, # remove prepare method in AdaptSigma classes, various changes/cleaning # 14/03/01: bug fix BoundaryHandlerBase.has_bounds didn't check lower bounds correctly # bug fix in BoundPenalty.repair len(bounds[0]) was used instead of len(bounds[1]) # bug fix in GenoPheno.pheno, where x was not copied when only boundary-repair was applied # 14/02/27: bug fixed when BoundPenalty was combined with fixed variables. # 13/xx/xx: step-size adaptation becomes a class derived from CMAAdaptSigmaBase, # to make testing different adaptation rules (much) easier # 12/12/14: separated CMAOptions and arguments to fmin # 12/10/25: removed useless check_points from fmin interface # 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods # timesCroot and divCroot to the right class # 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed, # sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2) # is used for weight calculation. 
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation # 12/07/21: convert value True for noisehandling into 1 making the output compatible # 12/01/30: class Solution and more old stuff removed r3101 # 12/01/29: class Solution is depreciated, GenoPheno and SolutionDict do the job (v0.91.00, r3100) # 12/01/06: CMA_eigenmethod option now takes a function (integer still works) # 11/09/30: flat fitness termination checks also history length # 11/09/30: elitist option (using method clip_or_fit_solutions) # 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of # injected or modified solutions and even reliable adaptive encoding # 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1] # 11/07/25: fixed: fmin wrote first and last line even with verb_log==0 # fixed: method settableOptionsList, also renamed to versatileOptions # default seed depends on time now # 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling # fixed: output argument ordering in fmin, print now only used as function # removed: parallel option in fmin # 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:] # 11/07/01: major clean-up and reworking of abstract base classes and of the documentation, # also the return value of fmin changed and attribute stop is now a method. # 11/04/22: bug-fix: option fixed_variables in combination with scaling # 11/04/21: stopdict is not a copy anymore # 11/04/15: option fixed_variables implemented # 11/03/23: bug-fix boundary update was computed even without boundaries # 11/03/12: bug-fix of variable annotation in plots # 11/02/05: work around a memory leak in numpy # 11/02/05: plotting routines improved # 10/10/17: cleaning up, now version 0.9.30 # 10/10/17: bug-fix: return values of fmin now use phenotyp (relevant # if input scaling_of_variables is given) # 08/10/01: option evalparallel introduced, # bug-fix for scaling being a vector # 08/09/26: option CMAseparable becomes CMA_diagonal # 08/10/18: some names change, test functions go into a class # 08/10/24: more refactorizing # 10/03/09: upper bound exp(min(1,...)) for step-size control from __future__ import division # future is >= 3.0, this code has mainly been used with 2.6 & 2.7 from __future__ import with_statement # only necessary for python 2.5 and not in heavy use from __future__ import print_function # available from python 2.6, code should also work without from __future__ import absolute_import from __future__ import unicode_literals # from __future__ import collections.MutableMapping # does not exist in future, otherwise Python 2.5 would work, since 0.91.01 import sys if not sys.version.startswith('2'): # in python 3 xrange = range raw_input = input basestring = str else: input = raw_input # in py2, input(x) == eval(raw_input(x)) import time # not really essential import collections import numpy as np # arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh, # sort, argsort, random, ones,... 
from numpy import inf, array, dot, exp, log, sqrt, sum, isscalar, isfinite # to access the built-in sum fct: ``__builtins__.sum`` or ``del sum`` # removes the imported sum and recovers the shadowed build-in try: from matplotlib import pyplot savefig = pyplot.savefig # now we can use cma.savefig() etc closefig = pyplot.close def show(): # is_interactive = matplotlib.is_interactive() pyplot.ion() pyplot.show() # if we call now matplotlib.interactive(True), the console is # blocked pyplot.ion() # prevents that execution stops after plotting except: pyplot = None savefig = None closefig = None def show(): print('pyplot.show() is not available') print('Could not import matplotlib.pyplot, therefore ``cma.plot()``" +' ' etc. is not available') __author__ = 'Nikolaus Hansen' __version__ = "1.1.06 $Revision: 4129 $ $Date: 2015-01-23 20:13:51 +0100 (Fri, 23 Jan 2015) $" # $Source$ # according to PEP 8 style guides, but what is it good for? # $Id: cma.py 4129 2015-01-23 19:13:51Z hansen $ # bash $: svn propset svn:keywords 'Date Revision Id' cma.py __docformat__ = "reStructuredText" # this hides some comments entirely? __all__ = ( 'main', 'fmin', 'fcts', 'Fcts', 'felli', 'rotate', 'pprint', 'plot', 'disp', 'show', 'savefig', 'closefig', 'use_archives', 'is_feasible', 'unitdoctest', 'DerivedDictBase', 'SolutionDict', 'CMASolutionDict', 'BestSolution', # 'BoundaryHandlerBase', 'BoundNone', 'BoundTransform', 'BoundPenalty', # 'BoxConstraintsTransformationBase', # 'BoxConstraintsLinQuadTransformation', 'GenoPheno', 'OOOptimizer', 'CMAEvolutionStrategy', 'CMAOptions', 'CMASolutionDict', 'CMAAdaptSigmaBase', 'CMAAdaptSigmaNone', 'CMAAdaptSigmaDistanceProportional', 'CMAAdaptSigmaCSA', 'CMAAdaptSigmaTPA', 'CMAAdaptSigmaMedianImprovement', 'BaseDataLogger', 'CMADataLogger', 'NoiseHandler', 'Sections', 'Misc', 'Mh', 'ElapsedTime', 'Rotation', 'fcts', 'FFWrappers', ) use_archives = True # on False some unit tests fail """speed up for very large population size. `use_archives` prevents the need for an inverse gp-transformation, relies on collections module, not sure what happens if set to ``False``. """ class MetaParameters(object): """meta parameters are either "modifiable constants" or refer to options from ``CMAOptions`` or are arguments to ``fmin`` or to the ``NoiseHandler`` class constructor. Details ------- This code contains a single class instance `meta_parameters` Some interfaces rely on parameters being either `int` or `float` only. More sophisticated choices are implemented via ``choice_value = {1: 'this', 2: 'or that'}[int_param_value]`` here. CAVEAT ------ ``meta_parameters`` should not be used to determine default arguments, because these are assigned only once and for all during module import. 
""" def __init__(self): self.sigma0 = None ## [~0.01, ~10] # no default available # learning rates and back-ward time horizons self.CMA_cmean = 1.0 ## [~0.1, ~10] # self.c1_multiplier = 1.0 ## [~1e-4, ~20] l self.cmu_multiplier = 2.0 ## [~1e-4, ~30] l # zero means off self.CMA_active = 1.0 ## [~1e-4, ~10] l # 0 means off, was CMA_activefac self.cc_multiplier = 1.0 ## [~0.01, ~20] l self.cs_multiplier = 1.0 ## [~0.01, ~10] l # learning rate for cs self.CSA_dampfac = 1.0 ## [~0.01, ~10] self.CMA_dampsvec_fac = None ## [~0.01, ~100] # def=np.Inf or 0.5, not clear whether this is a log parameter self.CMA_dampsvec_fade = 0.1 ## [0, ~2] # exponents for learning rates self.c1_exponent = 2.0 ## [~1.25, 2] self.cmu_exponent = 2.0 ## [~1.25, 2] self.cact_exponent = 1.5 ## [~1.25, 2] self.cc_exponent = 1.0 ## [~0.25, ~1.25] self.cs_exponent = 1.0 ## [~0.25, ~1.75] # upper bound depends on CSA_clip_length_value # selection related parameters self.lambda_exponent = 0.0 ## [0, ~2.5] # usually <= 2, used by adding N**lambda_exponent to popsize-1 self.parent_fraction = 0.5 ## [0, 1] # default is weighted recombination self.CMA_elitist = 0 ## [0, 2] i # a choice variable self.CMA_mirrors = 0.0 ## [0, 0.5) # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used', # sampling strategies self.CMA_sample_on_sphere_surface = 0 ## [0, 1] i # boolean self.mean_shift_line_samples = 0 ## [0, 1] i # boolean self.pc_line_samples = 0 ## [0, 1] i # boolean # step-size adapation related parameters self.CSA_damp_mueff_exponent = 0.5 ## [~0.25, ~1.5] # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option', self.CSA_disregard_length = 0 ## [0, 1] i self.CSA_squared = 0 ## [0, 1] i self.CSA_clip_length_value = None ## [0, ~20] # None reflects inf # noise handling self.noise_reeval_multiplier = 1.0 ## [0.2, 4] # usually 2 offspring are reevaluated self.noise_choose_reeval = 1 ## [1, 3] i # which ones to reevaluate self.noise_theta = 0.5 ## [~0.05, ~0.9] self.noise_alphasigma = 2.0 ## [0, 10] self.noise_alphaevals = 2.0 ## [0, 10] self.noise_alphaevalsdown_exponent = -0.25 ## [-1.5, 0] self.noise_aggregate = None ## [1, 2] i # None and 0 == default or user option choice, 1 == median, 2 == mean # TODO: more noise handling options (maxreevals...) # restarts self.restarts = 0 ## [0, ~30] # but depends on popsize inc self.restart_from_best = 0 ## [0, 1] i # bool self.incpopsize = 2.0 ## [~1, ~5] # termination conditions (for restarts) self.maxiter_multiplier = 1.0 ## [~0.01, ~100] l self.mindx = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any direction, cave interference with tol*', self.minstd = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any coordinate direction, cave interference with tol*', self.maxstd = None ## [~1, ~1e9] l #v maximal std in any coordinate direction, default is inf', self.tolfacupx = 1e3 ## [~10, ~1e9] l #v termination when step-size increases by tolfacupx (diverges). 
That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0', self.tolupsigma = 1e20 ## [~100, ~1e99] l #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements', self.tolx = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in x-changes', self.tolfun = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value, quite useful', self.tolfunhist = 1e-12 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value history', self.tolstagnation_multiplier = 1.0 ## [0.01, ~100] # ': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations', # abandoned: # self.noise_change_sigma_exponent = 1.0 ## [0, 2] # self.noise_epsilon = 1e-7 ## [0, ~1e-2] l # # self.maxfevals = None ## [1, ~1e11] l # is not a performance parameter # self.cc_mu_multiplier = 1 ## [0, ~10] # AKA alpha_cc # self.lambda_log_multiplier = 3 ## [0, ~10] # self.lambda_multiplier = 0 ## (0, ~10] meta_parameters = MetaParameters() # emptysets = ('', (), [], {}) # array([]) does not work but np.size(.) == 0 # here is the problem: # bool(array([0])) is False # bool(list(array([0]))) is True # bool(list(array([0, 1]))) is True # bool(array([0, 1])) raises ValueError # # "x in emptysets" cannot be well replaced by "not x" # which is also True for array([]) and None, but also for 0 and False, # and False for NaN, and an exception for array([0,1]), see also # http://google-styleguide.googlecode.com/svn/trunk/pyguide.html#True/False_evaluations # ____________________________________________________________ # ____________________________________________________________ # def rglen(ar): """shortcut for the iterator ``xrange(len(ar))``""" return xrange(len(ar)) def is_feasible(x, f): """default to check feasibility, see also ``cma_default_options``""" return f is not None and f is not np.NaN global_verbosity = 1 def _print_warning(msg, method_name=None, class_name=None, iteration=None, verbose=None): if verbose is None: verbose = global_verbosity if verbose > 0: print('WARNING (module=' + __name__ + (', class=' + str(class_name) if class_name else '') + (', method=' + str(method_name) if method_name else '') + (', iteration=' + str(iteration) if iteration else '') + '): ', msg) # ____________________________________________________________ # ____________________________________________________________ # def unitdoctest(): """is used to describe test cases and might in future become helpful as an experimental tutorial as well. The main testing feature at the moment is by doctest with ``cma._test()`` or conveniently by ``python cma.py --test``. With the ``--verbose`` option added, the results will always slightly differ and many "failed" test cases might be reported. A simple first overall test: >>> import cma >>> res = cma.fmin(cma.fcts.elli, 3*[1], 1, ... 
{'CMA_diagonal':2, 'seed':1, 'verb_time':0}) (3_w,7)-CMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=1) Covariance matrix is diagonal for 2 iterations (1/ccov=7.0) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 7 1.453161670768570e+04 1.2e+00 1.08e+00 1e+00 1e+00 2 14 3.281197961927601e+04 1.3e+00 1.22e+00 1e+00 2e+00 3 21 1.082851071704020e+04 1.3e+00 1.24e+00 1e+00 2e+00 100 700 8.544042012075362e+00 1.4e+02 3.18e-01 1e-03 2e-01 200 1400 5.691152415221861e-12 1.0e+03 3.82e-05 1e-09 1e-06 220 1540 3.890107746209078e-15 9.5e+02 4.56e-06 8e-11 7e-08 termination on tolfun : 1e-11 final/bestever f-value = 3.89010774621e-15 2.52273602735e-15 mean solution: [ -4.63614606e-08 -3.42761465e-10 1.59957987e-11] std deviation: [ 6.96066282e-08 2.28704425e-09 7.63875911e-11] Test on the Rosenbrock function with 3 restarts. The first trial only finds the local optimum, which happens in about 20% of the cases. >>> import cma >>> res = cma.fmin(cma.fcts.rosen, 4*[-1], 1, ... options={'ftarget':1e-6, 'verb_time':0, ... 'verb_disp':500, 'seed':3}, ... restarts=3) (4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=3) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 8 4.875315645656848e+01 1.0e+00 8.43e-01 8e-01 8e-01 2 16 1.662319948123120e+02 1.1e+00 7.67e-01 7e-01 8e-01 3 24 6.747063604799602e+01 1.2e+00 7.08e-01 6e-01 7e-01 184 1472 3.701428610430019e+00 4.3e+01 9.41e-07 3e-08 5e-08 termination on tolfun : 1e-11 final/bestever f-value = 3.70142861043 3.70142861043 mean solution: [-0.77565922 0.61309336 0.38206284 0.14597202] std deviation: [ 2.54211502e-08 3.88803698e-08 4.74481641e-08 3.64398108e-08] (8_w,16)-CMA-ES (mu_w=4.8,w_1=32%) in dimension 4 (seed=4) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 1489 2.011376859371495e+02 1.0e+00 8.90e-01 8e-01 9e-01 2 1505 4.157106647905128e+01 1.1e+00 8.02e-01 7e-01 7e-01 3 1521 3.548184889359060e+01 1.1e+00 1.02e+00 8e-01 1e+00 111 3249 6.831867555502181e-07 5.1e+01 2.62e-02 2e-04 2e-03 termination on ftarget : 1e-06 final/bestever f-value = 6.8318675555e-07 1.18576673231e-07 mean solution: [ 0.99997004 0.99993938 0.99984868 0.99969505] std deviation: [ 0.00018973 0.00038006 0.00076479 0.00151402] >>> assert res[1] <= 1e-6 Notice the different termination conditions. Termination on the target function value ftarget prevents further restarts. Test of scaling_of_variables option >>> import cma >>> opts = cma.CMAOptions() >>> opts['seed'] = 456 >>> opts['verb_disp'] = 0 >>> opts['CMA_active'] = 1 >>> # rescaling of third variable: for searching in roughly >>> # x0 plus/minus 1e3*sigma0 (instead of plus/minus sigma0) >>> opts['scaling_of_variables'] = [1, 1, 1e3, 1] >>> res = cma.fmin(cma.fcts.rosen, 4 * [0.1], 0.1, opts) termination on tolfun : 1e-11 final/bestever f-value = 2.68096173031e-14 1.09714829146e-14 mean solution: [ 1.00000001 1.00000002 1.00000004 1.00000007] std deviation: [ 3.00466854e-08 5.88400826e-08 1.18482371e-07 2.34837383e-07] The printed std deviations reflect the actual value in the parameters of the function (not the one in the internal representation which can be different). Test of CMA_stds scaling option. 
>>> import cma >>> opts = cma.CMAOptions() >>> s = 5 * [1] >>> s[0] = 1e3 >>> opts.set('CMA_stds', s) >>> opts.set('verb_disp', 0) >>> res = cma.fmin(cma.fcts.cigar, 5 * [0.1], 0.1, opts) >>> assert res[1] < 1800 :See: cma.main(), cma._test() """ pass class _BlancClass(object): """blanc container class for having a collection of attributes, that might/should at some point become a more tailored class""" if use_archives: class DerivedDictBase(collections.MutableMapping): """for conveniently adding "features" to a dictionary. The actual dictionary is in ``self.data``. Copy-paste and modify setitem, getitem, and delitem, if necessary. Details: This is the clean way to subclass build-in dict. """ def __init__(self, *args, **kwargs): # collections.MutableMapping.__init__(self) super(DerivedDictBase, self).__init__() # super(SolutionDict, self).__init__() # the same self.data = dict() self.data.update(dict(*args, **kwargs)) def __len__(self): return len(self.data) def __contains__(self, key): return key in self.data def __iter__(self): return iter(self.data) def __setitem__(self, key, value): """defines self[key] = value""" self.data[key] = value def __getitem__(self, key): """defines self[key]""" return self.data[key] def __delitem__(self, key): del self.data[key] class SolutionDict(DerivedDictBase): """dictionary with computation of an hash key. The hash key is generated from the inserted solution and a stack of previously inserted same solutions is provided. Each entry is meant to store additional information related to the solution. >>> import cma, numpy as np >>> d = cma.SolutionDict() >>> x = np.array([1,2,4]) >>> d[x] = {'f': sum(x**2), 'iteration': 1} >>> assert d[x]['iteration'] == 1 >>> assert d.get(x) == (d[x] if d.key(x) in d.keys() else None) TODO: data_with_same_key behaves like a stack (see setitem and delitem), but rather should behave like a queue?! A queue is less consistent with the operation self[key] = ..., if self.data_with_same_key[key] is not empty. TODO: iteration key is used to clean up without error management """ def __init__(self, *args, **kwargs): # DerivedDictBase.__init__(self, *args, **kwargs) super(SolutionDict, self).__init__(*args, **kwargs) self.data_with_same_key = {} self.last_iteration = 0 def key(self, x): try: return tuple(x) # using sum(x) is slower, using x[0] is slightly faster except TypeError: return x def __setitem__(self, key, value): """defines self[key] = value""" key = self.key(key) if key in self.data_with_same_key: self.data_with_same_key[key] += [self.data[key]] elif key in self.data: self.data_with_same_key[key] = [self.data[key]] self.data[key] = value def __getitem__(self, key): # 50% of time of """defines self[key]""" return self.data[self.key(key)] def __delitem__(self, key): """remove only most current key-entry""" key = self.key(key) if key in self.data_with_same_key: if len(self.data_with_same_key[key]) == 1: self.data[key] = self.data_with_same_key.pop(key)[0] else: self.data[key] = self.data_with_same_key[key].pop(-1) else: del self.data[key] def truncate(self, max_len, min_iter): if len(self) > max_len: for k in list(self.keys()): if self[k]['iteration'] < min_iter: del self[k] # deletes one item with k as key, better delete all? 
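# (editorial example, not part of the original module) SolutionDict above
# keeps a per-key stack of previously inserted entries: __setitem__ pushes the
# old value onto data_with_same_key and __delitem__ restores the most recent
# one.  A small illustrative sketch; the helper name is hypothetical.
def _example_solution_dict():
    import numpy as np
    import cma
    d = cma.SolutionDict()
    x = np.array([1., 2., 4.])
    d[x] = {'f': 21.0, 'iteration': 1}   # first entry for this solution
    d[x] = {'f': 20.0, 'iteration': 2}   # previous entry moves to the same-key stack
    assert d[x]['iteration'] == 2
    del d[x]                             # pops back to the iteration-1 entry
    assert d[x]['iteration'] == 1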
class CMASolutionDict(SolutionDict): def __init__(self, *args, **kwargs): # SolutionDict.__init__(self, *args, **kwargs) super(CMASolutionDict, self).__init__(*args, **kwargs) self.last_solution_index = 0 # TODO: insert takes 30% of the overall CPU time, mostly in def key() # with about 15% of the overall CPU time def insert(self, key, geno=None, iteration=None, fitness=None, value=None): """insert an entry with key ``key`` and value ``value if value is not None else {'geno':key}`` and ``self[key]['kwarg'] = kwarg if kwarg is not None`` for the further kwargs. """ # archive returned solutions, first clean up archive if iteration is not None and iteration > self.last_iteration and (iteration % 10) < 1: self.truncate(300, iteration - 3) elif value is not None and value.get('iteration'): iteration = value['iteration'] if (iteration % 10) < 1: self.truncate(300, iteration - 3) self.last_solution_index += 1 if value is not None: try: iteration = value['iteration'] except: pass if iteration is not None: if iteration > self.last_iteration: self.last_solution_index = 0 self.last_iteration = iteration else: iteration = self.last_iteration + 0.5 # a hack to get a somewhat reasonable value if value is not None: self[key] = value else: self[key] = {'pheno': key} if geno is not None: self[key]['geno'] = geno if iteration is not None: self[key]['iteration'] = iteration if fitness is not None: self[key]['fitness'] = fitness return self[key] if not use_archives: class CMASolutionDict(dict): """a hack to get most code examples running""" def insert(self, *args, **kwargs): pass def get(self, key): return None def __getitem__(self, key): return None def __setitem__(self, key, value): pass class BestSolution(object): """container to keep track of the best solution seen""" def __init__(self, x=None, f=np.inf, evals=None): """initialize the best solution with `x`, `f`, and `evals`. Better solutions have smaller `f`-values. """ self.x = x self.x_geno = None self.f = f if f is not None and f is not np.nan else np.inf self.evals = evals self.evalsall = evals self.last = _BlancClass() self.last.x = x self.last.f = f def update(self, arx, xarchive=None, arf=None, evals=None): """checks for better solutions in list `arx`. Based on the smallest corresponding value in `arf`, alternatively, `update` may be called with a `BestSolution` instance like ``update(another_best_solution)`` in which case the better solution becomes the current best. `xarchive` is used to retrieve the genotype of a solution. 
""" if isinstance(arx, BestSolution): if self.evalsall is None: self.evalsall = arx.evalsall elif arx.evalsall is not None: self.evalsall = max((self.evalsall, arx.evalsall)) if arx.f is not None and arx.f < np.inf: self.update([arx.x], xarchive, [arx.f], arx.evals) return self assert arf is not None # find failsave minimum minidx = np.nanargmin(arf) if minidx is np.nan: return minarf = arf[minidx] # minarf = reduce(lambda x, y: y if y and y is not np.nan # and y < x else x, arf, np.inf) if minarf < np.inf and (minarf < self.f or self.f is None): self.x, self.f = arx[minidx], arf[minidx] if xarchive is not None and xarchive.get(self.x) is not None: self.x_geno = xarchive[self.x].get('geno') else: self.x_geno = None self.evals = None if not evals else evals - len(arf) + minidx + 1 self.evalsall = evals elif evals: self.evalsall = evals self.last.x = arx[minidx] self.last.f = minarf def get(self): """return ``(x, f, evals)`` """ return self.x, self.f, self.evals # , self.x_geno # ____________________________________________________________ # ____________________________________________________________ # class BoundaryHandlerBase(object): """hacked base class """ def __init__(self, bounds): """bounds are not copied, but possibly modified and put into a normalized form: ``bounds`` can be ``None`` or ``[lb, ub]`` where ``lb`` and ``ub`` are either None or a vector (which can have ``None`` entries). Generally, the last entry is recycled to compute bounds for any dimension. """ if not bounds: self.bounds = None else: l = [None, None] # figure out lenths for i in [0, 1]: try: l[i] = len(bounds[i]) except TypeError: bounds[i] = [bounds[i]] l[i] = 1 if all([bounds[i][j] is None or not isfinite(bounds[i][j]) for j in rglen(bounds[i])]): bounds[i] = None if bounds[i] is not None and any([bounds[i][j] == (-1)**i * np.inf for j in rglen(bounds[i])]): raise ValueError('lower/upper is +inf/-inf and ' + 'therefore no finite feasible solution is available') self.bounds = bounds def __call__(self, solutions, *args, **kwargs): """return penalty or list of penalties, by default zero(s). This interface seems too specifically tailored to the derived BoundPenalty class, it should maybe change. 
""" if isscalar(solutions[0]): return 0.0 else: return len(solutions) * [0.0] def update(self, *args, **kwargs): return self def repair(self, x, copy_if_changed=True, copy_always=False): """projects infeasible values on the domain bound, might be overwritten by derived class """ if copy_always: x = array(x, copy=True) copy = False else: copy = copy_if_changed if self.bounds is None: return x for ib in [0, 1]: if self.bounds[ib] is None: continue for i in rglen(x): idx = min([i, len(self.bounds[ib]) - 1]) if self.bounds[ib][idx] is not None and \ (-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]: if copy: x = array(x, copy=True) copy = False x[i] = self.bounds[ib][idx] def inverse(self, y, copy_if_changed=True, copy_always=False): return y if not copy_always else array(y, copy=True) def get_bounds(self, which, dimension): """``get_bounds('lower', 8)`` returns the lower bounds in 8-D""" if which == 'lower' or which == 0: return self._get_bounds(0, dimension) elif which == 'upper' or which == 1: return self._get_bounds(1, dimension) else: raise ValueError("argument which must be 'lower' or 'upper'") def _get_bounds(self, ib, dimension): """ib == 0/1 means lower/upper bound, return a vector of length `dimension` """ sign_ = 2 * ib - 1 assert sign_**2 == 1 if self.bounds is None or self.bounds[ib] is None: return array(dimension * [sign_ * np.Inf]) res = [] for i in xrange(dimension): res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])]) if res[-1] is None: res[-1] = sign_ * np.Inf return array(res) def has_bounds(self): """return True, if any variable is bounded""" bounds = self.bounds if bounds in (None, [None, None]): return False for ib, bound in enumerate(bounds): if bound is not None: sign_ = 2 * ib - 1 for bound_i in bound: if bound_i is not None and sign_ * bound_i < np.inf: return True return False def is_in_bounds(self, x): """not yet tested""" if self.bounds is None: return True for ib in [0, 1]: if self.bounds[ib] is None: continue for i in rglen(x): idx = min([i, len(self.bounds[ib]) - 1]) if self.bounds[ib][idx] is not None and \ (-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]: return False return True def to_dim_times_two(self, bounds): """return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``, as used by ``BoxConstraints...`` class. """ if not bounds: b = [[None, None]] else: l = [None, None] # figure out lenths for i in [0, 1]: try: l[i] = len(bounds[i]) except TypeError: bounds[i] = [bounds[i]] l[i] = 1 b = [] # bounds in different format try: for i in xrange(max(l)): b.append([bounds[0][i] if i < l[0] else None, bounds[1][i] if i < l[1] else None]) except (TypeError, IndexError): print("boundaries must be provided in the form " + "[scalar_of_vector, scalar_or_vector]") raise return b # ____________________________________________________________ # ____________________________________________________________ # class BoundNone(BoundaryHandlerBase): def __init__(self, bounds=None): if bounds is not None: raise ValueError() # BoundaryHandlerBase.__init__(self, None) super(BoundNone, self).__init__(None) def is_in_bounds(self, x): return True # ____________________________________________________________ # ____________________________________________________________ # class BoundTransform(BoundaryHandlerBase): """Handles boundary by a smooth, piecewise linear and quadratic transformation into the feasible domain. 
>>> import cma >>> veq = cma.Mh.vequals_approximately >>> b = cma.BoundTransform([None, 1]) >>> assert b.bounds == [[None], [1]] >>> assert veq(b.repair([0, 1, 1.2]), array([ 0., 0.975, 0.975])) >>> assert b.is_in_bounds([0, 0.5, 1]) >>> assert veq(b.transform([0, 1, 2]), [ 0. , 0.975, 0.2 ]) >>> o=cma.fmin(cma.fcts.sphere, 6 * [-2], 0.5, options={ ... 'boundary_handling': 'BoundTransform ', ... 'bounds': [[], 5 * [-1] + [inf]] }) >>> assert o[1] < 5 + 1e-8 >>> import numpy as np >>> b = cma.BoundTransform([-np.random.rand(120), np.random.rand(120)]) >>> for i in range(100): ... x = (-i-1) * np.random.rand(120) + i * np.random.randn(120) ... x_to_b = b.repair(x) ... x2 = b.inverse(x_to_b) ... x2_to_b = b.repair(x2) ... x3 = b.inverse(x2_to_b) ... x3_to_b = b.repair(x3) ... assert veq(x_to_b, x2_to_b) ... assert veq(x2, x3) ... assert veq(x2_to_b, x3_to_b) Details: this class uses ``class BoxConstraintsLinQuadTransformation`` """ def __init__(self, bounds=None): """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]`` are lower and upper domain boundaries, each is either `None` or a scalar or a list or array of appropriate size. """ # BoundaryHandlerBase.__init__(self, bounds) super(BoundTransform, self).__init__(bounds) self.bounds_tf = BoxConstraintsLinQuadTransformation(self.to_dim_times_two(bounds)) def repair(self, x, copy_if_changed=True, copy_always=False): """transforms ``x`` into the bounded domain. ``copy_always`` option might disappear. """ copy = copy_if_changed if copy_always: x = array(x, copy=True) copy = False if self.bounds is None or (self.bounds[0] is None and self.bounds[1] is None): return x return self.bounds_tf(x, copy) def transform(self, x): return self.repair(x) def inverse(self, x, copy_if_changed=True, copy_always=False): """inverse transform of ``x`` from the bounded domain. """ copy = copy_if_changed if copy_always: x = array(x, copy=True) copy = False if self.bounds is None or (self.bounds[0] is None and self.bounds[1] is None): return x return self.bounds_tf.inverse(x, copy) # this doesn't exist # ____________________________________________________________ # ____________________________________________________________ # class BoundPenalty(BoundaryHandlerBase): """Computes the boundary penalty. Must be updated each iteration, using the `update` method. Details ------- The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``, where `xfeas` is the closest feasible (in-bounds) solution from `x`. The weight `w[i]` should be updated during each iteration using the update method. Example: >>> import cma >>> cma.fmin(cma.felli, 6 * [1], 1, ... { ... 'boundary_handling': 'BoundPenalty', ... 'bounds': [-1, 1], ... 'fixed_variables': {0: 0.012, 2:0.234} ... }) Reference: Hansen et al 2009, A Method for Handling Uncertainty... IEEE TEC, with addendum, see http://www.lri.fr/~hansen/TEC2009online.pdf """ def __init__(self, bounds=None): """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]`` are lower and upper domain boundaries, each is either `None` or a scalar or a list or array of appropriate size. """ # # # bounds attribute reminds the domain boundary values # BoundaryHandlerBase.__init__(self, bounds) super(BoundPenalty, self).__init__(bounds) self.gamma = 1 # a very crude assumption self.weights_initialized = False # gamma becomes a vector after initialization self.hist = [] # delta-f history def repair(self, x, copy_if_changed=True, copy_always=False): """sets out-of-bounds components of ``x`` on the bounds. 
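        For example (an illustrative sketch with arbitrary bounds)::

            p = BoundPenalty([0, 1])
            p.repair([-0.5, 0.3, 1.7])    # componentwise projection -> [0.0, 0.3, 1.0]
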
""" # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x)) copy = copy_if_changed if copy_always: x = array(x, copy=True) bounds = self.bounds if bounds not in (None, [None, None], (None, None)): # solely for effiency x = array(x, copy=True) if copy and not copy_always else x if bounds[0] is not None: if isscalar(bounds[0]): for i in rglen(x): x[i] = max((bounds[0], x[i])) else: for i in rglen(x): j = min([i, len(bounds[0]) - 1]) if bounds[0][j] is not None: x[i] = max((bounds[0][j], x[i])) if bounds[1] is not None: if isscalar(bounds[1]): for i in rglen(x): x[i] = min((bounds[1], x[i])) else: for i in rglen(x): j = min((i, len(bounds[1]) - 1)) if bounds[1][j] is not None: x[i] = min((bounds[1][j], x[i])) return x # ____________________________________________________________ # def __call__(self, x, archive, gp): """returns the boundary violation penalty for `x` ,where `x` is a single solution or a list or array of solutions. """ if x in (None, (), []): return x if self.bounds in (None, [None, None], (None, None)): return 0.0 if isscalar(x[0]) else [0.0] * len(x) # no penalty x_is_single_vector = isscalar(x[0]) x = [x] if x_is_single_vector else x # add fixed variables to self.gamma try: gamma = list(self.gamma) # fails if self.gamma is a scalar for i in sorted(gp.fixed_values): # fails if fixed_values is None gamma.insert(i, 0.0) gamma = array(gamma, copy=False) except TypeError: gamma = self.gamma pen = [] for xi in x: # CAVE: this does not work with already repaired values!! # CPU(N,lam,iter=20,200,100)?: 3s of 10s, array(xi): 1s # remark: one deep copy can be prevented by xold = xi first xpheno = gp.pheno(archive[xi]['geno']) # necessary, because xi was repaired to be in bounds xinbounds = self.repair(xpheno) # could be omitted (with unpredictable effect in case of external repair) fac = 1 # exp(0.1 * (log(self.scal) - np.mean(self.scal))) pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi)) return pen[0] if x_is_single_vector else pen # ____________________________________________________________ # def feasible_ratio(self, solutions): """counts for each coordinate the number of feasible values in ``solutions`` and returns an array of length ``len(solutions[0])`` with the ratios. `solutions` is a list or array of repaired ``Solution`` instances, """ raise NotImplementedError('Solution class disappeared') count = np.zeros(len(solutions[0])) for x in solutions: count += x.unrepaired == x return count / float(len(solutions)) # ____________________________________________________________ # def update(self, function_values, es): """updates the weights for computing a boundary penalty. Arguments --------- `function_values` all function values of recent population of solutions `es` `CMAEvolutionStrategy` object instance, in particular mean and variances and the methods from the attribute `gp` of type `GenoPheno` are used. 
""" if self.bounds is None or (self.bounds[0] is None and self.bounds[1] is None): return self N = es.N # ## prepare # compute varis = sigma**2 * C_ii varis = es.sigma**2 * array(N * [es.C] if isscalar(es.C) else (# scalar case es.C if isscalar(es.C[0]) else # diagonal matrix case [es.C[i][i] for i in xrange(N)])) # full matrix case # relative violation in geno-space dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5 # ## Store/update a history of delta fitness value fvals = sorted(function_values) l = 1 + len(fvals) val = fvals[3 * l // 4] - fvals[l // 4] # exact interquartile range apart interpolation val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration # insert val in history if isfinite(val) and val > 0: self.hist.insert(0, val) elif val == inf and len(self.hist) > 1: self.hist.insert(0, max(self.hist)) else: pass # ignore 0 or nan values if len(self.hist) > 20 + (3 * N) / es.popsize: self.hist.pop() # ## prepare dfit = np.median(self.hist) # median interquartile range damp = min(1, es.sp.mueff / 10. / N) # ## set/update weights # Throw initialization error if len(self.hist) == 0: raise _Error('wrongful initialization, no feasible solution sampled. ' + 'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' + 'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ') # initialize weights if dmean.any() and (not self.weights_initialized or es.countiter == 2): # TODO self.gamma = array(N * [2 * dfit]) ## BUGBUGzzzz: N should be phenotypic (bounds are in phenotype), but is genotypic self.weights_initialized = True # update weights gamma if self.weights_initialized: edist = array(abs(dmean) - 3 * max(1, N**0.5 / es.sp.mueff)) if 1 < 3: # this is better, around a factor of two # increase single weights possibly with a faster rate than they can decrease # value unit of edst is std dev, 3==random walk of 9 steps self.gamma *= exp((edist > 0) * np.tanh(edist / 3) / 2.)**damp # decrease all weights up to the same level to avoid single extremely small weights # use a constant factor for pseudo-keeping invariance self.gamma[self.gamma > 5 * dfit] *= exp(-1. / 3)**damp # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3) es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0] # ## return penalty # es.more_to_write = self.gamma if not isscalar(self.gamma) else N*[1] return self # bound penalty values # ____________________________________________________________ # ____________________________________________________________ # class BoxConstraintsTransformationBase(object): """Implements a transformation into boundaries and is used for boundary handling:: tf = BoxConstraintsTransformationAnyDerivedClass([[1, 4]]) x = [3, 2, 4.4] y = tf(x) # "repaired" solution print(tf([2.5])) # middle value is never changed [2.5] :See: ``BoundaryHandler`` """ def __init__(self, bounds): try: if len(bounds[0]) != 2: raise ValueError except: raise ValueError(' bounds must be either [[lb0, ub0]] or [[lb0, ub0], [lb1, ub1],...], \n where in both cases the last entry is reused for all remaining dimensions') self.bounds = bounds self.initialize() def initialize(self): """initialize in base class""" self._lb = [b[0] for b in self.bounds] # can be done more efficiently? 
self._ub = [b[1] for b in self.bounds] def _lowerupperval(self, a, b, c): return np.max([np.max(a), np.min([np.min(b), c])]) def bounds_i(self, i): """return ``[ith_lower_bound, ith_upper_bound]``""" return self.bounds[self._index(i)] def __call__(self, solution_in_genotype): res = [self._transform_i(x, i) for i, x in enumerate(solution_in_genotype)] return res transform = __call__ def inverse(self, solution_in_phenotype, copy_if_changed=True, copy_always=True): return [self._inverse_i(y, i) for i, y in enumerate(solution_in_phenotype)] def _index(self, i): return min((i, len(self.bounds) - 1)) def _transform_i(self, x, i): raise NotImplementedError('this is an abstract method that should be implemented in the derived class') def _inverse_i(self, y, i): raise NotImplementedError('this is an abstract method that should be implemented in the derived class') def shift_or_mirror_into_invertible_domain(self, solution_genotype): """return the reference solution that has the same ``box_constraints_transformation(solution)`` value, i.e. ``tf.shift_or_mirror_into_invertible_domain(x) = tf.inverse(tf.transform(x))``. This is an idempotent mapping (leading to the same result independent how often it is repeatedly applied). """ return self.inverse(self(solution_genotype)) raise NotImplementedError('this is an abstract method that should be implemented in the derived class') class _BoxConstraintsTransformationTemplate(BoxConstraintsTransformationBase): """copy/paste this template to implement a new boundary handling transformation""" def __init__(self, bounds): # BoxConstraintsTransformationBase.__init__(self, bounds) super(_BoxConstraintsTransformationTemplate, self).__init__(bounds) def initialize(self): BoxConstraintsTransformationBase.initialize(self) # likely to be removed def _transform_i(self, x, i): raise NotImplementedError('this is an abstract method that should be implemented in the derived class') def _inverse_i(self, y, i): raise NotImplementedError('this is an abstract method that should be implemented in the derived class') __doc__ = BoxConstraintsTransformationBase.__doc__ + __doc__ class BoxConstraintsLinQuadTransformation(BoxConstraintsTransformationBase): """implements a bijective, monotonous transformation between [lb - al, ub + au] and [lb, ub] which is the identity (and therefore linear) in [lb + al, ub - au] (typically about 90% of the interval) and quadratic in [lb - 3*al, lb + al] and in [ub - au, ub + 3*au]. The transformation is periodically expanded beyond the limits (somewhat resembling the shape sin(x-pi/2)) with a period of ``2 * (ub - lb + al + au)``. Details ======= Partly due to numerical considerations depend the values ``al`` and ``au`` on ``abs(lb)`` and ``abs(ub)`` which makes the transformation non-translation invariant. In contrast to sin(.), the transformation is robust to "arbitrary" values for boundaries, e.g. a lower bound of ``-1e99`` or ``np.Inf`` or ``None``. Examples ======== Example to use with cma: >>> import cma >>> # only the first variable has an upper bound >>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,None]]) # second==last pair is re-cycled >>> cma.fmin(cma.felli, 9 * [2], 1, {'transformation': [tf.transform, tf.inverse], 'verb_disp': 0}) >>> # ...or... >>> es = cma.CMAEvolutionStrategy(9 * [2], 1) >>> while not es.stop(): ... X = es.ask() ... f = [cma.felli(tf(x)) for x in X] # tf(x) == tf.transform(x) ... 
es.tell(X, f) Example of the internal workings: >>> import cma >>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,11], [1,11]]) >>> tf.bounds [[1, 2], [1, 11], [1, 11]] >>> tf([1.5, 1.5, 1.5]) [1.5, 1.5, 1.5] >>> tf([1.52, -2.2, -0.2, 2, 4, 10.4]) [1.52, 4.0, 2.0, 2.0, 4.0, 10.4] >>> res = np.round(tf._au, 2) >>> assert list(res[:4]) == [ 0.15, 0.6, 0.6, 0.6] >>> res = [round(x, 2) for x in tf.shift_or_mirror_into_invertible_domain([1.52, -12.2, -0.2, 2, 4, 10.4])] >>> assert res == [1.52, 9.2, 2.0, 2.0, 4.0, 10.4] >>> tmp = tf([1]) # call with lower dimension """ def __init__(self, bounds): """``x`` is defined in ``[lb - 3*al, ub + au + r - 2*al]`` with ``r = ub - lb + al + au``, and ``x == transformation(x)`` in ``[lb + al, ub - au]``. ``beta*x - alphal = beta*x - alphau`` is then defined in ``[lb, ub]``, ``alphal`` and ``alphau`` represent the same value, but respectively numerically better suited for values close to lb and ub. """ # BoxConstraintsTransformationBase.__init__(self, bounds) super(BoxConstraintsLinQuadTransformation, self).__init__(bounds) # super().__init__(bounds) # only available since Python 3.x # super(BB, self).__init__(bounds) # is supposed to call initialize def initialize(self, length=None): """see ``__init__``""" if length is None: length = len(self.bounds) max_i = min((len(self.bounds) - 1, length - 1)) self._lb = array([self.bounds[min((i, max_i))][0] if self.bounds[min((i, max_i))][0] is not None else -np.Inf for i in xrange(length)], copy=False) self._ub = array([self.bounds[min((i, max_i))][1] if self.bounds[min((i, max_i))][1] is not None else np.Inf for i in xrange(length)], copy=False) lb = self._lb ub = self._ub # define added values for lower and upper bound self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20]) if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False) self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20]) if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False) def __call__(self, solution_genotype, copy_if_changed=True, copy_always=False): # about four times faster version of array([self._transform_i(x, i) for i, x in enumerate(solution_genotype)]) # still, this makes a typical run on a test function two times slower, but there might be one too many copies # during the transformations in gp if len(self._lb) != len(solution_genotype): self.initialize(len(solution_genotype)) lb = self._lb ub = self._ub al = self._al au = self._au if copy_always or not isinstance(solution_genotype[0], float): # transformed value is likely to be a float y = np.array(solution_genotype, copy=True, dtype=float) # if solution_genotype is not a float, copy value is disregarded copy = False else: y = solution_genotype copy = copy_if_changed idx = (y < lb - 2 * al - (ub - lb) / 2.0) | (y > ub + 2 * au + (ub - lb) / 2.0) if idx.any(): r = 2 * (ub[idx] - lb[idx] + al[idx] + au[idx]) # period s = lb[idx] - 2 * al[idx] - (ub[idx] - lb[idx]) / 2.0 # start if copy: y = np.array(y, copy=True) copy = False y[idx] -= r * ((y[idx] - s) // r) # shift idx = y > ub + au if idx.any(): if copy: y = np.array(y, copy=True) copy = False y[idx] -= 2 * (y[idx] - ub[idx] - au[idx]) idx = y < lb - al if idx.any(): if copy: y = np.array(y, copy=True) copy = False y[idx] += 2 * (lb[idx] - al[idx] - y[idx]) idx = y < lb + al if idx.any(): if copy: y = np.array(y, copy=True) copy = False y[idx] = lb[idx] + (y[idx] - (lb[idx] - al[idx]))**2 / 4 / al[idx] idx = y > ub - au if idx.any(): if copy: y = np.array(y, copy=True) copy = False y[idx] = 
ub[idx] - (y[idx] - (ub[idx] + au[idx]))**2 / 4 / au[idx] # assert Mh.vequals_approximately(y, BoxConstraintsTransformationBase.__call__(self, solution_genotype)) return y __call__.doc = BoxConstraintsTransformationBase.__doc__ transform = __call__ def idx_infeasible(self, solution_genotype): """return indices of "infeasible" variables, that is, variables that do not directly map into the feasible domain such that ``tf.inverse(tf(x)) == x``. """ res = [i for i, x in enumerate(solution_genotype) if not self.is_feasible_i(x, i)] return res def is_feasible_i(self, x, i): """return True if value ``x`` is in the invertible domain of variable ``i`` """ lb = self._lb[self._index(i)] ub = self._ub[self._index(i)] al = self._al[self._index(i)] au = self._au[self._index(i)] return lb - al < x < ub + au def is_loosely_feasible_i(self, x, i): """never used""" lb = self._lb[self._index(i)] ub = self._ub[self._index(i)] al = self._al[self._index(i)] au = self._au[self._index(i)] return lb - 2 * al - (ub - lb) / 2.0 <= x <= ub + 2 * au + (ub - lb) / 2.0 def shift_or_mirror_into_invertible_domain(self, solution_genotype, copy=False): """Details: input ``solution_genotype`` is changed. The domain is [lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al] mirroring is applied. """ assert solution_genotype is not None if copy: y = [val for val in solution_genotype] else: y = solution_genotype if isinstance(y, np.ndarray) and not isinstance(y[0], float): y = array(y, dtype=float) for i in rglen(y): lb = self._lb[self._index(i)] ub = self._ub[self._index(i)] al = self._al[self._index(i)] au = self._au[self._index(i)] # x is far from the boundary, compared to ub - lb if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0: r = 2 * (ub - lb + al + au) # period s = lb - 2 * al - (ub - lb) / 2.0 # start y[i] -= r * ((y[i] - s) // r) # shift if y[i] > ub + au: y[i] -= 2 * (y[i] - ub - au) if y[i] < lb - al: y[i] += 2 * (lb - al - y[i]) return y shift_or_mirror_into_invertible_domain.__doc__ = BoxConstraintsTransformationBase.shift_or_mirror_into_invertible_domain.__doc__ + shift_or_mirror_into_invertible_domain.__doc__ def _shift_or_mirror_into_invertible_i(self, x, i): """shift into the invertible domain [lb - ab, ub + au], mirror close to boundaries in order to get a smooth transformation everywhere """ assert x is not None lb = self._lb[self._index(i)] ub = self._ub[self._index(i)] al = self._al[self._index(i)] au = self._au[self._index(i)] # x is far from the boundary, compared to ub - lb if x < lb - 2 * al - (ub - lb) / 2.0 or x > ub + 2 * au + (ub - lb) / 2.0: r = 2 * (ub - lb + al + au) # period s = lb - 2 * al - (ub - lb) / 2.0 # start x -= r * ((x - s) // r) # shift if x > ub + au: x -= 2 * (x - ub - au) if x < lb - al: x += 2 * (lb - al - x) return x def _transform_i(self, x, i): """return transform of x in component i""" x = self._shift_or_mirror_into_invertible_i(x, i) lb = self._lb[self._index(i)] ub = self._ub[self._index(i)] al = self._al[self._index(i)] au = self._au[self._index(i)] if x < lb + al: return lb + (x - (lb - al))**2 / 4 / al elif x < ub - au: return x elif x < ub + 3 * au: return ub - (x - (ub + au))**2 / 4 / au else: assert False # shift removes this case return ub + au - (x - (ub + au)) def _inverse_i(self, y, i): """return inverse of y in component i""" lb = self._lb[self._index(i)] ub = self._ub[self._index(i)] al = self._al[self._index(i)] au = self._au[self._index(i)] if 1 < 3: if not lb <= y <= ub: raise ValueError('argument of inverse must be 
within the given bounds') if y < lb + al: return (lb - al) + 2 * (al * (y - lb))**0.5 elif y < ub - au: return y else: return (ub + au) - 2 * (au * (ub - y))**0.5 class GenoPheno(object): """Genotype-phenotype transformation. Method `pheno` provides the transformation from geno- to phenotype, that is from the internal representation to the representation used in the objective function. Method `geno` provides the "inverse" pheno- to genotype transformation. The geno-phenotype transformation comprises, in this order: - insert fixed variables (with the phenotypic and therefore quite possibly "wrong" values) - affine linear transformation (first scaling then shift) - user-defined transformation - repair (e.g. into feasible domain due to boundaries) - assign fixed variables their original phenotypic value By default all transformations are the identity. The repair is only applied, if the transformation is given as argument to the method `pheno`. ``geno`` is only necessary, if solutions have been injected. """ def __init__(self, dim, scaling=None, typical_x=None, fixed_values=None, tf=None): """return `GenoPheno` instance with phenotypic dimension `dim`. Keyword Arguments ----------------- `scaling` the diagonal of a scaling transformation matrix, multipliers in the genotyp-phenotyp transformation, see `typical_x` `typical_x` ``pheno = scaling*geno + typical_x`` `fixed_values` a dictionary of variable indices and values, like ``{0:2.0, 2:1.1}``, that are not subject to change, negative indices are ignored (they act like incommenting the index), values are phenotypic values. `tf` list of two user-defined transformation functions, or `None`. ``tf[0]`` is a function that transforms the internal representation as used by the optimizer into a solution as used by the objective function. ``tf[1]`` does the back-transformation. For example:: tf_0 = lambda x: [xi**2 for xi in x] tf_1 = lambda x: [abs(xi)**0.5 fox xi in x] or "equivalently" without the `lambda` construct:: def tf_0(x): return [xi**2 for xi in x] def tf_1(x): return [abs(xi)**0.5 fox xi in x] ``tf=[tf_0, tf_1]`` is a reasonable way to guaranty that only positive values are used in the objective function. Details ------- If ``tf_0`` is not the identity and ``tf_1`` is ommitted, the genotype of ``x0`` cannot be computed consistently and "injection" of phenotypic solutions is likely to lead to unexpected results. 
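        A small construction sketch (illustrative only; the numbers are arbitrary)::

            gp = GenoPheno(4, scaling=[1., 10., 1., 1.], fixed_values={0: 2.0})
            y = gp.pheno([0.1, 0.2, 0.3])   # genotype has dimension 3, phenotype dimension 4
            x = gp.geno(y)                  # drops the fixed variable again
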
""" self.N = dim self.fixed_values = fixed_values if tf is not None: self.tf_pheno = tf[0] self.tf_geno = tf[1] # TODO: should not necessarily be needed # r = np.random.randn(dim) # assert all(tf[0](tf[1](r)) - r < 1e-7) # r = np.random.randn(dim) # assert all(tf[0](tf[1](r)) - r > -1e-7) _print_warning("in class GenoPheno: user defined transformations have not been tested thoroughly") else: self.tf_geno = None self.tf_pheno = None if fixed_values: if not isinstance(fixed_values, dict): raise _Error("fixed_values must be a dictionary {index:value,...}") if max(fixed_values.keys()) >= dim: raise _Error("max(fixed_values.keys()) = " + str(max(fixed_values.keys())) + " >= dim=N=" + str(dim) + " is not a feasible index") # convenience commenting functionality: drop negative keys for k in list(fixed_values.keys()): if k < 0: fixed_values.pop(k) def vec_is_default(vec, default_val=0): """return True if `vec` has the value `default_val`, None or [None] are also recognized as default """ # TODO: rather let default_val be a list of default values, # cave comparison of arrays try: if len(vec) == 1: vec = vec[0] # [None] becomes None and is always default except TypeError: pass # vec is a scalar if vec is None or all(vec == default_val): return True if all([val is None or val == default_val for val in vec]): return True return False self.scales = array(scaling) if scaling is not None else None if vec_is_default(self.scales, 1): self.scales = 1 # CAVE: 1 is not array(1) elif self.scales.shape is not () and len(self.scales) != self.N: raise _Error('len(scales) == ' + str(len(self.scales)) + ' does not match dimension N == ' + str(self.N)) self.typical_x = array(typical_x) if typical_x is not None else None if vec_is_default(self.typical_x, 0): self.typical_x = 0 elif self.typical_x.shape is not () and len(self.typical_x) != self.N: raise _Error('len(typical_x) == ' + str(len(self.typical_x)) + ' does not match dimension N == ' + str(self.N)) if (self.scales is 1 and self.typical_x is 0 and self.fixed_values is None and self.tf_pheno is None): self.isidentity = True else: self.isidentity = False if self.tf_pheno is None: self.islinear = True else: self.islinear = False def pheno(self, x, into_bounds=None, copy=True, copy_always=False, archive=None, iteration=None): """maps the genotypic input argument into the phenotypic space, see help for class `GenoPheno` Details ------- If ``copy``, values from ``x`` are copied if changed under the transformation. 
""" # TODO: copy_always seems superfluous, as it could be done in the calling code input_type = type(x) if into_bounds is None: into_bounds = (lambda x, copy=False: x if not copy else array(x, copy=copy)) if copy_always and not copy: raise ValueError('arguments copy_always=' + str(copy_always) + ' and copy=' + str(copy) + ' have inconsistent values') if copy_always: x = array(x, copy=True) copy = False if self.isidentity: y = into_bounds(x) # was into_bounds(x, False) before (bug before v0.96.22) else: if self.fixed_values is None: y = array(x, copy=copy) # make a copy, in case else: # expand with fixed values y = list(x) # is a copy for i in sorted(self.fixed_values.keys()): y.insert(i, self.fixed_values[i]) y = array(y, copy=False) copy = False if self.scales is not 1: # just for efficiency y *= self.scales if self.typical_x is not 0: y += self.typical_x if self.tf_pheno is not None: y = array(self.tf_pheno(y), copy=False) y = into_bounds(y, copy) # copy is False if self.fixed_values is not None: for i, k in list(self.fixed_values.items()): y[i] = k if input_type is np.ndarray: y = array(y, copy=False) if archive is not None: archive.insert(y, geno=x, iteration=iteration) return y def geno(self, y, from_bounds=None, copy_if_changed=True, copy_always=False, repair=None, archive=None): """maps the phenotypic input argument into the genotypic space, that is, computes essentially the inverse of ``pheno``. By default a copy is made only to prevent to modify ``y``. The inverse of the user-defined transformation (if any) is only needed if external solutions are injected, it is not applied to the initial solution x0. Details ======= ``geno`` searches first in ``archive`` for the genotype of ``y`` and returns the found value, typically unrepaired. Otherwise, first ``from_bounds`` is applied, to revert a projection into the bound domain (if necessary) and ``pheno`` is reverted. ``repair`` is applied last, and is usually the method ``CMAEvolutionStrategy.repair_genotype`` that limits the Mahalanobis norm of ``geno(y) - mean``. """ if from_bounds is None: from_bounds = lambda x, copy=False: x # not change, no copy if archive is not None: try: x = archive[y]['geno'] except (KeyError, TypeError): x = None if x is not None: if archive[y]['iteration'] < archive.last_iteration \ and repair is not None: x = repair(x, copy_if_changed=copy_always) return x input_type = type(y) x = y if copy_always: x = array(y, copy=True) copy = False else: copy = copy_if_changed x = from_bounds(x, copy) if self.isidentity: if repair is not None: x = repair(x, copy) return x if copy: # could be improved? 
x = array(x, copy=True) copy = False # user-defined transformation if self.tf_geno is not None: x = array(self.tf_geno(x), copy=False) elif self.tf_pheno is not None: raise ValueError('t1 of options transformation was not defined but is needed as being the inverse of t0') # affine-linear transformation: shift and scaling if self.typical_x is not 0: x -= self.typical_x if self.scales is not 1: # just for efficiency x /= self.scales # kick out fixed_values if self.fixed_values is not None: # keeping the transformed values does not help much # therefore it is omitted if 1 < 3: keys = sorted(self.fixed_values.keys()) x = array([x[i] for i in xrange(len(x)) if i not in keys], copy=False) # repair injected solutions if repair is not None: x = repair(x, copy) if input_type is np.ndarray: x = array(x, copy=False) return x # ____________________________________________________________ # ____________________________________________________________ # check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty... # see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes # class OOOptimizer(object): """"abstract" base class for an Object Oriented Optimizer interface. Relevant methods are `__init__`, `ask`, `tell`, `stop`, `result`, and `optimize`. Only `optimize` is fully implemented in this base class. Examples -------- All examples minimize the function `elli`, the output is not shown. (A preferred environment to execute all examples is ``ipython`` in ``%pylab`` mode.) First we need:: from cma import CMAEvolutionStrategy # CMAEvolutionStrategy derives from the OOOptimizer class felli = lambda x: sum(1e3**((i-1.)/(len(x)-1.)*x[i])**2 for i in range(len(x))) The shortest example uses the inherited method `OOOptimizer.optimize()`:: es = CMAEvolutionStrategy(8 * [0.1], 0.5).optimize(felli) The input parameters to `CMAEvolutionStrategy` are specific to this inherited class. The remaining functionality is based on interface defined by `OOOptimizer`. We might have a look at the result:: print(es.result()[0]) # best solution and print(es.result()[1]) # its function value In order to display more exciting output we do:: es.logger.plot() # if matplotlib is available Virtually the same example can be written with an explicit loop instead of using `optimize()`. This gives the necessary insight into the `OOOptimizer` class interface and entire control over the iteration loop:: optim = CMAEvolutionStrategy(9 * [0.5], 0.3) # a new CMAEvolutionStrategy instance # this loop resembles optimize() while not optim.stop(): # iterate X = optim.ask() # get candidate solutions f = [felli(x) for x in X] # evaluate solutions # in case do something else that needs to be done optim.tell(X, f) # do all the real "update" work optim.disp(20) # display info every 20th iteration optim.logger.add() # log another "data line" # final output print('termination by', optim.stop()) print('best f-value =', optim.result()[1]) print('best solution =', optim.result()[0]) optim.logger.plot() # if matplotlib is available Details ------- Most of the work is done in the method `tell(...)`. The method `result()` returns more useful output. 
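    To make the interface concrete, a deliberately naive subclass could look like the
    following (hypothetical illustration only, not part of the module; `CMAEvolutionStrategy`
    is the actual implementation of interest)::

        import random

        class PureRandomSearch(OOOptimizer):
            def initialize(self):
                self.countiter = 0
                self.xcurrent = list(self.xstart)
                self.fcurrent = float('inf')
            def ask(self):
                # sample one perturbed candidate around the incumbent
                return [[xi + random.gauss(0, 0.1) for xi in self.xcurrent]]
            def tell(self, solutions, function_values):
                self.countiter += 1
                if function_values[0] < self.fcurrent:
                    self.xcurrent = list(solutions[0])
                    self.fcurrent = function_values[0]
            def stop(self):
                return {'maxiter': self.countiter} if self.countiter >= 100 else {}
            def result(self):
                return (self.xcurrent, self.fcurrent, self.countiter)

        PureRandomSearch(5 * [1.0]).optimize(lambda x: sum(xi**2 for xi in x))
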
""" def __init__(self, xstart, **more_args): """``xstart`` is a mandatory argument""" self.xstart = xstart self.more_args = more_args self.initialize() def initialize(self): """(re-)set to the initial state""" self.countiter = 0 self.xcurrent = self.xstart[:] raise NotImplementedError('method initialize() must be implemented in derived class') def ask(self, gradf=None, **more_args): """abstract method, AKA "get" or "sample_distribution", deliver new candidate solution(s), a list of "vectors" """ raise NotImplementedError('method ask() must be implemented in derived class') def tell(self, solutions, function_values): """abstract method, AKA "update", pass f-values and prepare for next iteration """ self.countiter += 1 raise NotImplementedError('method tell() must be implemented in derived class') def stop(self): """abstract method, return satisfied termination conditions in a dictionary like ``{'termination reason': value, ...}``, for example ``{'tolfun': 1e-12}``, or the empty dictionary ``{}``. The implementation of `stop()` should prevent an infinite loop. """ raise NotImplementedError('method stop() is not implemented') def disp(self, modulo=None): """abstract method, display some iteration infos if ``self.iteration_counter % modulo == 0`` """ pass # raise NotImplementedError('method disp() is not implemented') def result(self): """abstract method, return ``(x, f(x), ...)``, that is, the minimizer, its function value, ... """ raise NotImplementedError('method result() is not implemented') # previous ordering: # def optimize(self, objectivefct, # logger=None, verb_disp=20, # iterations=None, min_iterations=1, # call_back=None): def optimize(self, objective_fct, iterations=None, min_iterations=1, args=(), verb_disp=None, logger=None, call_back=None): """find minimizer of `objective_fct`. CAVEAT: the return value for `optimize` has changed to ``self``. Arguments --------- `objective_fct` function be to minimized `iterations` number of (maximal) iterations, while ``not self.stop()`` `min_iterations` minimal number of iterations, even if ``not self.stop()`` `args` arguments passed to `objective_fct` `verb_disp` print to screen every `verb_disp` iteration, if ``None`` the value from ``self.logger`` is "inherited", if available. ``logger`` a `BaseDataLogger` instance, which must be compatible with the type of ``self``. ``call_back`` call back function called like ``call_back(self)`` or a list of call back functions. ``return self``, that is, the `OOOptimizer` instance. Example ------- >>> import cma >>> es = cma.CMAEvolutionStrategy(7 * [0.1], 0.5 ... ).optimize(cma.fcts.rosen, verb_disp=100) (4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 9 3.163954777181882e+01 1.0e+00 4.12e-01 4e-01 4e-01 0:0.0 2 18 3.299006223906629e+01 1.0e+00 3.60e-01 3e-01 4e-01 0:0.0 3 27 1.389129389866704e+01 1.1e+00 3.18e-01 3e-01 3e-01 0:0.0 100 900 2.494847340045985e+00 8.6e+00 5.03e-02 2e-02 5e-02 0:0.3 200 1800 3.428234862999135e-01 1.7e+01 3.77e-02 6e-03 3e-02 0:0.5 300 2700 3.216640032470860e-04 5.6e+01 6.62e-03 4e-04 9e-03 0:0.8 400 3600 6.155215286199821e-12 6.6e+01 7.44e-06 1e-07 4e-06 0:1.1 438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2 438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2 ('termination by', {'tolfun': 1e-11}) ('best f-value =', 1.1189867885201275e-14) ('solution =', array([ 1. , 1. , 1. 
, 0.99999999, 0.99999998, 0.99999996, 0.99999992])) >>> print(es.result()[0]) array([ 1. 1. 1. 0.99999999 0.99999998 0.99999996 0.99999992]) """ assert iterations is None or min_iterations <= iterations if not hasattr(self, 'logger'): self.logger = logger logger = self.logger = logger or self.logger if not isinstance(call_back, list): call_back = [call_back] citer = 0 while not self.stop() or citer < min_iterations: if iterations is not None and citer >= iterations: return self.result() citer += 1 X = self.ask() # deliver candidate solutions fitvals = [objective_fct(x, *args) for x in X] self.tell(X, fitvals) # all the work is done here self.disp(verb_disp) for f in call_back: f is None or f(self) logger.add(self) if logger else None # signal logger that we left the loop # TODO: this is very ugly, because it assumes modulo keyword # argument *and* modulo attribute to be available try: logger.add(self, modulo=bool(logger.modulo)) if logger else None except TypeError: print(' suppressing the final call of the logger in ' + 'OOOptimizer.optimize (modulo keyword parameter not ' + 'available)') except AttributeError: print(' suppressing the final call of the logger in ' + 'OOOptimizer.optimize (modulo attribute not ' + 'available)') if verb_disp: self.disp(1) if verb_disp in (1, True): print('termination by', self.stop()) print('best f-value =', self.result()[1]) print('solution =', self.result()[0]) return self # was: return self.result() + (self.stop(), self, logger) _experimental = False class CMAAdaptSigmaBase(object): """step-size adaptation base class, implementing hsig functionality via an isotropic evolution path. """ def __init__(self, *args, **kwargs): self.is_initialized_base = False self._ps_updated_iteration = -1 def initialize_base(self, es): """set parameters and state variable based on dimension, mueff and possibly further options. """ ## meta_parameters.cs_exponent == 1.0 b = 1.0 ## meta_parameters.cs_multiplier == 1.0 self.cs = 1.0 * (es.sp.mueff + 2)**b / (es.N**b + (es.sp.mueff + 3)**b) self.ps = np.zeros(es.N) self.is_initialized_base = True return self def _update_ps(self, es): """update the isotropic evolution path :type es: CMAEvolutionStrategy """ if not self.is_initialized_base: self.initialize_base(es) if self._ps_updated_iteration == es.countiter: return if es.countiter <= es.itereigenupdated: # es.B and es.D must/should be those from the last iteration assert es.countiter >= es.itereigenupdated _print_warning('distribution transformation (B and D) have been updated before ps could be computed', '_update_ps', 'CMAAdaptSigmaBase') z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)) z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z self._ps_updated_iteration = es.countiter def hsig(self, es): """return "OK-signal" for rank-one update, `True` (OK) or `False` (stall rank-one update), based on the length of an evolution path """ self._update_ps(es) if self.ps is None: return True squared_sum = sum(self.ps**2) / (1 - (1 - self.cs)**(2 * es.countiter)) # correction with self.countiter seems not necessary, # as pc also starts with zero return squared_sum / es.N - 1 < 1 + 4. / (es.N + 1) def update(self, es, **kwargs): """update ``es.sigma``""" self._update_ps(es) raise NotImplementedError('must be implemented in a derived class') class CMAAdaptSigmaNone(CMAAdaptSigmaBase): def update(self, es, **kwargs): """no update, ``es.sigma`` remains constant. 
:param es: ``CMAEvolutionStrategy`` class instance :param kwargs: whatever else is needed to update ``es.sigma`` """ pass class CMAAdaptSigmaDistanceProportional(CMAAdaptSigmaBase): """artificial setting of ``sigma`` for test purposes, e.g. to simulate optimal progress rates. """ def __init__(self, coefficient=1.2): super(CMAAdaptSigmaDistanceProportional, self).__init__() # base class provides method hsig() self.coefficient = coefficient self.is_initialized = True def update(self, es, **kwargs): # optimal step-size is es.sigma = self.coefficient * es.sp.mueff * sum(es.mean**2)**0.5 / es.N / es.sp.cmean class CMAAdaptSigmaCSA(CMAAdaptSigmaBase): def __init__(self): """postpone initialization to a method call where dimension and mueff should be known. """ self.is_initialized = False def initialize(self, es): """set parameters and state variable based on dimension, mueff and possibly further options. """ self.disregard_length_setting = True if es.opts['CSA_disregard_length'] else False if es.opts['CSA_clip_length_value'] is not None: try: if len(es.opts['CSA_clip_length_value']) == 0: es.opts['CSA_clip_length_value'] = [-np.Inf, np.Inf] elif len(es.opts['CSA_clip_length_value']) == 1: es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value'][0]] elif len(es.opts['CSA_clip_length_value']) == 2: es.opts['CSA_clip_length_value'] = np.sort(es.opts['CSA_clip_length_value']) else: raise ValueError('option CSA_clip_length_value should be a number of len(.) in [1,2]') except TypeError: # len(...) failed es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value']] es.opts['CSA_clip_length_value'] = list(np.sort(es.opts['CSA_clip_length_value'])) if es.opts['CSA_clip_length_value'][0] > 0 or es.opts['CSA_clip_length_value'][1] < 0: raise ValueError('option CSA_clip_length_value must be a single positive or a negative and a positive number') ## meta_parameters.cs_exponent == 1.0 b = 1.0 ## meta_parameters.cs_multiplier == 1.0 self.cs = 1.0 * (es.sp.mueff + 2)**b / (es.N + (es.sp.mueff + 3)**b) self.damps = es.opts['CSA_dampfac'] * (0.5 + 0.5 * min([1, (es.sp.lam_mirr / (0.159 * es.sp.popsize) - 1)**2])**1 + 2 * max([0, ((es.sp.mueff - 1) / (es.N + 1))**es.opts['CSA_damp_mueff_exponent'] - 1]) + self.cs ) self.max_delta_log_sigma = 1 # in symmetric use (strict lower bound is -cs/damps anyway) if self.disregard_length_setting: es.opts['CSA_clip_length_value'] = [0, 0] ## meta_parameters.cs_exponent == 1.0 b = 1.0 * 0.5 ## meta_parameters.cs_multiplier == 1.0 self.cs = 1.0 * (es.sp.mueff + 1)**b / (es.N**b + 2 * es.sp.mueff**b) self.damps = es.opts['CSA_dampfac'] * 1 # * (1.1 - 1/(es.N+1)**0.5) if es.opts['verbose'] > 1 or self.disregard_length_setting or 11 < 3: print('SigmaCSA Parameters') for k, v in self.__dict__.items(): print(' ', k, ':', v) self.ps = np.zeros(es.N) self._ps_updated_iteration = -1 self.is_initialized = True def _update_ps(self, es): if not self.is_initialized: self.initialize(es) if self._ps_updated_iteration == es.countiter: return z = dot(es.B, (1. 
/ es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)) z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz if es.opts['CSA_clip_length_value'] is not None: vals = es.opts['CSA_clip_length_value'] min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2) max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2) act_len = sum(z**2)**0.5 new_len = Mh.minmax(act_len, min_len, max_len) if new_len != act_len: z *= new_len / act_len # z *= (es.N / sum(z**2))**0.5 # ==> sum(z**2) == es.N # z *= es.const.chiN / sum(z**2)**0.5 self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z self._ps_updated_iteration = es.countiter def update(self, es, **kwargs): self._update_ps(es) # caveat: if es.B or es.D are already updated and ps is not, this goes wrong! if es.opts['CSA_squared']: s = (sum(self.ps**2) / es.N - 1) / 2 # sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed # divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1 else: s = sum(self.ps**2)**0.5 / es.const.chiN - 1 if es.opts['vv'] == 'pc for ps': s = sum((es.D**-1 * dot(es.B.T, es.pc))**2)**0.5 / es.const.chiN - 1 s = (sum((es.D**-1 * dot(es.B.T, es.pc))**2) / es.N - 1) / 2 s *= self.cs / self.damps s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma) es.sigma *= np.exp(s_clipped) # "error" handling if s_clipped != s: _print_warning('sigma change exp(' + str(s) + ') = ' + str(np.exp(s)) + ' clipped to exp(+-' + str(self.max_delta_log_sigma) + ')', 'update', 'CMAAdaptSigmaCSA', es.countiter, es.opts['verbose']) class CMAAdaptSigmaMedianImprovement(CMAAdaptSigmaBase): """Compares median fitness against a fitness percentile of the previous iteration, see Ait ElHara et al, GECCO 2013. """ def __init__(self): # CMAAdaptSigmaBase.__init__(self) super(CMAAdaptSigmaMedianImprovement, self).__init__() # base class provides method hsig() def initialize(self, es): r = es.sp.mueff / es.popsize self.index_to_compare = 0.5 * (r**0.5 + 2.0 * (1 - r**0.5) / log(es.N + 9)**2) * (es.popsize) # TODO self.index_to_compare = (0.30 if not es.opts['vv'] else es.opts['vv']) * es.popsize # TODO self.damp = 2 - 2 / es.N # sign-rule: 2 self.c = 0.3 # sign-rule needs <= 0.3 self.s = 0 # averaged statistics, usually between -1 and +1 def update(self, es, **kwargs): if es.countiter < 2: self.initialize(es) self.fit = es.fit.fit else: ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))] ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))] pt2 = self.index_to_compare - int(self.index_to_compare) # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2 # not in use s = 0 if 1 < 3: s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))]) s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)]) s -= es.popsize / 2. s *= 2. / es.popsize # the range was popsize, is 2 self.s = (1 - self.c) * self.s + self.c * s es.sigma *= exp(self.s / self.damp) # es.more_to_write.append(10**(self.s)) #es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2))) # # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2]))) # # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2]))) self.fit = es.fit.fit class CMAAdaptSigmaTPA(CMAAdaptSigmaBase): """two point adaptation for step-size sigma. 
Relies on a specific sampling of the first two offspring, whose objective function value ranks are used to decide on the step-size change. Example ======= >>> import cma >>> cma.CMAOptions('adapt').pprint() >>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.1, {'AdaptSigma': cma.CMAAdaptSigmaTPA, 'ftarget': 1e-8}) >>> es.optimize(cma.fcts.rosen) >>> assert 'ftarget' in es.stop() >>> assert es.result()[1] <= 1e-8 >>> assert es.result()[2] < 6500 # typically < 5500 References: loosely based on Hansen 2008, CMA-ES with Two-Point Step-Size Adaptation, more tightly based on an upcoming paper by Hansen et al. """ def __init__(self, dimension=None, opts=None): super(CMAAdaptSigmaTPA, self).__init__() # base class provides method hsig() # CMAAdaptSigmaBase.__init__(self) self.initialized = False self.dimension = dimension self.opts = opts def initialize(self, N=None, opts=None): if N is None: N = self.dimension if opts is None: opts = self.opts try: damp_fac = opts['CSA_dampfac'] # should be renamed to sigma_adapt_dampfac or something except (TypeError, KeyError): damp_fac = 1 self.sp = _BlancClass() # just a container to have sp.name instead of sp['name'] to access parameters try: self.sp.damp = damp_fac * eval('N')**0.5 # why do we need 10 <-> exp(1/10) == 1.1? 2 should be fine!? # self.sp.damp = damp_fac * (4 - 3.6/eval('N')**0.5) except: self.sp.damp = 4 # - 3.6 / N**0.5 # should become new default _print_warning("dimension not known, damping set to 4", 'initialize', 'CMAAdaptSigmaTPA') try: if opts['vv'][0] == 'TPA_damp': self.sp.damp = opts['vv'][1] print('damp set to %d' % self.sp.damp) except (TypeError): pass self.sp.dampup = 0.5**0.0 * 1.0 * self.sp.damp # 0.5 fails to converge on the Rastrigin function self.sp.dampdown = 2.0**0.0 * self.sp.damp if self.sp.dampup != self.sp.dampdown: print('TPA damping is asymmetric') self.sp.c = 0.3 # rank difference is asymetric and therefore the switch from increase to decrease takes too long self.sp.z_exponent = 0.5 # sign(z) * abs(z)**z_exponent, 0.5 seems better with larger popsize, 1 was default self.sp.sigma_fac = 1.0 # (obsolete) 0.5 feels better, but no evidence whether it is self.sp.relative_to_delta_mean = True # (obsolete) self.s = 0 # the state variable self.last = None self.initialized = True return self def update(self, es, function_values, **kwargs): """the first and second value in ``function_values`` must reflect two mirrored solutions sampled in direction / in opposite direction of the previous mean shift, respectively. """ # TODO: on the linear function, the two mirrored samples lead # to a sharp increase of condition of the covariance matrix. # They should not be used to update the covariance matrix, # if the step-size inreases quickly. This should be fine with # negative updates though. if not self.initialized: self.initialize(es.N, es.opts) if 1 < 3: # use the ranking difference of the mirrors for adaptation # damp = 5 should be fine z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0] z /= es.popsize - 1 # z in [-1, 1] self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent if self.s > 0: es.sigma *= exp(self.s / self.sp.dampup) else: es.sigma *= exp(self.s / self.sp.dampdown) #es.more_to_write.append(10**z) new_injections = True # ____________________________________________________________ # ____________________________________________________________ # class CMAEvolutionStrategy(OOOptimizer): """CMA-ES stochastic optimizer class with ask-and-tell interface. 
Calling Sequences ================= es = CMAEvolutionStrategy(x0, sigma0) es = CMAEvolutionStrategy(x0, sigma0, opts) es = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct) res = CMAEvolutionStrategy(x0, sigma0, opts).optimize(objective_fct).result() Arguments ========= `x0` initial solution, starting point. `x0` is given as "phenotype" which means, if:: opts = {'transformation': [transform, inverse]} is given and ``inverse is None``, the initial mean is not consistent with `x0` in that ``transform(mean)`` does not equal to `x0` unless ``transform(mean)`` equals ``mean``. `sigma0` initial standard deviation. The problem variables should have been scaled, such that a single standard deviation on all variables is useful and the optimum is expected to lie within about `x0` +- ``3*sigma0``. See also options `scaling_of_variables`. Often one wants to check for solutions close to the initial point. This allows, for example, for an easier check of consistency of the objective function and its interfacing with the optimizer. In this case, a much smaller `sigma0` is advisable. `opts` options, a dictionary with optional settings, see class `CMAOptions`. Main interface / usage ====================== The interface is inherited from the generic `OOOptimizer` class (see also there). An object instance is generated from es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2) The least verbose interface is via the optimize method:: es.optimize(objective_func) res = es.result() More verbosely, the optimization is done using the methods ``stop``, ``ask``, and ``tell``:: while not es.stop(): solutions = es.ask() es.tell(solutions, [cma.fcts.rosen(s) for s in solutions]) es.disp() es.result_pretty() where ``ask`` delivers new candidate solutions and ``tell`` updates the ``optim`` instance by passing the respective function values (the objective function ``cma.fcts.rosen`` can be replaced by any properly defined objective function, see ``cma.fcts`` for more examples). To change an option, for example a termination condition to continue the optimization, call es.opts.set({'tolfacupx': 1e4}) The class `CMAEvolutionStrategy` also provides:: (solutions, func_values) = es.ask_and_eval(objective_func) and an entire optimization can also be written like:: while not es.stop(): es.tell(*es.ask_and_eval(objective_func)) Besides for termination criteria, in CMA-ES only the ranks of the `func_values` are relevant. 
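    A compact, self-contained variant of the above loop using ``ask_and_eval``
    (illustrative; the option value is arbitrary)::

        es = CMAEvolutionStrategy(8 * [0.5], 0.2, {'verb_disp': 0})
        while not es.stop():
            X, fit = es.ask_and_eval(cma.fcts.rosen)
            es.tell(X, fit)
        print(es.result()[0], es.result()[1])
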
Attributes and Properties ========================= - `inputargs` -- passed input arguments - `inopts` -- passed options - `opts` -- actually used options, some of them can be changed any time via ``opts.set``, see class `CMAOptions` - `popsize` -- population size lambda, number of candidate solutions returned by `ask()` - `logger` -- a `CMADataLogger` instance utilized by `optimize` Examples ======== Super-short example, with output shown: >>> import cma >>> # construct an object instance in 4-D, sigma0=1: >>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234}) (4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234) >>> >>> # optimize the ellipsoid function >>> es.optimize(cma.fcts.elli, verb_disp=1) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 8 2.093015112685775e+04 1.0e+00 9.27e-01 9e-01 9e-01 0:0.0 2 16 4.964814235917688e+04 1.1e+00 9.54e-01 9e-01 1e+00 0:0.0 3 24 2.876682459926845e+05 1.2e+00 1.02e+00 9e-01 1e+00 0:0.0 100 800 6.809045875281943e-01 1.3e+02 1.41e-02 1e-04 1e-02 0:0.2 200 1600 2.473662150861846e-10 8.0e+02 3.08e-05 1e-08 8e-06 0:0.5 233 1864 2.766344961865341e-14 8.6e+02 7.99e-07 8e-11 7e-08 0:0.6 >>> >>> cma.pprint(es.result()) (array([ -1.98546755e-09, -1.10214235e-09, 6.43822409e-11, -1.68621326e-11]), 4.5119610261406537e-16, 1666, 1672, 209, array([ -9.13545269e-09, -1.45520541e-09, -6.47755631e-11, -1.00643523e-11]), array([ 3.20258681e-08, 3.15614974e-09, 2.75282215e-10, 3.27482983e-11])) >>> assert es.result()[1] < 1e-9 >>> help(es.result) Help on method result in module cma: result(self) method of cma.CMAEvolutionStrategy instance return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)`` The optimization loop can also be written explicitly. >>> import cma >>> es = cma.CMAEvolutionStrategy(4 * [1], 1) >>> while not es.stop(): ... X = es.ask() ... es.tell(X, [cma.fcts.elli(x) for x in X]) ... es.disp() achieving the same result as above. An example with lower bounds (at zero) and handling infeasible solutions: >>> import cma >>> import numpy as np >>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5, {'bounds': [0, np.inf]}) >>> while not es.stop(): ... fit, X = [], [] ... while len(X) < es.popsize: ... curr_fit = None ... while curr_fit in (None, np.NaN): ... x = es.ask(1)[0] ... curr_fit = cma.fcts.somenan(x, cma.fcts.elli) # might return np.NaN ... X.append(x) ... fit.append(curr_fit) ... es.tell(X, fit) ... es.logger.add() ... es.disp() >>> >>> assert es.result()[1] < 1e-9 >>> assert es.result()[2] < 9000 # by internal termination >>> # es.logger.plot() # will plot data >>> # cma.show() # display plot window An example with user-defined transformation, in this case to realize a lower bound of 2. >>> es = cma.CMAEvolutionStrategy(5 * [3], 1, ... {"transformation": [lambda x: x**2+2, None]}) >>> es.optimize(cma.fcts.rosen) >>> assert cma.fcts.rosen(es.result()[0]) < 1e-6 + 5.530760944396627e+02 >>> assert es.result()[2] < 3300 The inverse transformation is (only) necessary if the `BoundPenalty` boundary handler is used at the same time. The ``CMAEvolutionStrategy`` class also provides a default logger (cave: files are overwritten when the logger is used with the same filename prefix): >>> import cma >>> es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0}) >>> es.logger.disp_header() # to understand the print of disp Iterat Nfevals function value axis ratio maxstd minstd >>> while not es.stop(): ... X = es.ask() ... es.tell(X, [cma.fcts.sphere(x) for x in X]) ... 
es.logger.add() # log current iteration ... es.logger.disp([-1]) # display info for last iteration 1 8 2.72769793021748e+03 1.0e+00 4.05e-01 3.99e-01 2 16 6.58755537926063e+03 1.1e+00 4.00e-01 3.39e-01 193 1544 3.15195320957214e-15 1.2e+03 3.70e-08 3.45e-11 >>> es.logger.disp_header() Iterat Nfevals function value axis ratio maxstd minstd >>> # es.logger.plot() # will make a plot Example implementing restarts with increasing popsize (IPOP), output is not displayed: >>> import cma, numpy as np >>> >>> # restart with increasing population size (IPOP) >>> bestever = cma.BestSolution() >>> for lam in 10 * 2**np.arange(8): # 10, 20, 40, 80, ..., 10 * 2**7 ... es = cma.CMAEvolutionStrategy('6 - 8 * np.random.rand(9)', # 9-D ... 5, # initial std sigma0 ... {'popsize': lam, # options ... 'verb_append': bestever.evalsall}) ... logger = cma.CMADataLogger().register(es, append=bestever.evalsall) ... while not es.stop(): ... X = es.ask() # get list of new solutions ... fit = [cma.fcts.rastrigin(x) for x in X] # evaluate each solution ... es.tell(X, fit) # besides for termination only the ranking in fit is used ... ... # display some output ... logger.add() # add a "data point" to the log, writing in files ... es.disp() # uses option verb_disp with default 100 ... ... print('termination:', es.stop()) ... cma.pprint(es.best.__dict__) ... ... bestever.update(es.best) ... ... # show a plot ... # logger.plot(); ... if bestever.f < 1e-8: # global optimum was hit ... break >>> assert es.result()[1] < 1e-8 On the Rastrigin function, usually after five restarts the global optimum is located. Using the ``multiprocessing`` module, we can evaluate the function in parallel with a simple modification of the example (however multiprocessing seems not always reliable):: try: import multiprocessing as mp import cma es = cma.CMAEvolutionStrategy(22 * [0.0], 1.0, {'maxiter':10}) pool = mp.Pool(es.popsize) while not es.stop(): X = es.ask() f_values = pool.map_async(cma.felli, X).get() # use chunksize parameter as es.popsize/len(pool)? es.tell(X, f_values) es.disp() es.logger.add() except ImportError: pass The final example shows how to resume: >>> import cma, pickle >>> >>> es = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D ... 0.5) # initial std sigma0 >>> es.optimize(cma.fcts.rosen, iterations=100) >>> pickle.dump(es, open('saved-cma-object.pkl', 'wb')) >>> print('saved') >>> del es # let's start fresh >>> >>> es = pickle.load(open('saved-cma-object.pkl', 'rb')) >>> print('resumed') >>> es.optimize(cma.fcts.rosen, verb_disp=200) >>> assert es.result()[2] < 15000 >>> cma.pprint(es.result()) Details ======= The following two enhancements are implemented, the latter is turned on by default only for very small population size. *Active CMA* is implemented with option ``CMA_active`` and conducts an update of the covariance matrix with negative weights. The negative update is implemented, such that positive definiteness is guarantied. The update is applied after the default update and only before the covariance matrix is decomposed, which limits the additional computational burden to be at most a factor of three (typically smaller). A typical speed up factor (number of f-evaluations) is between 1.1 and two. References: Jastrebski and Arnold, CEC 2006, Glasmachers et al, GECCO 2010. *Selective mirroring* is implemented with option ``CMA_mirrors`` in the method ``get_mirror()``. Only the method `ask_and_eval()` (used by `fmin`) will then sample selectively mirrored vectors. 
In selective mirroring, only the worst solutions are mirrored. With the default small number of mirrors, *pairwise selection* (where at most one of the two mirrors contribute to the update of the distribution mean) is implicitly guarantied under selective mirroring and therefore not explicitly implemented. References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011. :See: `fmin()`, `OOOptimizer`, `CMAOptions`, `plot()`, `ask()`, `tell()`, `ask_and_eval()` """ @property # read only attribute decorator for a method def popsize(self): """number of samples by default returned by` ask()` """ return self.sp.popsize # this is not compatible with python2.5: # @popsize.setter # def popsize(self, p): # """popsize cannot be set (this might change in future) # """ # raise _Error("popsize cannot be changed") def stop(self, check=True): """return a dictionary with the termination status. With ``check==False``, the termination conditions are not checked and the status might not reflect the current situation. """ if (check and self.countiter > 0 and self.opts['termination_callback'] and self.opts['termination_callback'] != str(self.opts['termination_callback'])): self.callbackstop = self.opts['termination_callback'](self) return self._stopdict(self, check) # update the stopdict and return a Dict def copy_constructor(self, es): raise NotImplementedError("") def __init__(self, x0, sigma0, inopts={}): """see class `CMAEvolutionStrategy` """ if isinstance(x0, CMAEvolutionStrategy): self.copy_constructor(x0) return self.inputargs = dict(locals()) # for the record del self.inputargs['self'] # otherwise the instance self has a cyclic reference self.inopts = inopts opts = CMAOptions(inopts).complement() # CMAOptions() == fmin([],[]) == defaultOptions() global_verbosity = opts.eval('verbose') if global_verbosity < -8: opts['verb_disp'] = 0 opts['verb_log'] = 0 opts['verb_plot'] = 0 if 'noise_handling' in opts and opts.eval('noise_handling'): raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin') if 'restarts' in opts and opts.eval('restarts'): raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin') self._set_x0(x0) # manage weird shapes, set self.x0 self.N_pheno = len(self.x0) self.sigma0 = sigma0 if isinstance(sigma0, basestring): # TODO: no real need here (do rather in fmin) self.sigma0 = eval(sigma0) # like '1./N' or 'np.random.rand(1)[0]+1e-2' if np.size(self.sigma0) != 1 or np.shape(self.sigma0): raise _Error('input argument sigma0 must be (or evaluate to) a scalar') self.sigma = self.sigma0 # goes to inialize # extract/expand options N = self.N_pheno assert isinstance(opts['fixed_variables'], (basestring, dict)) \ or opts['fixed_variables'] is None # TODO: in case of a string we need to eval the fixed_variables if isinstance(opts['fixed_variables'], dict): N = self.N_pheno - len(opts['fixed_variables']) opts.evalall(locals()) # using only N self.opts = opts self.randn = opts['randn'] self.gp = GenoPheno(self.N_pheno, opts['scaling_of_variables'], opts['typical_x'], opts['fixed_variables'], opts['transformation']) self.boundary_handler = opts.eval('boundary_handling')(opts.eval('bounds')) if not self.boundary_handler.has_bounds(): self.boundary_handler = BoundNone() # just a little faster and well defined elif not self.boundary_handler.is_in_bounds(self.x0): if opts['verbose'] >= 0: _print_warning('initial solution is out of the domain boundaries:') print(' x0 = ' + str(self.gp.pheno(self.x0))) print(' ldom = ' + 
str(self.boundary_handler.bounds[0])) print(' udom = ' + str(self.boundary_handler.bounds[1])) # set self.mean to geno(x0) tf_geno_backup = self.gp.tf_geno if self.gp.tf_pheno and self.gp.tf_geno is None: self.gp.tf_geno = lambda x: x # a hack to avoid an exception _print_warning(""" computed initial point is likely to be wrong, because no inverse was found of user provided phenotype transformation""") self.mean = self.gp.geno(self.x0, from_bounds=self.boundary_handler.inverse, copy_always=True) self.gp.tf_geno = tf_geno_backup # without copy_always interface: # self.mean = self.gp.geno(array(self.x0, copy=True), copy_if_changed=False) self.N = len(self.mean) assert N == self.N self.fmean = np.NaN # TODO name should change? prints nan in output files (OK with matlab&octave) self.fmean_noise_free = 0. # for output only self.adapt_sigma = opts['AdaptSigma'] if self.adapt_sigma is False: self.adapt_sigma = CMAAdaptSigmaNone self.adapt_sigma = self.adapt_sigma() # class instance self.sp = _CMAParameters(N, opts) self.sp0 = self.sp # looks useless, as it is not a copy # initialization of state variables self.countiter = 0 self.countevals = max((0, opts['verb_append'])) \ if not isinstance(opts['verb_append'], bool) else 0 self.pc = np.zeros(N) self.pc_neg = np.zeros(N) def eval_scaling_vector(in_): res = 1 if np.all(in_): res = array(in_, dtype=float) if np.size(res) not in (1, N): raise ValueError("""CMA_stds option must have dimension %d instead of %d""" % (str(N), np.size(res))) return res self.sigma_vec = eval_scaling_vector(self.opts['CMA_stds']) if isfinite(self.opts['CMA_dampsvec_fac']): self.sigma_vec *= np.ones(N) # make sure to get a vector self.sigma_vec0 = self.sigma_vec if isscalar(self.sigma_vec) \ else self.sigma_vec.copy() stds = eval_scaling_vector(self.opts['CMA_teststds']) if self.opts['CMA_diagonal']: # is True or > 0 # linear time and space complexity self.B = array(1) # fine for np.dot(self.B, .) and self.B.T self.C = stds**2 * np.ones(N) # in case stds == 1 self.dC = self.C else: self.B = np.eye(N) # identity(N) # prevent equal eigenvals, a hack for np.linalg: # self.C = np.diag(stds**2 * exp(1e-4 * np.random.rand(N))) self.C = np.diag(stds**2 * exp((1e-4 / N) * np.arange(N))) self.dC = np.diag(self.C).copy() self._Yneg = np.zeros((N, N)) self.D = self.dC**0.5 # we assume that C is diagonal # self.gp.pheno adds fixed variables relative_stds = ((self.gp.pheno(self.mean + self.sigma * self.sigma_vec * self.D) - self.gp.pheno(self.mean - self.sigma * self.sigma_vec * self.D)) / 2.0 / (self.boundary_handler.get_bounds('upper', self.N_pheno) - self.boundary_handler.get_bounds('lower', self.N_pheno))) if np.any(relative_stds > 1): raise ValueError('initial standard deviations larger than the bounded domain size in variables ' + str(np.where(relative_stds > 1)[0])) self._flgtelldone = True self.itereigenupdated = self.countiter self.count_eigen = 0 self.noiseS = 0 # noise "signal" self.hsiglist = [] if not opts['seed']: np.random.seed() six_decimals = (time.time() - 1e6 * (time.time() // 1e6)) opts['seed'] = 1e5 * np.random.rand() + six_decimals + 1e5 * (time.time() % 1) opts['seed'] = int(opts['seed']) np.random.seed(opts['seed']) # CAVEAT: this only seeds np.random self.sent_solutions = CMASolutionDict() self.archive = CMASolutionDict() self.best = BestSolution() self.const = _BlancClass() self.const.chiN = N**0.5 * (1 - 1. / (4.*N) + 1. 
/ (21.*N**2)) # expectation of norm(randn(N,1)) self.logger = CMADataLogger(opts['verb_filenameprefix'], modulo=opts['verb_log']).register(self) # attribute for stopping criteria in function stop self._stopdict = _CMAStopDict() self.callbackstop = 0 self.fit = _BlancClass() self.fit.fit = [] # not really necessary self.fit.hist = [] # short history of best self.fit.histbest = [] # long history of best self.fit.histmedian = [] # long history of median self.more_to_write = [] # [1, 1, 1, 1] # N*[1] # needed when writing takes place before setting # say hello if opts['verb_disp'] > 0 and opts['verbose'] >= 0: sweighted = '_w' if self.sp.mu > 1 else '' smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else '' print('(%d' % (self.sp.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr + ')-' + ('a' if opts['CMA_active'] else '') + 'CMA-ES' + ' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.mueff, int(100 * self.sp.weights[0])) + ' in dimension %d (seed=%d, %s)' % (N, opts['seed'], time.asctime())) # + func.__name__ if opts['CMA_diagonal'] and self.sp.CMA_on: s = '' if opts['CMA_diagonal'] is not True: s = ' for ' if opts['CMA_diagonal'] < np.inf: s += str(int(opts['CMA_diagonal'])) else: s += str(np.floor(opts['CMA_diagonal'])) s += ' iterations' s += ' (1/ccov=' + str(round(1. / (self.sp.c1 + self.sp.cmu))) + ')' print(' Covariance matrix is diagonal' + s) def _set_x0(self, x0): if x0 == str(x0): x0 = eval(x0) self.x0 = array(x0) # should not have column or row, is just 1-D if self.x0.ndim == 2: if self.opts.eval('verbose') >= 0: _print_warning('input x0 should be a list or 1-D array, trying to flatten ' + str(self.x0.shape) + '-array') if self.x0.shape[0] == 1: self.x0 = self.x0[0] elif self.x0.shape[1] == 1: self.x0 = array([x[0] for x in self.x0]) if self.x0.ndim != 1: raise _Error('x0 must be 1-D array') if len(self.x0) <= 1: raise _Error('optimization in 1-D is not supported (code was never tested)') self.x0.resize(self.x0.shape[0]) # 1-D array, not really necessary?! # ____________________________________________________________ # ____________________________________________________________ def ask(self, number=None, xmean=None, sigma_fac=1, gradf=None, args=()): """get new candidate solutions, sampled from a multi-variate normal distribution and transformed to f-representation (phenotype) to be evaluated. Arguments --------- `number` number of returned solutions, by default the population size ``popsize`` (AKA ``lambda``). `xmean` distribution mean, phenotyp? `sigma_fac` multiplier for internal sample width (standard deviation) `gradf` gradient, ``len(gradf(x)) == len(x)``, if ``gradf is not None`` the third solution in the returned list is "sampled" in supposedly Newton direction ``dot(C, gradf(xmean, *args))``. `args` additional arguments passed to gradf Return ------ A list of N-dimensional candidate solutions to be evaluated Example ------- >>> import cma >>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3) >>> while not es.stop() and es.best.f > 1e-6: # my_desired_target_f_value ... X = es.ask() # get list of new solutions ... fit = [cma.fcts.rosen(x) for x in X] # call function rosen with each solution ... es.tell(X, fit) # feed values :See: `ask_and_eval`, `ask_geno`, `tell` """ pop_geno = self.ask_geno(number, xmean, sigma_fac) # N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds! # new data: 11.5s vs 9.5s == 20% # TODO: check here, whether this is necessary? 
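# Hedged usage sketch (assumption: the bundled ``cma`` module is importable and
# ``sphere``/``sphere_grad`` are toy functions defined here, not part of cma):
# it illustrates the ``gradf`` argument documented above, which makes ``ask``
# place one candidate along the estimated Newton direction.
import numpy as np
import cma

def sphere(x):
    return float(np.sum(np.asarray(x) ** 2))

def sphere_grad(x, *args):
    return 2.0 * np.asarray(x)

es = cma.CMAEvolutionStrategy(4 * [1.0], 0.5, {'verb_disp': 0})
while not es.stop():
    X = es.ask(gradf=sphere_grad)            # one sample uses the gradient
    es.tell(X, [sphere(x) for x in X])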
# return [self.gp.pheno(x, copy=False, into_bounds=self.boundary_handler.repair) for x in pop] # probably fine # return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved pop_pheno = [self.gp.pheno(x, copy=True, into_bounds=self.boundary_handler.repair) for x in pop_geno] if gradf is not None: # see Hansen (2011), Injecting external solutions into CMA-ES if not self.gp.islinear: _print_warning(""" using the gradient (option ``gradf``) with a non-linear coordinate-wise transformation (option ``transformation``) has never been tested.""") # TODO: check this out def grad_numerical_of_coordinate_map(x, map, epsilon=None): """map is a coordinate-wise independent map, return the estimated diagonal of the Jacobian. """ eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon return (map(x + eps) - map(x - eps)) / (2 * eps) def grad_numerical_sym(x, func, epsilon=None): """return symmetric numerical gradient of func : R^n -> R. """ eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon grad = np.zeros(len(x)) ei = np.zeros(len(x)) # float is 1.6 times faster than int for i in rglen(x): ei[i] = eps[i] grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i]) ei[i] = 0 return grad try: if self.last_iteration_with_gradient == self.countiter: _print_warning('gradient is used several times in ' + 'this iteration', iteration=self.countiter) self.last_iteration_with_gradient = self.countiter except AttributeError: pass index_for_gradient = min((2, len(pop_pheno)-1)) xmean = self.mean if xmean is None else xmean xpheno = self.gp.pheno(xmean, copy=True, into_bounds=self.boundary_handler.repair) grad_at_mean = gradf(xpheno, *args) # lift gradient into geno-space if not self.gp.isidentity or (self.boundary_handler is not None and self.boundary_handler.has_bounds()): boundary_repair = None gradpen = 0 if isinstance(self.boundary_handler, BoundTransform): boundary_repair = self.boundary_handler.repair elif isinstance(self.boundary_handler, BoundPenalty): fpenalty = lambda x: self.boundary_handler.__call__( x, SolutionDict({tuple(x): {'geno': x}}), self.gp) gradpen = grad_numerical_sym( xmean, fpenalty) elif self.boundary_handler is None or \ isinstance(self.boundary_handler, BoundNone): pass else: raise NotImplementedError( "unknown boundary handling method" + str(self.boundary_handler) + " when using gradf") gradgp = grad_numerical_of_coordinate_map( xmean, lambda x: self.gp.pheno(x, copy=True, into_bounds=boundary_repair)) grad_at_mean = grad_at_mean * gradgp + gradpen # TODO: frozen variables brake the code (e.g. at grad of map) if len(grad_at_mean) != self.N and self.opts['fixed_variables']: NotImplementedError(""" gradient with fixed variables is not yet implemented""") v = self.D * dot(self.B.T, self.sigma_vec * grad_at_mean) # newton_direction = sv * B * D * D * B^T * sv * gradient = sv * B * D * v # v = D^-1 * B^T * sv^-1 * newton_direction = D * B^T * sv * gradient q = sum(v**2) if q: # Newton direction pop_geno[index_for_gradient] = xmean - self.sigma \ * (self.N / q)**0.5 \ * (self.sigma_vec * dot(self.B, self.D * v)) else: pop_geno[index_for_gradient] = xmean _print_warning('gradient zero observed', iteration=self.countiter) pop_pheno[index_for_gradient] = self.gp.pheno( pop_geno[index_for_gradient], copy=True, into_bounds=self.boundary_handler.repair) # insert solutions, this could also (better?) 
be done in self.gp.pheno for i in rglen((pop_geno)): self.sent_solutions.insert(pop_pheno[i], geno=pop_geno[i], iteration=self.countiter) return pop_pheno # ____________________________________________________________ # ____________________________________________________________ def ask_geno(self, number=None, xmean=None, sigma_fac=1): """get new candidate solutions in genotyp, sampled from a multi-variate normal distribution. Arguments are `number` number of returned solutions, by default the population size `popsize` (AKA lambda). `xmean` distribution mean `sigma_fac` multiplier for internal sample width (standard deviation) `ask_geno` returns a list of N-dimensional candidate solutions in genotyp representation and is called by `ask`. Details: updates the sample distribution and might change the geno-pheno transformation during this update. :See: `ask`, `ask_and_eval` """ if number is None or number < 1: number = self.sp.popsize # update distribution, might change self.mean if self.sp.CMA_on and ( (self.opts['updatecovwait'] is None and self.countiter >= self.itereigenupdated + 1. / (self.sp.c1 + self.sp.cmu) / self.N / 10 ) or (self.opts['updatecovwait'] is not None and self.countiter > self.itereigenupdated + self.opts['updatecovwait'] ) or (self.sp.neg.cmuexp * (self.countiter - self.itereigenupdated) > 0.5 ) # TODO (minor): not sure whether this is "the right" criterion ): self.updateBD() if xmean is None: xmean = self.mean else: try: xmean = self.archive[xmean]['geno'] # noise handling after call of tell except KeyError: try: xmean = self.sent_solutions[xmean]['geno'] # noise handling before calling tell except KeyError: pass if self.countiter == 0: self.tic = time.clock() # backward compatible self.elapsed_time = ElapsedTime() sigma = sigma_fac * self.sigma # update parameters for sampling the distribution # fac 0 1 10 # 150-D cigar: # 50749 50464 50787 # 200-D elli: == 6.9 # 99900 101160 # 100995 103275 == 2% loss # 100-D elli: == 6.9 # 363052 369325 < 2% loss # 365075 365755 # sample distribution if self._flgtelldone: # could be done in tell()!? 
self._flgtelldone = False self.ary = [] # check injections from pop_injection_directions arinj = [] if hasattr(self, 'pop_injection_directions'): if self.countiter < 4 and \ len(self.pop_injection_directions) > self.popsize - 2: _print_warning(' %d special injected samples with popsize %d, ' % (len(self.pop_injection_directions), self.popsize) + "popsize %d will be used" % (len(self.pop_injection_directions) + 2) + (" and the warning is suppressed in the following" if self.countiter == 3 else "")) while self.pop_injection_directions: y = self.pop_injection_directions.pop(0) if self.opts['CMA_sample_on_sphere_surface']: y *= (self.N**0.5 if self.opts['CSA_squared'] else self.const.chiN) / self.mahalanobis_norm(y) arinj.append(y) else: y *= self.random_rescaling_factor_to_mahalanobis_size(y) / self.sigma arinj.append(y) # each row is a solution # the 1 is a small safeguard which needs to be removed to implement "pure" adaptive encoding arz = self.randn((max([1, (number - len(arinj))]), self.N)) if self.opts['CMA_sample_on_sphere_surface']: # normalize the length to chiN for i in rglen((arz)): ss = sum(arz[i]**2) if 1 < 3 or ss > self.N + 10.1: arz[i] *= (self.N**0.5 if self.opts['CSA_squared'] else self.const.chiN) / ss**0.5 # or to average # arz *= 1 * self.const.chiN / np.mean([sum(z**2)**0.5 for z in arz]) # fac = np.mean(sum(arz**2, 1)**0.5) # print fac # arz *= self.const.chiN / fac # compute ary from arz if len(arz): # should always be true # apply unconditional mirroring, is pretty obsolete if new_injections and self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 0: for i in xrange(self.sp.lam_mirr): if 2 * (i + 1) > len(arz): if self.countiter < 4: _print_warning("fewer mirrors generated than given in parameter setting (%d<%d)" % (i, self.sp.lam_mirr)) break arz[-1 - 2 * i] = -arz[-2 - 2 * i] ary = self.sigma_vec * np.dot(self.B, (self.D * arz).T).T if len(arinj): ary = np.vstack((arinj, ary)) else: ary = array(arinj) # TODO: subject to removal in future if not new_injections and number > 2 and self.countiter > 2: if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or self.opts['mean_shift_line_samples'] or self.opts['pc_line_samples']): ys = [] if self.opts['pc_line_samples']: ys.append(self.pc[:]) # now TPA is with pc_line_samples if self.opts['mean_shift_line_samples']: ys.append(self.mean - self.mean_old) if not len(ys): ys.append(self.mean - self.mean_old) # assign a mirrored pair from each element of ys into ary for i, y in enumerate(ys): if len(arz) > 2 * i + 1: # at least two more samples assert y is not self.pc # y *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobis_norm(y) y *= self.random_rescaling_factor_to_mahalanobis_size(y) # TODO: rescale y depending on some parameter? ary[2*i] = y / self.sigma ary[2*i + 1] = y / -self.sigma else: _print_warning('line samples omitted due to small popsize', method_name='ask_geno', iteration=self.countiter) # print(xmean[0]) pop = xmean + sigma * ary self.evaluations_per_f_value = 1 self.ary = ary return pop def random_rescale_to_mahalanobis(self, x): """change `x` like for injection, all on genotypic level""" x -= self.mean if any(x): x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x) x += self.mean return x def random_rescaling_factor_to_mahalanobis_size(self, y): """``self.mean + self.random_rescaling_factor_to_mahalanobis_size(y)`` is guarantied to appear like from the sample distribution. 
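# Stand-alone NumPy sketch (assumption: toy values for B, D, sigma; not part of
# the class) of the sampling transformation assembled just above:
#   x = m + sigma * sigma_vec * B.dot(D * z)  with  z ~ N(0, I),
# i.e. a draw from N(m, sigma**2 * diag(sigma_vec) * C * diag(sigma_vec)).
import numpy as np
N, lam = 3, 5
B, D = np.eye(N), np.ones(N)        # eigen-decomposition of C = B * diag(D**2) * B^T
sigma, sigma_vec, m = 0.5, 1.0, np.zeros(N)
z = np.random.standard_normal((lam, N))
pop = m + sigma * (sigma_vec * np.dot(B, (D * z).T).T)   # lam candidate genotypes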
""" if len(y) != self.N: raise ValueError('len(y)=%d != %d=dimension' % (len(y), self.N)) if not any(y): _print_warning("input was all-zeros, which is probably a bug", "random_rescaling_factor_to_mahalanobis_size", iteration=self.countiter) return 1.0 return sum(self.randn(len(y))**2)**0.5 / self.mahalanobis_norm(y) def get_mirror(self, x, preserve_length=False): """return ``pheno(self.mean - (geno(x) - self.mean))``. >>> import cma >>> es = cma.CMAEvolutionStrategy(cma.np.random.randn(3), 1) >>> x = cma.np.random.randn(3) >>> assert cma.Mh.vequals_approximately(es.mean - (x - es.mean), es.get_mirror(x, preserve_length=True)) >>> x = es.ask(1)[0] >>> vals = (es.get_mirror(x) - es.mean) / (x - es.mean) >>> assert cma.Mh.equals_approximately(sum(vals), len(vals) * vals[0]) TODO: this implementation is yet experimental. TODO: this implementation includes geno-pheno transformation, however in general GP-transformation should be separated from specific code. Selectively mirrored sampling improves to a moderate extend but overadditively with active CMA for quite understandable reasons. Optimal number of mirrors are suprisingly small: 1,2,3 for maxlam=7,13,20 where 3,6,10 are the respective maximal possible mirrors that must be clearly suboptimal. """ try: dx = self.sent_solutions[x]['geno'] - self.mean except: # can only happen with injected solutions?! dx = self.gp.geno(x, from_bounds=self.boundary_handler.inverse, copy_if_changed=True) - self.mean if not preserve_length: # dx *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobis_norm(dx) dx *= self.random_rescaling_factor_to_mahalanobis_size(dx) x = self.mean - dx y = self.gp.pheno(x, into_bounds=self.boundary_handler.repair) # old measure: costs 25% in CPU performance with N,lambda=20,200 self.sent_solutions.insert(y, geno=x, iteration=self.countiter) return y def _mirror_penalized(self, f_values, idx): """obsolete and subject to removal (TODO), return modified f-values such that for each mirror one becomes worst. This function is useless when selective mirroring is applied with no more than (lambda-mu)/2 solutions. Mirrors are leading and trailing values in ``f_values``. """ assert len(f_values) >= 2 * len(idx) m = np.max(np.abs(f_values)) for i in len(idx): if f_values[idx[i]] > f_values[-1 - i]: f_values[idx[i]] += m else: f_values[-1 - i] += m return f_values def _mirror_idx_cov(self, f_values, idx1): # will most likely be removed """obsolete and subject to removal (TODO), return indices for negative ("active") update of the covariance matrix assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are the corresponding mirrored values computes the index of the worse solution sorted by the f-value of the better solution. TODO: when the actual mirror was rejected, it is better to return idx1 instead of idx2. Remark: this function might not be necessary at all: if the worst solution is the best mirrored, the covariance matrix updates cancel (cave: weights and learning rates), which seems what is desirable. If the mirror is bad, as strong negative update is made, again what is desirable. And the fitness--step-length correlation is in part addressed by using flat weights. 
""" idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1) f = [] for i in rglen((idx1)): f.append(min((f_values[idx1[i]], f_values[idx2[i]]))) # idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i]) return idx2[np.argsort(f)][-1::-1] def eval_mean(self, func, args=()): """evaluate the distribution mean, this is not (yet) effective in terms of termination or display""" self.fmean = func(self.mean, *args) return self.fmean # ____________________________________________________________ # ____________________________________________________________ # def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None, sigma_fac=1, evaluations=1, aggregation=np.median, kappa=1): """samples `number` solutions and evaluates them on `func`, where each solution `s` is resampled until ``self.is_feasible(s, func(s)) is True``. Arguments --------- `func` objective function, ``func(x)`` returns a scalar `args` additional parameters for `func` `gradf` gradient of objective function, ``g = gradf(x, *args)`` must satisfy ``len(g) == len(x)`` `number` number of solutions to be sampled, by default population size ``popsize`` (AKA lambda) `xmean` mean for sampling the solutions, by default ``self.mean``. `sigma_fac` multiplier for sampling width, standard deviation, for example to get a small perturbation of solution `xmean` `evaluations` number of evaluations for each sampled solution `aggregation` function that aggregates `evaluations` values to as single value. `kappa` multiplier used for the evaluation of the solutions, in that ``func(m + kappa*(x - m))`` is the f-value for x. Return ------ ``(X, fit)``, where X -- list of solutions fit -- list of respective function values Details ------- While ``not self.is_feasible(x, func(x))``new solutions are sampled. By default ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``. The argument to `func` can be freely modified within `func`. Depending on the ``CMA_mirrors`` option, some solutions are not sampled independently but as mirrors of other bad solutions. This is a simple derandomization that can save 10-30% of the evaluations in particular with small populations, for example on the cigar function. Example ------- >>> import cma >>> x0, sigma0 = 8*[10], 1 # 8-D >>> es = cma.CMAEvolutionStrategy(x0, sigma0) >>> while not es.stop(): ... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling ... es.tell(X, fit) # pass on fitness values ... 
es.disp(20) # print every 20-th iteration >>> print('terminated on ' + str(es.stop())) A single iteration step can be expressed in one line, such that an entire optimization after initialization becomes :: while not es.stop(): es.tell(*es.ask_and_eval(cma.fcts.elli)) """ # initialize popsize = self.sp.popsize if number is not None: popsize = number selective_mirroring = self.opts['CMA_mirrormethod'] > 0 nmirrors = self.sp.lam_mirr if popsize != self.sp.popsize: nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize) # TODO: now selective mirroring might be impaired assert new_injections or self.opts['CMA_mirrormethod'] < 2 if new_injections and self.opts['CMA_mirrormethod'] != 1: # otherwise mirrors are done elsewhere nmirrors = 0 assert nmirrors <= popsize // 2 self.mirrors_idx = np.arange(nmirrors) # might never be used self.mirrors_rejected_idx = [] # might never be used is_feasible = self.opts['is_feasible'] # do the work fit = [] # or np.NaN * np.empty(number) X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args) if xmean is None: xmean = self.mean # might have changed in self.ask X = [] for k in xrange(int(popsize)): x, f = X_first.pop(0), None rejected = -1 while rejected < 0 or not is_feasible(x, f): # rejection sampling rejected += 1 if rejected: # resample x = self.ask(1, xmean, sigma_fac)[0] elif k >= popsize - nmirrors: # mirrored sample if k == popsize - nmirrors and selective_mirroring: self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1] x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]]) if rejected == 1 and k >= popsize - nmirrors: self.mirrors_rejected_idx.append(k) # contraints handling test hardwired ccccccccccc length_normalizer = 1 # zzzzzzzzzzzzzzzzzzzzzzzzz f = func(x, *args) if kappa == 1 else \ func(xmean + kappa * length_normalizer * (x - xmean), *args) if is_feasible(x, f) and evaluations > 1: f = aggregation([f] + [(func(x, *args) if kappa == 1 else func(xmean + kappa * length_normalizer * (x - xmean), *args)) for _i in xrange(int(evaluations - 1))]) if rejected + 1 % 1000 == 0: print(' %d solutions rejected (f-value NaN or None) at iteration %d' % (rejected, self.countiter)) fit.append(f) X.append(x) self.evaluations_per_f_value = int(evaluations) return X, fit def prepare_injection_directions(self): """provide genotypic directions for TPA and selective mirroring, with no specific length normalization, to be used in the coming iteration. Details: This method is called in the end of `tell`. The result is assigned to ``self.pop_injection_directions`` and used in `ask_geno`. TODO: should be rather appended? """ # self.pop_injection_directions is supposed to be empty here if hasattr(self, 'pop_injection_directions') and self.pop_injection_directions: ValueError("Looks like a bug in calling order/logics") ary = [] if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or self.opts['mean_shift_line_samples']): ary.append(self.mean - self.mean_old) ary.append(self.mean_old - self.mean) # another copy! 
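# Minimal NumPy sketch (assumption: stand-alone toy vectors) of the two-point
# adaptation (TPA) injection prepared just above: the previous mean shift and
# its negation are re-used as a mirrored pair of candidate directions.
import numpy as np
mean_old = np.zeros(3)
mean_new = np.array([0.1, -0.2, 0.05])
injection_directions = [mean_new - mean_old, mean_old - mean_new]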
if ary[-1][0] == 0.0: _print_warning('zero mean shift encountered which ', 'prepare_injection_directions', 'CMAEvolutionStrategy', self.countiter) if self.opts['pc_line_samples']: # caveat: before, two samples were used ary.append(self.pc.copy()) if self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 2: if self.pop_sorted is None: _print_warning('pop_sorted attribute not found, mirrors obmitted', 'prepare_injection_directions', iteration=self.countiter) else: ary += self.get_selective_mirrors() self.pop_injection_directions = ary return ary def get_selective_mirrors(self, number=None, pop_sorted=None): """get mirror genotypic directions of the `number` worst solution, based on ``pop_sorted`` attribute (from last iteration). Details: Takes the last ``number=sp.lam_mirr`` entries in ``pop_sorted=self.pop_sorted`` as solutions to be mirrored. """ if pop_sorted is None: if hasattr(self, 'pop_sorted'): pop_sorted = self.pop_sorted else: return None if number is None: number = self.sp.lam_mirr res = [] for i in xrange(1, number + 1): res.append(self.mean_old - pop_sorted[-i]) return res # ____________________________________________________________ def tell(self, solutions, function_values, check_points=None, copy=False): """pass objective function values to prepare for next iteration. This core procedure of the CMA-ES algorithm updates all state variables, in particular the two evolution paths, the distribution mean, the covariance matrix and a step-size. Arguments --------- `solutions` list or array of candidate solution points (of type `numpy.ndarray`), most presumably before delivered by method `ask()` or `ask_and_eval()`. `function_values` list or array of objective function values corresponding to the respective points. Beside for termination decisions, only the ranking of values in `function_values` is used. `check_points` If ``check_points is None``, only solutions that are not generated by `ask()` are possibly clipped (recommended). ``False`` does not clip any solution (not recommended). If ``True``, clips solutions that realize long steps (i.e. also those that are unlikely to be generated with `ask()`). `check_points` can be a list of indices to be checked in solutions. `copy` ``solutions`` can be modified in this routine, if ``copy is False`` Details ------- `tell()` updates the parameters of the multivariate normal search distribution, namely covariance matrix and step-size and updates also the attributes ``countiter`` and ``countevals``. To check the points for consistency is quadratic in the dimension (like sampling points). Bugs ---- The effect of changing the solutions delivered by `ask()` depends on whether boundary handling is applied. With boundary handling, modifications are disregarded. This is necessary to apply the default boundary handling that uses unrepaired solutions but might change in future. 
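# Hedged usage sketch (assumption: ``es`` is an existing CMAEvolutionStrategy
# instance and ``func`` a scalar objective) of the ``check_points`` argument
# described above: ``False`` trusts all passed solutions, a list of indices
# restricts the consistency check to those solutions.
X = es.ask()
fits = [func(x) for x in X]
es.tell(X, fits, check_points=False)       # never clip long steps
# es.tell(X, fits, check_points=[0, 1])    # alternatively: check only the first two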
Example ------- :: import cma func = cma.fcts.elli # choose objective function es = cma.CMAEvolutionStrategy(cma.np.random.rand(10), 1) while not es.stop(): X = es.ask() es.tell(X, [func(x) for x in X]) es.result() # where the result can be found :See: class `CMAEvolutionStrategy`, `ask()`, `ask_and_eval()`, `fmin()` """ if self._flgtelldone: raise _Error('tell should only be called once per iteration') lam = len(solutions) if lam != array(function_values).shape[0]: raise _Error('for each candidate solution ' + 'a function value must be provided') if lam + self.sp.lam_mirr < 3: raise _Error('population size ' + str(lam) + ' is too small when option CMA_mirrors * popsize < 0.5') if not isscalar(function_values[0]): if isscalar(function_values[0][0]): if self.countiter <= 1: _print_warning('function values are not a list of scalars (further warnings are suppressed)') function_values = [val[0] for val in function_values] else: raise _Error('objective function values must be a list of scalars') # ## prepare N = self.N sp = self.sp if lam < sp.mu: # rather decrease cmean instead of having mu > lambda//2 raise _Error('not enough solutions passed to function tell (mu>lambda)') self.countiter += 1 # >= 1 now self.countevals += sp.popsize * self.evaluations_per_f_value self.best.update(solutions, self.sent_solutions, function_values, self.countevals) flg_diagonal = self.opts['CMA_diagonal'] is True \ or self.countiter <= self.opts['CMA_diagonal'] if not flg_diagonal and len(self.C.shape) == 1: # C was diagonal ie 1-D # enter non-separable phase (no easy return from here) self.C = np.diag(self.C) if 1 < 3: self.B = np.eye(N) # identity(N) idx = np.argsort(self.D) self.D = self.D[idx] self.B = self.B[:, idx] self._Yneg = np.zeros((N, N)) # ## manage fitness fit = self.fit # make short cut # CPU for N,lam=20,200: this takes 10s vs 7s fit.bndpen = self.boundary_handler.update(function_values, self)(solutions, self.sent_solutions, self.gp) # for testing: # fit.bndpen = self.boundary_handler.update(function_values, self)([s.unrepaired for s in solutions]) fit.idx = np.argsort(array(fit.bndpen) + array(function_values)) fit.fit = array(function_values, copy=False)[fit.idx] # update output data TODO: this is obsolete!? However: need communicate current best x-value? # old: out['recent_x'] = self.gp.pheno(pop[0]) # self.out['recent_x'] = array(solutions[fit.idx[0]]) # TODO: change in a data structure(?) and use current as identify # self.out['recent_f'] = fit.fit[0] # fitness histories fit.hist.insert(0, fit.fit[0]) # if len(self.fit.histbest) < 120+30*N/sp.popsize or # does not help, as tablet in the beginning is the critical counter-case if ((self.countiter % 5) == 0): # 20 percent of 1e5 gen. 
fit.histbest.insert(0, fit.fit[0]) fit.histmedian.insert(0, np.median(fit.fit) if len(fit.fit) < 21 else fit.fit[self.popsize // 2]) if len(fit.histbest) > 2e4: # 10 + 30*N/sp.popsize: fit.histbest.pop() fit.histmedian.pop() if len(fit.hist) > 10 + 30 * N / sp.popsize: fit.hist.pop() # TODO: clean up inconsistency when an unrepaired solution is available and used # now get the genotypes pop = self.pop_sorted = [] # create pop from input argument solutions for k, s in enumerate(solutions): # use phenotype before Solution.repair() if 1 < 3: pop += [self.gp.geno(s, from_bounds=self.boundary_handler.inverse, repair=(self.repair_genotype if check_points not in (False, 0, [], ()) else None), archive=self.sent_solutions)] # takes genotype from sent_solutions, if available try: self.archive.insert(s, value=self.sent_solutions.pop(s), fitness=function_values[k]) # self.sent_solutions.pop(s) except KeyError: pass # check that TPA mirrors are available, TODO: move to TPA class? if isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) and self.countiter > 3 and not (self.countiter % 3): dm = self.mean[0] - self.mean_old[0] dx0 = pop[0][0] - self.mean_old[0] dx1 = pop[1][0] - self.mean_old[0] for i in np.random.randint(1, self.N, 1): try: if not Mh.equals_approximately( (self.mean[i] - self.mean_old[i]) / (pop[0][i] - self.mean_old[i]), dm / dx0, 1e-8) or \ not Mh.equals_approximately( (self.mean[i] - self.mean_old[i]) / (pop[1][i] - self.mean_old[i]), dm / dx1, 1e-8): _print_warning('TPA error with mirrored samples', 'tell', 'CMAEvolutionStrategy', self.countiter) except ZeroDivisionError: _print_warning('zero division encountered in TPA check\n which should be very rare and is likely a bug', 'tell', 'CMAEvolutionStrategy', self.countiter) try: moldold = self.mean_old except: pass self.mean_old = self.mean mold = self.mean_old # just an alias # check and normalize each x - m # check_points is a flag (None is default: check non-known solutions) or an index list # should also a number possible (first check_points points)? if check_points not in (None, False, 0, [], ()): # useful in case of injected solutions and/or adaptive encoding, however is automatic with use_sent_solutions try: if len(check_points): idx = check_points except: idx = xrange(sp.popsize) for k in idx: self.repair_genotype(pop[k]) # only arrays can be multiple indexed pop = array(pop, copy=False) # sort pop pop = pop[fit.idx] # prepend best-ever solution to population, in case if self.opts['CMA_elitist'] and self.best.f < fit.fit[0]: if self.best.x_geno is not None: xp = [self.best.x_geno] # xp = [self.best.xdict['geno']] # xp = [self.gp.geno(self.best.x[:])] # TODO: remove # print self.mahalanobis_norm(xp[0]-self.mean) else: xp = [self.gp.geno(array(self.best.x, copy=True), self.boundary_handler.inverse, copy_if_changed=False)] print('genotype for elitist not found') self.clip_or_fit_solutions(xp, [0]) pop = array([xp[0]] + list(pop)) elif self.opts['CMA_elitist'] == 'initial': # current solution was better self.opts['CMA_elitist'] = False self.pop_sorted = pop # compute new mean self.mean = mold + self.sp.cmean * \ (sum(sp.weights * pop[0:sp.mu].T, 1) - mold) # check Delta m (this is not default, but could become at some point) # CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly. # replaced by repair_geno? 
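# Illustrative NumPy sketch (assumption: stand-alone toy numbers, not the class
# attributes) of the weighted recombination performed just above:
#   m_new = m_old + c_mean * (sum_i w_i * x_{i:lambda} - m_old)
import numpy as np
m_old = np.zeros(2)
pop_sorted = np.array([[0.4, 0.1], [0.2, -0.3], [0.6, 0.5]])   # best solution first
weights = np.array([0.6, 0.3, 0.1])                            # positive, sum to one
c_mean = 1.0
m_new = m_old + c_mean * (np.dot(weights, pop_sorted) - m_old)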
# simple test case injecting self.mean: # self.mean = 1e-4 * self.sigma * np.random.randn(N) if 1 < 3: cmean = self.sp.cmean # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz # get learning rate constants cc, c1, cmu = sp.cc, sp.c1, sp.cmu if flg_diagonal: cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep # now the real work can start hsig = self.adapt_sigma.hsig(self) # ps update must be done here in separable case # hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1) # adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small # hsig leads to premature convergence of C otherwise # hsiga = (1-hsig**2) * c1 * cc * (2-cc) # to be removed in future c1a = c1 - (1 - hsig**2) * c1 * cc * (2 - cc) # adjust for variance loss self.pc = (1 - cc) * self.pc + \ hsig * (sqrt(cc * (2 - cc) * sp.mueff) / self.sigma / cmean) * \ (self.mean - mold) / self.sigma_vec # covariance matrix adaptation/udpate if sp.CMA_on: # assert sp.c1 + sp.cmu < sp.mueff / N # ?? assert c1 + cmu <= 1 # default full matrix case if not flg_diagonal: Y = (pop[0:sp.mu] - mold) / (self.sigma * self.sigma_vec) Y = dot((cmu * sp.weights) * Y.T, Y) # learning rate integrated if self.sp.neg.cmuexp: tmp = (pop[-sp.neg.mu:] - mold) / (self.sigma * self.sigma_vec) # normalize to constant length (seems preferable in several aspects) for i in xrange(tmp.shape[0]): tmp[i, :] *= N**0.5 / self.mahalanobis_norm( self.sigma_vec * tmp[i, :]) / self.sigma self._Yneg *= 1 - self.sp.neg.cmuexp # for some reason necessary? self._Yneg += dot(sp.neg.weights * tmp.T, tmp) - self.C # self.update_exponential(dot(sp.neg.weights * tmp.T, tmp) - 1 * self.C, -1*self.sp.neg.cmuexp) self.C *= 1 - c1a - cmu self.C += np.outer(c1 * self.pc, self.pc) + Y self.dC[:] = np.diag(self.C) # for output and termination checking else: # separable/diagonal linear case assert(c1 + cmu <= 1) Z = np.zeros(N) for k in xrange(sp.mu): z = (pop[k] - mold) / (self.sigma * self.sigma_vec) # TODO see above Z += sp.weights[k] * z * z # is 1-D self.C = (1 - c1a - cmu) * self.C + c1 * self.pc * self.pc + cmu * Z # TODO: self.C *= exp(cmuneg * (N - dot(sp.neg.weights, **2) self.dC = self.C self.D = sqrt(self.C) # C is a 1-D array, this is why adapt_sigma needs to prepare before self.itereigenupdated = self.countiter # idx = self._mirror_idx_cov() # take half of mirrored vectors for negative update # step-size adaptation, adapt sigma # in case of TPA, function_values[0] and [1] must reflect samples colinear to xmean - xmean_old self.adapt_sigma.update(self, function_values=function_values) if self.sigma * min(self.sigma_vec * self.dC**0.5) < self.opts['minstd']: self.sigma = self.opts['minstd'] / min(self.sigma_vec * self.dC**0.5) if self.sigma * max(self.sigma_vec * self.dC**0.5) > self.opts['maxstd']: self.sigma = self.opts['maxstd'] / max(self.sigma_vec * self.dC**0.5) # g = self.countiter # N = self.N # mindx = eval(self.opts['mindx']) # if isinstance(self.opts['mindx'], basestring) else self.opts['mindx'] if self.sigma * min(self.D) < self.opts['mindx']: # TODO: sigma_vec is missing here self.sigma = self.opts['mindx'] / min(self.D) if self.sigma > 1e9 * self.sigma0: alpha = self.sigma / max(self.D) self.multiplyC(alpha) self.sigma /= alpha**0.5 self.opts['tolupsigma'] /= alpha**0.5 # to be compared with sigma # TODO increase sigma in case of a plateau? 
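# Toy NumPy sketch (assumption: stand-alone variables; the hsig correction and
# the active/negative update are omitted) of the rank-one plus rank-mu
# covariance update performed above:
#   C <- (1 - c1 - cmu) * C + c1 * pc * pc^T + cmu * sum_i w_i * y_i * y_i^T
import numpy as np
N, mu = 2, 3
C = np.eye(N)
pc = np.array([0.1, -0.05])                       # evolution path
y = np.random.randn(mu, N)                        # steps (x_i - m_old) / sigma
w = np.array([0.6, 0.3, 0.1])                     # recombination weights
c1, cmu = 2.0 / N ** 2, 0.3
C = (1 - c1 - cmu) * C + c1 * np.outer(pc, pc) + cmu * np.dot((w[:, None] * y).T, y)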
# Uncertainty noise measurement is done on an upper level # move mean into "feasible preimage", leads to weird behavior on # 40-D tablet with bound 0.1, not quite explained (constant # dragging is problematic, but why doesn't it settle), still a bug? if new_injections: self.pop_injection_directions = self.prepare_injection_directions() self.pop_sorted = [] # remove this in case pop is still needed self._flgtelldone = True # end tell() def inject(self, solutions): """inject a genotypic solution. The solution is used as direction relative to the distribution mean to compute a new candidate solution returned in method `ask_geno` which in turn is used in method `ask`. >>> import cma >>> es = cma.CMAEvolutionStrategy(4 * [1], 2) >>> while not es.stop(): ... es.inject([4 * [0.0]]) ... X = es.ask() ... break >>> assert X[0][0] == X[0][1] """ if not hasattr(self, 'pop_injection_directions'): self.pop_injection_directions = [] for solution in solutions: if len(solution) != self.N: raise ValueError('method `inject` needs a list or array' + (' each el with dimension (`len`) %d' % self.N)) self.pop_injection_directions.append( array(solution, copy=False, dtype=float) - self.mean) def result(self): """return:: (xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds) """ # TODO: how about xcurrent? return self.best.get() + ( self.countevals, self.countiter, self.gp.pheno(self.mean), self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5) def result_pretty(self, number_of_runs=0, time_str=None, fbestever=None): """pretty print result. Returns ``self.result()`` """ if fbestever is None: fbestever = self.best.f s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \ % number_of_runs if number_of_runs else '' for k, v in self.stop().items(): print('termination on %s=%s%s' % (k, str(v), s + (' (%s)' % time_str if time_str else ''))) print('final/bestever f-value = %e %e' % (self.best.last.f, fbestever)) if self.N < 9: print('incumbent solution: ' + str(list(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)))) print('std deviation: ' + str(list(self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales))) else: print('incumbent solution: %s ...]' % (str(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)[:8])[:-1])) print('std deviations: %s ...]' % (str((self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)[:8])[:-1])) return self.result() def clip_or_fit_solutions(self, pop, idx): """make sure that solutions fit to sample distribution, this interface will probably change. In particular the frequency of long vectors appearing in pop[idx] - self.mean is limited. """ for k in idx: self.repair_genotype(pop[k]) def repair_genotype(self, x, copy_if_changed=False): """make sure that solutions fit to the sample distribution, this interface will probably change. In particular the frequency of x - self.mean being long is limited. """ x = array(x, copy=False) mold = array(self.mean, copy=False) if 1 < 3: # hard clip at upper_length upper_length = self.N**0.5 + 2 * self.N / (self.N + 2) # should become an Option, but how? e.g. [0, 2, 2] fac = self.mahalanobis_norm(x - mold) / upper_length if fac > 1: if copy_if_changed: x = (x - mold) / fac + mold else: # should be 25% faster: x -= mold x /= fac x += mold # print self.countiter, k, fac, self.mahalanobis_norm(pop[k] - mold) # adapt also sigma: which are the trust-worthy/injected solutions? 
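# Simplified sketch (assumption: the Euclidean norm stands in for the
# Mahalanobis norm used by ``repair_genotype`` above) of the step-length
# clipping: a candidate whose step from the mean exceeds ``upper_length`` is
# shrunk back onto that radius.
import numpy as np
N = 10
mean = np.zeros(N)
x = mean + 5.0 * np.ones(N)                      # an overly long step
upper_length = N ** 0.5 + 2.0 * N / (N + 2)
fac = np.linalg.norm(x - mean) / upper_length
if fac > 1:
    x = (x - mean) / fac + mean                  # hard clip at upper_length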
else: if 'checktail' not in self.__dict__: # hasattr(self, 'checktail') raise NotImplementedError # from check_tail_smooth import CheckTail # for the time being # self.checktail = CheckTail() # print('untested feature checktail is on') fac = self.checktail.addchin(self.mahalanobis_norm(x - mold)) if fac < 1: x = fac * (x - mold) + mold return x def decompose_C(self): """eigen-decompose self.C and update self.dC, self.C, self.B. Known bugs: this might give a runtime error with CMA_diagonal / separable option on. """ if self.opts['CMA_diagonal']: _print_warning("this might fail with CMA_diagonal option on", iteration=self.countiter) print(self.opts['CMA_diagonal']) # print(' %.19e' % self.C[0][0]) self.C = (self.C + self.C.T) / 2 self.dC = np.diag(self.C).copy() self.D, self.B = self.opts['CMA_eigenmethod'](self.C) # self.B = np.round(self.B, 10) # for i in rglen(self.D): # d = self.D[i] # oom = np.round(np.log10(d)) # self.D[i] = 10**oom * np.round(d / 10**oom, 10) # print(' %.19e' % self.C[0][0]) # print(' %.19e' % self.D[0]) if any(self.D <= 0): _print_warning("ERROR", iteration=self.countiter) raise ValueError("covariance matrix was not positive definite," + " this must be considered as a bug") self.D = self.D**0.5 assert all(isfinite(self.D)) idx = np.argsort(self.D) self.D = self.D[idx] self.B = self.B[:, idx] # self.B[i] is a row, columns self.B[:,i] are eigenvectors self.count_eigen += 1 def updateBD(self): """update internal variables for sampling the distribution with the current covariance matrix C. This method is O(N^3), if C is not diagonal. """ # itereigenupdated is always up-to-date in the diagonal case # just double check here if self.itereigenupdated == self.countiter: return if self.opts['CMA_diagonal'] >= self.countiter: _print_warning("updateBD called in CMA_diagonal mode, " + "this should be considered a bug", "updateBD", iteration=self.countiter) # C has already positive updates, here come the additional negative updates if self.sp.neg.cmuexp: C_shrunken = (1 - self.sp.cmu - self.sp.c1)**(self.countiter - self.itereigenupdated) clip_fac = 0.60 # 0.9 is sufficient to prevent degeneration in small dimension if hasattr(self.opts['vv'], '__getitem__') and self.opts['vv'][0] == 'sweep_ccov_neg': clip_fac = 0.98 if (self.countiter - self.itereigenupdated) * self.sp.neg.cmuexp * self.N \ < clip_fac * C_shrunken: # pos.def. 
guarantied, because vectors are normalized self.C -= self.sp.neg.cmuexp * self._Yneg else: max_warns = 1 try: self._updateBD_warnings += 1 except AttributeError: self._updateBD_warnings = 1 if self.opts['verbose'] > 1 and \ self._updateBD_warnings <= max_warns: _print_warning('doing two additional eigen' + 'decompositions to guarantee pos.def.', 'updateBD', 'CMAEvolutionStrategy') if self._updateBD_warnings == max_warns: _print_warning('further warnings are surpressed', 'updateBD') self.decompose_C() _tmp_inverse_root_C = dot(self.B / self.D, self.B.T) _tmp_inverse_root_C = (_tmp_inverse_root_C + _tmp_inverse_root_C.T) / 2 Zneg = dot(dot(_tmp_inverse_root_C, self._Yneg), _tmp_inverse_root_C) eigvals, eigvecs = self.opts['CMA_eigenmethod'](Zneg) self.count_eigen += 1 if max(eigvals) * self.sp.neg.cmuexp <= clip_fac: self.C -= self.sp.neg.cmuexp * self._Yneg elif 1 < 3: self.C -= (clip_fac / max(eigvals)) * self._Yneg _print_warning( 'clipped learning rate for negative weights, ' + 'maximal eigenvalue = %f, maxeig * ccov = %f > %f' % (max(eigvals), max(eigvals) * self.sp.neg.cmuexp, clip_fac), iteration=self.countiter) if 1 < 3: # let's check eigvals, eigvecs = self.opts['CMA_eigenmethod'](self.C) self.count_eigen += 1 print('new min eigenval = %e, old = %e' % (min(eigvals), min(self.D)**2)) if min(eigvals) > 0: print('new cond = %e, old = %e' % (max(eigvals) / min(eigvals), (max(self.D) / min(self.D))**2)) else: # guaranties pos.def. unconditionally _print_warning('exponential update for negative weights (internally more expensive)', iteration=self.countiter) self.update_exponential(self._Yneg, -self.sp.neg.cmuexp) # self.C = self.Ypos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Yneg*Csi) * Cs # Yneg = self.Yneg # for temporary debugging, can be removed self._Yneg = np.zeros((self.N, self.N)) if hasattr(self.opts['vv'], '__getitem__') and self.opts['vv'][0].startswith('sweep_ccov'): self.opts['CMA_const_trace'] = True if self.opts['CMA_const_trace'] in (True, 1, 2): # normalize trace of C if self.opts['CMA_const_trace'] == 2: s = np.exp(2 * np.mean(np.log(self.D))) # or geom average of dC? else: s = np.mean(np.diag(self.C)) self.C /= s dC = np.diag(self.C) if max(dC) / min(dC) > 1e8: # allows for much larger condition numbers, if axis-parallel self.sigma_vec *= np.diag(self.C)**0.5 self.C = self.correlation_matrix() _print_warning('condition in coordinate system exceeded 1e8' + ', rescaled to 1') # self.C = np.triu(self.C) + np.triu(self.C,1).T # should work as well # self.D, self.B = eigh(self.C) # hermitian, ie symmetric C is assumed self.decompose_C() # assert(sum(self.D-DD) < 1e-6) # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6) # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6) # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:])) # qqqqqqqqqq # is O(N^3) # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11) if 1 < 3 and max(self.D) / min(self.D) > 1e6 and self.gp.isidentity: # TODO: allow to do this again # dmean_prev = dot(self.B, (1. 
/ self.D) * dot(self.B.T, (self.mean - 0*self.mean_old) / self.sigma_vec)) self.gp._tf_matrix = (self.sigma_vec * dot(self.B * self.D, self.B.T).T).T self.gp._tf_matrix_inv = (dot(self.B / self.D, self.B.T).T / self.sigma_vec).T self.gp.tf_pheno = lambda x: dot(self.gp._tf_matrix, x) self.gp.tf_geno = lambda x: dot(self.gp._tf_matrix_inv, x) # not really necessary self.gp.isidentity = False assert self.mean is not self.mean_old self.mean = self.gp.geno(self.mean) # same as tf_geno self.mean_old = self.gp.geno(self.mean_old) # not needed? self.pc = self.gp.geno(self.pc) self.D[:] = 1.0 self.B = np.eye(self.N) self.C = np.eye(self.N) self.dC[:] = 1.0 self.sigma_vec = 1 # dmean_now = dot(self.B, (1. / self.D) * dot(self.B.T, (self.mean - 0*self.mean_old) / self.sigma_vec)) # assert Mh.vequals_approximately(dmean_now, dmean_prev) _print_warning('\n geno-pheno transformation introduced based on current C,\n injected solutions become "invalid" in this iteration', 'updateBD', 'CMAEvolutionStrategy', self.countiter) self.itereigenupdated = self.countiter def multiplyC(self, alpha): """multiply C with a scalar and update all related internal variables (dC, D,...)""" self.C *= alpha if self.dC is not self.C: self.dC *= alpha self.D *= alpha**0.5 def update_exponential(self, Z, eta, BDpair=None): """exponential update of C that guarantees positive definiteness, that is, instead of the assignment ``C = C + eta * Z``, we have ``C = C**.5 * exp(eta * C**-.5 * Z * C**-.5) * C**.5``. Parameter `Z` should have expectation zero, e.g. sum(w[i] * z[i] * z[i].T) - C if E z z.T = C. Parameter `eta` is the learning rate, for ``eta == 0`` nothing is updated. This function conducts two eigendecompositions, assuming that B and D are not up to date, unless `BDpair` is given. Given BDpair, B is the eigensystem and D is the vector of sqrt(eigenvalues), one eigendecomposition is omitted. Reference: Glasmachers et al 2010, Exponential Natural Evolution Strategies """ if eta == 0: return if BDpair: B, D = BDpair else: D, B = self.opts['CMA_eigenmethod'](self.C) self.count_eigen += 1 D **= 0.5 Cs = dot(B, (B * D).T) # square root of C Csi = dot(B, (B / D).T) # square root of inverse of C self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)), self.opts['CMA_eigenmethod']), Cs)) self.count_eigen += 1 # ____________________________________________________________ # ____________________________________________________________ def feedForResume(self, X, function_values): """Given all "previous" candidate solutions and their respective function values, the state of a `CMAEvolutionStrategy` object can be reconstructed from this history. This is the purpose of function `feedForResume`. Arguments --------- `X` (all) solution points in chronological order, phenotypic representation. The number of points must be a multiple of popsize. `function_values` respective objective function values Details ------- `feedForResume` can be called repeatedly with only parts of the history. The part must have the length of a multiple of the population size. `feedForResume` feeds the history in popsize-chunks into `tell`. The state of the random number generator might not be reconstructed, but this would be only relevant for the future. Example ------- :: import cma # prepare (x0, sigma0) = ... # initial values from previous trial X = ... # list of generated solutions from a previous trial f = ... 
# respective list of f-values # resume es = cma.CMAEvolutionStrategy(x0, sigma0) es.feedForResume(X, f) # continue with func as objective function while not es.stop(): X = es.ask() es.tell(X, [func(x) for x in X]) Credits to Dirk Bueche and Fabrice Marchal for the feeding idea. :See: class `CMAEvolutionStrategy` for a simple dump/load to resume """ if self.countiter > 0: _print_warning('feed should generally be used with a new object instance') if len(X) != len(function_values): raise _Error('number of solutions ' + str(len(X)) + ' and number function values ' + str(len(function_values)) + ' must not differ') popsize = self.sp.popsize if (len(X) % popsize) != 0: raise _Error('number of solutions ' + str(len(X)) + ' must be a multiple of popsize (lambda) ' + str(popsize)) for i in rglen((X) / popsize): # feed in chunks of size popsize self.ask() # a fake ask, mainly for a conditioned calling of updateBD # and secondary to get possibly the same random state self.tell(X[i * popsize:(i + 1) * popsize], function_values[i * popsize:(i + 1) * popsize]) # ____________________________________________________________ # ____________________________________________________________ def readProperties(self): """reads dynamic parameters from property file (not implemented) """ print('not yet implemented') # ____________________________________________________________ # ____________________________________________________________ def correlation_matrix(self): if len(self.C.shape) <= 1: return None c = self.C.copy() for i in xrange(c.shape[0]): fac = c[i, i]**0.5 c[:, i] /= fac c[i, :] /= fac c = (c + c.T) / 2.0 return c def mahalanobis_norm(self, dx): """compute the Mahalanobis norm that is induced by the adapted sample distribution, covariance matrix ``C`` times ``sigma**2``, including ``sigma_vec``. The expected Mahalanobis distance to the sample mean is about ``sqrt(dimension)``. Argument -------- A *genotype* difference `dx`. Example ------- >>> import cma, numpy >>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1) >>> xx = numpy.random.randn(2, 10) >>> d = es.mahalanobis_norm(es.gp.geno(xx[0]-xx[1])) `d` is the distance "in" the true sample distribution, sampled points have a typical distance of ``sqrt(2*es.N)``, where ``es.N`` is the dimension, and an expected distance of close to ``sqrt(N)`` to the sample mean. In the example, `d` is the Euclidean distance, because C = I and sigma = 1. """ return sqrt(sum((self.D**-1. * np.dot(self.B.T, dx / self.sigma_vec))**2)) / self.sigma def _metric_when_multiplied_with_sig_vec(self, sig): """return D^-1 B^T diag(sig) B D as a measure for C^-1/2 diag(sig) C^1/2 :param sig: a vector "used" as diagonal matrix :return: """ return dot((self.B * self.D**-1.).T * sig, self.B * self.D) def disp_annotation(self): """print annotation for `disp()`""" print('Iterat #Fevals function value axis ratio sigma min&max std t[m:s]') sys.stdout.flush() def disp(self, modulo=None): # TODO: rather assign opt['verb_disp'] as default? 
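# Stand-alone NumPy sketch (assumption: toy B, D, sigma values) of the norm
# computed by ``mahalanobis_norm`` above:
#   ||dx||_C = sqrt(sum((D**-1 * B^T * (dx / sigma_vec))**2)) / sigma,
# where C = B * diag(D**2) * B^T is the adapted covariance matrix.
import numpy as np
B = np.eye(3)                        # eigenvectors of C (columns)
D = np.array([1.0, 2.0, 0.5])        # square roots of the eigenvalues of C
sigma, sigma_vec = 0.3, 1.0
dx = np.array([0.1, -0.2, 0.05])
norm = np.sqrt(np.sum((D ** -1 * np.dot(B.T, dx / sigma_vec)) ** 2)) / sigma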
"""prints some single-line infos according to `disp_annotation()`, if ``iteration_counter % modulo == 0`` """ if modulo is None: modulo = self.opts['verb_disp'] # console display if modulo: if (self.countiter - 1) % (10 * modulo) < 1: self.disp_annotation() if self.countiter > 0 and (self.stop() or self.countiter < 4 or self.countiter % modulo < 1): if self.opts['verb_time']: toc = self.elapsed_time() stime = str(int(toc // 60)) + ':' + str(round(toc % 60, 1)) else: stime = '' print(' '.join((repr(self.countiter).rjust(5), repr(self.countevals).rjust(6), '%.15e' % (min(self.fit.fit)), '%4.1e' % (self.D.max() / self.D.min()), '%6.2e' % self.sigma, '%6.0e' % (self.sigma * min(self.sigma_vec * sqrt(self.dC))), '%6.0e' % (self.sigma * max(self.sigma_vec * sqrt(self.dC))), stime))) # if self.countiter < 4: sys.stdout.flush() return self def plot(self): try: self.logger.plot() except AttributeError: _print_warning('plotting failed, no logger attribute found') except: _print_warning(('plotting failed with:', sys.exc_info()[0]), 'plot', 'CMAEvolutionStrategy') return self cma_default_options = { # the follow string arguments are evaluated if they do not contain "filename" 'AdaptSigma': 'CMAAdaptSigmaCSA # or any other CMAAdaptSigmaBase class e.g. CMAAdaptSigmaTPA', 'CMA_active': 'True # negative update, conducted after the original update', # 'CMA_activefac': '1 # learning rate multiplier for active update', 'CMA_cmean': '1 # learning rate for the mean value', 'CMA_const_trace': 'False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero', 'CMA_diagonal': '0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always', # TODO 4/ccov_separable? 'CMA_eigenmethod': 'np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)', 'CMA_elitist': 'False #v or "initial" or True, elitism likely impairs global search performance', 'CMA_mirrors': 'popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used', 'CMA_mirrormethod': '1 # 0=unconditional, 1=selective, 2==experimental', 'CMA_mu': 'None # parents selection parameter, default is popsize // 2', 'CMA_on': 'True # False or 0 for no adaptation of the covariance matrix', 'CMA_sample_on_sphere_surface': 'False #v all mutation vectors have the same length', 'CMA_rankmu': 'True # False or 0 for omitting rank-mu update of covariance matrix', 'CMA_rankmualpha': '0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0', 'CMA_dampsvec_fac': 'np.Inf # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update', 'CMA_dampsvec_fade': '0.1 # tentative fading out parameter for sigma vector update', 'CMA_teststds': 'None # factors for non-isotropic initial distr. 
of C, mainly for test purpose, see CMA_stds for production', 'CMA_stds': 'None # multipliers for sigma0 in each coordinate, not represented in C, makes scaling_of_variables obsolete', # 'CMA_AII': 'False # not yet tested', 'CSA_dampfac': '1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere', 'CSA_damp_mueff_exponent': '0.5 # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option', 'CSA_disregard_length': 'False #v True is untested', 'CSA_clip_length_value': 'None #v untested, [0, 0] means disregarding length completely', 'CSA_squared': 'False #v use squared length for sigma-adaptation ', 'boundary_handling': 'BoundTransform # or BoundPenalty, unused when ``bounds in (None, [None, None])``', 'bounds': '[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector', # , eval_parallel2': 'not in use {"processes": None, "timeout": 12, "is_feasible": lambda x: True} # distributes function calls to processes processes' 'fixed_variables': 'None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized', 'ftarget': '-inf #v target function value, minimization', 'is_feasible': 'is_feasible #v a function that computes feasibility, by default lambda x, f: f not in (None, np.NaN)', 'maxfevals': 'inf #v maximum number of function evaluations', 'maxiter': '100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations', 'mean_shift_line_samples': 'False #v sample two new solutions colinear to previous mean shift', 'mindx': '0 #v minimal std in any direction, cave interference with tol*', 'minstd': '0 #v minimal std in any coordinate direction, cave interference with tol*', 'maxstd': 'inf #v maximal std in any coordinate direction', 'pc_line_samples': 'False #v two line samples along the evolution path pc', 'popsize': '4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration', 'randn': 'np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)', 'scaling_of_variables': 'None # (rather use CMA_stds) scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is np.ones(N)', 'seed': 'None # random number seed', 'signals_filename': 'cmaes_signals.par # read from this file, e.g. "stop now"', 'termination_callback': 'None #v a function returning True for termination, called after each iteration step and could be abused for side effects', 'tolfacupx': '1e3 #v termination when step-size increases by tolfacupx (diverges). 
That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0', 'tolupsigma': '1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements', 'tolfun': '1e-11 #v termination criterion: tolerance in function value, quite useful', 'tolfunhist': '1e-12 #v termination criterion: tolerance in function value history', 'tolstagnation': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations', 'tolx': '1e-11 #v termination criterion: tolerance in x-changes', 'transformation': 'None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno', 'typical_x': 'None # used with scaling_of_variables', 'updatecovwait': 'None #v number of iterations without distribution update, name is subject to future changes', # TODO: rename: iterwaitupdatedistribution? 'verbose': '1 #v verbosity e.v. of initial/final message, -1 is very quiet, -9 maximally quiet, not yet fully implemented', 'verb_append': '0 # initial evaluation counter, if append, do not overwrite output files', 'verb_disp': '100 #v verbosity: display console output every verb_disp iteration', 'verb_filenameprefix': 'outcmaes # output filenames prefix', 'verb_log': '1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions', 'verb_plot': '0 #v in fmin(): plot() is called every verb_plot iteration', 'verb_time': 'True #v output timings on console', 'vv': '0 #? versatile variable for hacking purposes, value found in self.opts["vv"]' } class CMAOptions(dict): """``CMAOptions()`` returns a dictionary with the available options and their default values for class ``CMAEvolutionStrategy``. ``CMAOptions('pop')`` returns a subset of recognized options that contain 'pop' in there keyword name or (default) value or description. ``CMAOptions(opts)`` returns the subset of recognized options in ``dict(opts)``. Option values can be "written" in a string and, when passed to fmin or CMAEvolutionStrategy, are evaluated using "N" and "popsize" as known values for dimension and population size (sample size, number of new solutions per iteration). All default option values are such a string. Details ------- ``CMAOptions`` entries starting with ``tol`` are termination "tolerances". For `tolstagnation`, the median over the first and the second half of at least `tolstagnation` iterations are compared for both, the per-iteration best and per-iteration median function value. Example ------- :: import cma cma.CMAOptions('tol') is a shortcut for cma.CMAOptions().match('tol') that returns all options that contain 'tol' in their name or description. 
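    As a minimal illustration (a sketch, assuming the default option strings),
    the string values are evaluated once the dimension is known, e.g.::

        import cma
        opts = cma.CMAOptions()
        opts.evalall({'N': 10})
        # opts['popsize'] is now 10 == 4 + int(3 * log(10))
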
To set an option import cma opts = cma.CMAOptions() opts.set('tolfun', 1e-12) opts['tolx'] = 1e-11 :See: `fmin`(), `CMAEvolutionStrategy`, `_CMAParameters` """ # @classmethod # self is the class, not the instance # @property # def default(self): # """returns all options with defaults""" # return fmin([],[]) @staticmethod def defaults(): """return a dictionary with default option values and description""" return dict((str(k), str(v)) for k, v in cma_default_options.items()) # getting rid of the u of u"name" by str(u"name") # return dict(cma_default_options) @staticmethod def versatile_options(): """return list of options that can be changed at any time (not only be initialized), however the list might not be entirely up to date. The string ' #v ' in the default value indicates a 'versatile' option that can be changed any time. """ return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0)) def check(self, options=None): """check for ambiguous keys and move attributes into dict""" self.check_values(options) self.check_attributes(options) self.check_values(options) return self def check_values(self, options=None): corrected_key = CMAOptions().corrected_key # caveat: infinite recursion validated_keys = [] original_keys = [] if options is None: options = self for key in options: correct_key = corrected_key(key) if correct_key is None: raise ValueError("""%s is not a valid option""" % key) if correct_key in validated_keys: if key == correct_key: key = original_keys[validated_keys.index(key)] raise ValueError("%s was not a unique key for %s option" % (key, correct_key)) validated_keys.append(correct_key) original_keys.append(key) return options def check_attributes(self, opts=None): """check for attributes and moves them into the dictionary""" if opts is None: opts = self if 1 < 3: # the problem with merge is that ``opts['ftarget'] = new_value`` # would be overwritten by the old ``opts.ftarget``. # The solution here is to empty opts.__dict__ after the merge if hasattr(opts, '__dict__'): for key in list(opts.__dict__): if key in self._attributes: continue _print_warning( """ An option attribute has been merged into the dictionary, thereby possibly overwriting the dictionary value, and the attribute has been removed. Assign options with ``opts['%s'] = value`` # dictionary assignment or use ``opts.set('%s', value) # here isinstance(opts, CMAOptions) instead of ``opts.%s = value`` # attribute assignment """ % (key, key, key), 'check', 'CMAOptions') opts[key] = opts.__dict__[key] # getattr(opts, key) delattr(opts, key) # is that cosher? # delattr is necessary to prevent that the attribute # overwrites the dict entry later again return opts @staticmethod def merge(self, dict_=None): """not is use so far, see check()""" if dict_ is None and hasattr(self, '__dict__'): dict_ = self.__dict__ # doesn't work anymore as we have _lock attribute if dict_ is None: return self self.update(dict_) return self def __init__(self, s=None, unchecked=False): """return an `CMAOptions` instance, either with the default options, if ``s is None``, or with all options whose name or description contains `s`, if `s` is a string (case is disregarded), or with entries from dictionary `s` as options, not complemented with default options or settings Returns: see above. """ # if not CMAOptions.defaults: # this is different from self.defaults!!! 
# CMAOptions.defaults = fmin([],[]) if s is None: super(CMAOptions, self).__init__(CMAOptions.defaults()) # dict.__init__(self, CMAOptions.defaults()) should be the same # self = CMAOptions.defaults() elif isinstance(s, basestring): super(CMAOptions, self).__init__(CMAOptions().match(s)) # we could return here else: super(CMAOptions, self).__init__(s) if not unchecked and s is not None: self.check() # caveat: infinite recursion for key in list(self.keys()): correct_key = self.corrected_key(key) if correct_key not in CMAOptions.defaults(): _print_warning('invalid key ``' + str(key) + '`` removed', '__init__', 'CMAOptions') self.pop(key) elif key != correct_key: self[correct_key] = self.pop(key) # self.evaluated = False # would become an option entry self._lock_setting = False self._attributes = self.__dict__.copy() # are not valid keys self._attributes['_attributes'] = len(self._attributes) def init(self, dict_or_str, val=None, warn=True): """initialize one or several options. Arguments --------- `dict_or_str` a dictionary if ``val is None``, otherwise a key. If `val` is provided `dict_or_str` must be a valid key. `val` value for key Details ------- Only known keys are accepted. Known keys are in `CMAOptions.defaults()` """ # dic = dict_or_key if val is None else {dict_or_key:val} self.check(dict_or_str) dic = dict_or_str if val is not None: dic = {dict_or_str:val} for key, val in dic.items(): key = self.corrected_key(key) if key not in CMAOptions.defaults(): # TODO: find a better solution? if warn: print('Warning in cma.CMAOptions.init(): key ' + str(key) + ' ignored') else: self[key] = val return self def set(self, dic, val=None, force=False): """set can assign versatile options from `CMAOptions.versatile_options()` with a new value, use `init()` for the others. Arguments --------- `dic` either a dictionary or a key. In the latter case, `val` must be provided `val` value for `key`, approximate match is sufficient `force` force setting of non-versatile options, use with caution This method will be most probably used with the ``opts`` attribute of a `CMAEvolutionStrategy` instance. """ if val is not None: # dic is a key in this case dic = {dic:val} # compose a dictionary for key_original, val in list(dict(dic).items()): key = self.corrected_key(key_original) if not self._lock_setting or \ key in CMAOptions.versatile_options(): self[key] = val else: _print_warning('key ' + str(key_original) + ' ignored (not recognized as versatile)', 'set', 'CMAOptions') return self # to allow o = CMAOptions(o).set(new) def complement(self): """add all missing options with their default values""" # add meta-parameters, given options have priority self.check() for key in CMAOptions.defaults(): if key not in self: self[key] = CMAOptions.defaults()[key] return self def settable(self): """return the subset of those options that are settable at any time. Settable options are in `versatile_options()`, but the list might be incomplete. """ return CMAOptions([i for i in list(self.items()) if i[0] in CMAOptions.versatile_options()]) def __call__(self, key, default=None, loc=None): """evaluate and return the value of option `key` on the fly, or returns those options whose name or description contains `key`, case disregarded. Details ------- Keys that contain `filename` are not evaluated. For ``loc==None``, `self` is used as environment but this does not define ``N``. 
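        A minimal sketch (assuming the default option strings)::

            opts = CMAOptions()
            opts('popsize', loc={'N': 10})  # -> 10, from '4+int(3*log(N))'
            opts('verb_filenameprefix')     # -> 'outcmaes', never evaluated
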
:See: `eval()`, `evalall()` """ try: val = self[key] except: return self.match(key) if loc is None: loc = self # TODO: this hack is not so useful: popsize could be there, but N is missing try: if isinstance(val, basestring): val = val.split('#')[0].strip() # remove comments if isinstance(val, basestring) and \ key.find('filename') < 0: # and key.find('mindx') < 0: val = eval(val, globals(), loc) # invoke default # TODO: val in ... fails with array type, because it is applied element wise! # elif val in (None,(),[],{}) and default is not None: elif val is None and default is not None: val = eval(str(default), globals(), loc) except: pass # slighly optimistic: the previous is bug-free return val def corrected_key(self, key): """return the matching valid key, if ``key.lower()`` is a unique starting sequence to identify the valid key, ``else None`` """ matching_keys = [] for allowed_key in CMAOptions.defaults(): if allowed_key.lower() == key.lower(): return allowed_key if allowed_key.lower().startswith(key.lower()): matching_keys.append(allowed_key) return matching_keys[0] if len(matching_keys) == 1 else None def eval(self, key, default=None, loc=None, correct_key=True): """Evaluates and sets the specified option value in environment `loc`. Many options need ``N`` to be defined in `loc`, some need `popsize`. Details ------- Keys that contain 'filename' are not evaluated. For `loc` is None, the self-dict is used as environment :See: `evalall()`, `__call__` """ # TODO: try: loc['dim'] = loc['N'] etc if correct_key: # in_key = key # for debugging only key = self.corrected_key(key) self[key] = self(key, default, loc) return self[key] def evalall(self, loc=None, defaults=None): """Evaluates all option values in environment `loc`. :See: `eval()` """ self.check() if defaults is None: defaults = cma_default_options # TODO: this needs rather the parameter N instead of loc if 'N' in loc: # TODO: __init__ of CMA can be simplified popsize = self('popsize', defaults['popsize'], loc) for k in list(self.keys()): k = self.corrected_key(k) self.eval(k, defaults[k], {'N':loc['N'], 'popsize':popsize}) self._lock_setting = True return self def match(self, s=''): """return all options that match, in the name or the description, with string `s`, case is disregarded. Example: ``cma.CMAOptions().match('verb')`` returns the verbosity options. """ match = s.lower() res = {} for k in sorted(self): s = str(k) + '=\'' + str(self[k]) + '\'' if match in s.lower(): res[k] = self[k] return CMAOptions(res, unchecked=True) def pp(self): pprint(self) def pprint(self, linebreak=80): for i in sorted(self.items()): s = str(i[0]) + "='" + str(i[1]) + "'" a = s.split(' ') # print s in chunks l = '' # start entire to the left while a: while a and len(l) + len(a[0]) < linebreak: l += ' ' + a.pop(0) print(l) l = ' ' # tab for subsequent lines print_ = pprint # Python style to prevent clash with keywords printme = pprint # ____________________________________________________________ # ____________________________________________________________ class _CMAStopDict(dict): """keep and update a termination condition dictionary, which is "usually" empty and returned by `CMAEvolutionStrategy.stop()`. The class methods entirely depend on `CMAEvolutionStrategy` class attributes. Details ------- This class is not relevant for the end-user and could be a nested class, but nested classes cannot be serialized. 
Example ------- >>> import cma >>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'verbose':-1}) >>> print(es.stop()) {} >>> es.optimize(cma.fcts.sphere, verb_disp=0) >>> print(es.stop()) {'tolfun': 1e-11} :See: `OOOptimizer.stop()`, `CMAEvolutionStrategy.stop()` """ def __init__(self, d={}): update = isinstance(d, CMAEvolutionStrategy) super(_CMAStopDict, self).__init__({} if update else d) self._stoplist = [] # to keep multiple entries self.lastiter = 0 # probably not necessary if isinstance(d, _CMAStopDict): # inherit self._stoplist = d._stoplist # multiple entries self.lastiter = d.lastiter # probably not necessary if update: self._update(d) def __call__(self, es=None, check=True): """update and return the termination conditions dictionary """ if not check: return self if es is None and self.es is None: raise ValueError('termination conditions need an optimizer to act upon') self._update(es) return self def _update(self, es): """Test termination criteria and update dictionary """ if es is None: es = self.es assert es is not None if es.countiter == 0: # in this case termination tests fail self.__init__() return self self.lastiter = es.countiter self.es = es self.clear() # compute conditions from scratch N = es.N opts = es.opts self.opts = opts # a hack to get _addstop going # fitness: generic criterion, user defined w/o default self._addstop('ftarget', es.best.f < opts['ftarget']) # maxiter, maxfevals: generic criteria self._addstop('maxfevals', es.countevals - 1 >= opts['maxfevals']) self._addstop('maxiter', ## meta_parameters.maxiter_multiplier == 1.0 es.countiter >= 1.0 * opts['maxiter']) # tolx, tolfacupx: generic criteria # tolfun, tolfunhist (CEC:tolfun includes hist) self._addstop('tolx', all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * es.pc]) and all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * sqrt(es.dC)])) self._addstop('tolfacupx', any(es.sigma * es.sigma_vec * sqrt(es.dC) > es.sigma0 * es.sigma_vec0 * opts['tolfacupx'])) self._addstop('tolfun', es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and max(es.fit.hist) - min(es.fit.hist) < opts['tolfun']) self._addstop('tolfunhist', len(es.fit.hist) > 9 and max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist']) # worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5 # but the median is not so much getting worse # / 5 reflects the sparsity of histbest/median # / 2 reflects the left and right part to be compared ## meta_parameters.tolstagnation_multiplier == 1.0 l = int(max(( 1.0 * opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10))) # TODO: why max(..., len(histbest)/10) ??? # TODO: the problem in the beginning is only with best ==> ??? # equality should handle flat fitness self._addstop('tolstagnation', # leads sometimes early stop on ftablet, fcigtab, N>=50? 
1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and len(es.fit.histbest) > 100 and 2 * l < len(es.fit.histbest) and np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2 * l]) and np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2 * l])) # iiinteger: stagnation termination can prevent to find the optimum self._addstop('tolupsigma', opts['tolupsigma'] and es.sigma / np.max(es.D) > es.sigma0 * opts['tolupsigma']) if 1 < 3: # non-user defined, method specific # noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov idx = np.where(es.mean == es.mean + 0.2 * es.sigma * es.sigma_vec * es.dC**0.5)[0] self._addstop('noeffectcoord', any(idx), idx) # any([es.mean[i] == es.mean[i] + 0.2 * es.sigma * # (es.sigma_vec if isscalar(es.sigma_vec) else es.sigma_vec[i]) * # sqrt(es.dC[i]) # for i in xrange(N)]) # ) if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']: i = es.countiter % N self._addstop('noeffectaxis', sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N) self._addstop('conditioncov', es.D[-1] > 1e7 * es.D[0], 1e14) # TODO self._addstop('callback', es.callbackstop) # termination_callback try: with open(self.opts['signals_filename'], 'r') as f: for line in f.readlines(): words = line.split() if len(words) < 2 or words[0].startswith(('#', '%')): continue if words[0] == 'stop' and words[1] == 'now': if len(words) > 2 and not words[2].startswith( self.opts['verb_filenameprefix']): continue self._addstop('file_signal', True, "stop now") break except IOError: pass if len(self): self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborate', len(es.fit.hist) > 9 and max(es.fit.hist) == min(es.fit.hist)) return self def _addstop(self, key, cond, val=None): if cond: self.stoplist.append(key) # can have the same key twice self[key] = val if val is not None \ else self.opts.get(key, None) def clear(self): for k in list(self): self.pop(k) self.stoplist = [] # ____________________________________________________________ # ____________________________________________________________ class _CMAParameters(object): """strategy parameters like population size and learning rates. 
Note: contrary to `CMAOptions`, `_CMAParameters` is not (yet) part of the "user-interface" and subject to future changes (it might become a `collections.namedtuple`) Example ------- >>> import cma >>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1) (6_w,12)-CMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=504519190) # the seed is "random" by default >>> >>> type(es.sp) # sp contains the strategy parameters >>> >>> es.sp.disp() {'CMA_on': True, 'N': 20, 'c1': 0.004181139918745593, 'c1_sep': 0.034327992810300939, 'cc': 0.17176721127681213, 'cc_sep': 0.25259494835857677, 'cmean': 1.0, 'cmu': 0.0085149624979034746, 'cmu_sep': 0.057796356229390715, 'cs': 0.21434997799189287, 'damps': 1.2143499779918929, 'mu': 6, 'mu_f': 6.0, 'mueff': 3.7294589343030671, 'popsize': 12, 'rankmualpha': 0.3, 'weights': array([ 0.40240294, 0.25338908, 0.16622156, 0.10437523, 0.05640348, 0.01720771])} >>> >> es.sp == cma._CMAParameters(20, 12, cma.CMAOptions().evalall({'N': 20})) True :See: `CMAOptions`, `CMAEvolutionStrategy` """ def __init__(self, N, opts, ccovfac=1, verbose=True): """Compute strategy parameters, mainly depending on dimension and population size, by calling `set` """ self.N = N if ccovfac == 1: ccovfac = opts['CMA_on'] # that's a hack self.popsize = None # declaring the attribute, not necessary though self.set(opts, ccovfac=ccovfac, verbose=verbose) def set(self, opts, popsize=None, ccovfac=1, verbose=True): """Compute strategy parameters as a function of dimension and population size """ alpha_cc = 1.0 # cc-correction for mueff, was zero before def conedf(df, mu, N): """used for computing separable learning rate""" return 1. / (df + 2.*sqrt(df) + float(mu) / N) def cmudf(df, mu, alphamu): """used for computing separable learning rate""" return (alphamu + mu - 2. + 1. / mu) / (df + 4.*sqrt(df) + mu / 2.) sp = self N = sp.N if popsize: opts.evalall({'N':N, 'popsize':popsize}) else: popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in CMAOptions() ## meta_parameters.lambda_exponent == 0.0 popsize = int(popsize + N** 0.0 - 1) sp.popsize = popsize if opts['CMA_mirrors'] < 0.5: sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize) elif opts['CMA_mirrors'] > 1: sp.lam_mirr = int(0.5 + opts['CMA_mirrors']) else: sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... 
* popsize is optimal # lam = arange(2,22) # mirr = 0.16 + 0.29/lam # print(lam); print([int(0.5 + l) for l in mirr*lam]) # [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21] # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4] ## meta_parameters.parent_fraction == 0.5 sp.mu_f = 0.5 * sp.popsize # float value of mu if opts['CMA_mu'] is not None: sp.mu_f = opts['CMA_mu'] sp.mu = int(sp.mu_f + 0.499999) # round down for x.5 sp.mu = max((sp.mu, 1)) # in principle we have mu_opt = popsize/2 + lam_mirr/2, # which means in particular weights should only be negative for q > 0.5+mirr_frac/2 if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1: _print_warning("pairwise selection is not implemented, therefore " + " mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % ( sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr)) if sp.lam_mirr > sp.popsize // 2: raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " + "theoretically optimal is 0.159") sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu)) sp.weights /= sum(sp.weights) sp.mueff = 1 / sum(sp.weights**2) # TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA ## meta_parameters.cs_exponent == 1.0 b = 1.0 ## meta_parameters.cs_multiplier == 1.0 sp.cs = 1.0 * (sp.mueff + 2)**b / (N + (sp.mueff + 3)**b) # TODO: this doesn't change dependency of dimension # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1) ## meta_parameters.cc_exponent == 1.0 b = 1.0 ## meta_parameters.cc_multiplier == 1.0 sp.cc = 1.0 * \ (4 + alpha_cc * sp.mueff / N)**b / \ (N**b + (4 + alpha_cc * 2 * sp.mueff / N)**b) sp.cc_sep = (1 + 1 / N + alpha_cc * sp.mueff / N) / (N**0.5 + 1 / N + alpha_cc * 2 * sp.mueff / N) # \not\gg\cc if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1': ## meta_parameters.cc_multiplier == 1.0 sp.cc = 1.0 * (4 + sp.mueff / N)**0.5 / ((N + 4)**0.5 + (2 * sp.mueff / N)**0.5) sp.rankmualpha = opts['CMA_rankmualpha'] # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3) ## meta_parameters.c1_multiplier == 1.0 sp.c1 = ( 1.0 * ccovfac * min(1, sp.popsize / 6) * ## meta_parameters.c1_exponent == 2.0 2 / ((N + 1.3)** 2.0 + sp.mueff)) # 1/0 sp.c1_sep = ccovfac * conedf(N, sp.mueff, N) if opts['CMA_rankmu'] != 0: # also empty ## meta_parameters.cmu_multiplier == 2.0 alphacov, mu = 2.0 , sp.mueff sp.cmu = min(1 - sp.c1, ccovfac * alphacov * ## meta_parameters.cmu_exponent == 2.0 (sp.rankmualpha + mu - 2 + 1 / mu) / ((N + 2)** 2.0 + alphacov * mu / 2)) if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov': sp.cmu = opts['vv'][1] sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha)) else: sp.cmu = sp.cmu_sep = 0 if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1': sp.c1 = opts['vv'][1] sp.neg = _BlancClass() if opts['CMA_active'] and opts['CMA_on']: # in principle we have mu_opt = popsize/2 + lam_mirr/2, # which means in particular weights should only be negative for q > 0.5+mirr_frac/2 if 1 < 3: # seems most natural: continuation of log(lambda/2) - log(k) qqqqqqqqqqqqqqqqqqqqqqqqqq sp.neg.mu_f = popsize // 2 # not sure anymore what this is good for sp.neg.weights = array([log(k) - log(popsize/2 + 1/2) for k in np.arange(np.ceil(popsize/2 + 1.1/2), popsize + .1)]) sp.neg.mu = len(sp.neg.weights) sp.neg.weights /= sum(sp.neg.weights) sp.neg.mueff = 1 / sum(sp.neg.weights**2) ## meta_parameters.cact_exponent == 1.5 sp.neg.cmuexp = opts['CMA_active'] 
* 0.3 * sp.neg.mueff / ((N + 2)** 1.5 + 1.0 * sp.neg.mueff) if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov_neg': sp.neg.cmuexp = opts['vv'][1] # reasoning on learning rate cmuexp: with sum |w| == 1 and # length-normalized vectors in the update, the residual # variance in any direction exceeds exp(-N*cmuexp) assert sp.neg.mu >= sp.lam_mirr # not really necessary # sp.neg.minresidualvariance = 0.66 # not it use, keep at least 0.66 in all directions, small popsize is most critical else: sp.neg.cmuexp = 0 sp.CMA_on = sp.c1 + sp.cmu > 0 # print(sp.c1_sep / sp.cc_sep) if not opts['CMA_on'] and opts['CMA_on'] not in (None, [], (), ''): sp.CMA_on = False # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0 mueff_exponent = 0.5 if 1 < 3: mueff_exponent = opts['CSA_damp_mueff_exponent'] # TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA sp.damps = opts['CSA_dampfac'] * (0.5 + 0.5 * min([1, (sp.lam_mirr / (0.159 * sp.popsize) - 1)**2])**1 + 2 * max([0, ((sp.mueff - 1) / (N + 1))**mueff_exponent - 1]) + sp.cs ) sp.cmean = float(opts['CMA_cmean']) # sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate # in larger dim it does, 15-D with defaults, kappa=8 factor 2 if verbose: if not sp.CMA_on: print('covariance matrix adaptation turned off') if opts['CMA_mu'] != None: print('mu = %f' % (sp.mu_f)) # return self # the constructor returns itself def disp(self): pprint(self.__dict__) def fmin(objective_function, x0, sigma0, options=None, args=(), gradf=None, restarts=0, restart_from_best='False', incpopsize=2, eval_initial_x=False, noise_handler=None, noise_change_sigma_exponent=1, noise_kappa_exponent=0, # TODO: add max kappa value as parameter bipop=False): """functional interface to the stochastic optimizer CMA-ES for non-convex function minimization. Calling Sequences ================= ``fmin(objective_function, x0, sigma0)`` minimizes `objective_function` starting at `x0` and with standard deviation `sigma0` (step-size) ``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})`` minimizes `objective_function` up to target function value 1e-5, which is typically useful for benchmarking. ``fmin(objective_function, x0, sigma0, args=('f',))`` minimizes `objective_function` called with an additional argument ``'f'``. ``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})`` uses additional options ``ftarget`` and ``popsize`` ``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})`` uses the `CMAEvolutionStrategy` object instance `esobj` to optimize `objective_function`, similar to `esobj.optimize()`. Arguments ========= `objective_function` function to be minimized. Called as ``objective_function(x, *args)``. `x` is a one-dimensional `numpy.ndarray`. `objective_function` can return `numpy.NaN`, which is interpreted as outright rejection of solution `x` and invokes an immediate resampling and (re-)evaluation of a new solution not counting as function evaluation. `x0` list or `numpy.ndarray`, initial guess of minimum solution before the application of the geno-phenotype transformation according to the ``transformation`` option. It can also be a string holding a Python expression that is evaluated to yield the initial guess - this is important in case restarts are performed so that they start from different places. Otherwise `x0` can also be a `cma.CMAEvolutionStrategy` object instance, in that case `sigma0` can be ``None``. `sigma0` scalar, initial standard deviation in each coordinate. 
`sigma0` should be about 1/4th of the search domain width (where the optimum is to be expected). The variables in `objective_function` should be scaled such that they presumably have similar sensitivity. See also option `scaling_of_variables`. `options` a dictionary with additional options passed to the constructor of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()`` for a list of available options. ``args=()`` arguments to be used to call the `objective_function` ``gradf`` gradient of f, where ``len(gradf(x, *args)) == len(x)``. `gradf` is called once in each iteration if ``gradf is not None``. ``restarts=0`` number of restarts with increasing population size, see also parameter `incpopsize`, implementing the IPOP-CMA-ES restart strategy, see also parameter `bipop`; to restart from different points (recommended), pass `x0` as a string. ``restart_from_best=False`` which point to restart from ``incpopsize=2`` multiplier for increasing the population size `popsize` before each restart ``eval_initial_x=None`` evaluate initial solution, for `None` only with elitist option ``noise_handler=None`` a ``NoiseHandler`` instance or ``None``, a simple usecase is ``cma.fmin(f, 6 * [1], 1, noise_handler=cma.NoiseHandler(6))`` see ``help(cma.NoiseHandler)``. ``noise_change_sigma_exponent=1`` exponent for sigma increment for additional noise treatment ``noise_evaluations_as_kappa`` instead of applying reevaluations, the "number of evaluations" is (ab)used as scaling factor kappa (experimental). ``bipop`` if True, run as BIPOP-CMA-ES; BIPOP is a special restart strategy switching between two population sizings - small (like the default CMA, but with more focused search) and large (progressively increased as in IPOP). This makes the algorithm perform well both on functions with many regularly or irregularly arranged local optima (the latter by frequently restarting with small populations). For the `bipop` parameter to actually take effect, also select non-zero number of (IPOP) restarts; the recommended setting is ``restarts<=9`` and `x0` passed as a string. Note that small-population restarts do not count into the total restart count. Optional Arguments ================== All values in the `options` dictionary are evaluated if they are of type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for details. The full list is available via ``cma.CMAOptions()``. >>> import cma >>> cma.CMAOptions() Subsets of options can be displayed, for example like ``cma.CMAOptions('tol')``, or ``cma.CMAOptions('bound')``, see also class `CMAOptions`. Return ====== Return the list provided by `CMAEvolutionStrategy.result()` appended with termination conditions, an `OOOptimizer` and a `BaseDataLogger`:: res = es.result() + (es.stop(), es, logger) where - ``res[0]`` (``xopt``) -- best evaluated solution - ``res[1]`` (``fopt``) -- respective function value - ``res[2]`` (``evalsopt``) -- respective number of function evaluations - ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations - ``res[4]`` (``iterations``) -- number of overall conducted iterations - ``res[5]`` (``xmean``) -- mean of the final sample distribution - ``res[6]`` (``stds``) -- effective stds of the final sample distribution - ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary - ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance - ``res[-1]`` (``logger``) -- class `CMADataLogger` instance Details ======= This function is an interface to the class `CMAEvolutionStrategy`. 
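    As a minimal sketch (illustrative only), the returned sequence can be
    unpacked following the list above::

        res = cma.fmin(cma.fcts.sphere, 8 * [1], 0.3)
        xopt, fopt = res[0], res[1]    # best evaluated solution and its f-value
        es, logger = res[-2], res[-1]  # es is the CMAEvolutionStrategy instance
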
The latter class should be used when full control over the iteration loop of the optimizer is desired. Examples ======== The following example calls `fmin` optimizing the Rosenbrock function in 10-D with initial solution 0.1 and initial step-size 0.5. The options are specified for the usage with the `doctest` module. >>> import cma >>> # cma.CMAOptions() # returns all possible options >>> options = {'CMA_diagonal':100, 'seed':1234, 'verb_time':0} >>> >>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options) (5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234) Covariance matrix is diagonal for 10 iterations (1/ccov=29.0) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 10 1.264232686260072e+02 1.1e+00 4.40e-01 4e-01 4e-01 2 20 1.023929748193649e+02 1.1e+00 4.00e-01 4e-01 4e-01 3 30 1.214724267489674e+02 1.2e+00 3.70e-01 3e-01 4e-01 100 1000 6.366683525319511e+00 6.2e+00 2.49e-02 9e-03 3e-02 200 2000 3.347312410388666e+00 1.2e+01 4.52e-02 8e-03 4e-02 300 3000 1.027509686232270e+00 1.3e+01 2.85e-02 5e-03 2e-02 400 4000 1.279649321170636e-01 2.3e+01 3.53e-02 3e-03 3e-02 500 5000 4.302636076186532e-04 4.6e+01 4.78e-03 3e-04 5e-03 600 6000 6.943669235595049e-11 5.1e+01 5.41e-06 1e-07 4e-06 650 6500 5.557961334063003e-14 5.4e+01 1.88e-07 4e-09 1e-07 termination on tolfun : 1e-11 final/bestever f-value = 5.55796133406e-14 2.62435631419e-14 mean solution: [ 1. 1.00000001 1. 1. 1. 1.00000001 1.00000002 1.00000003 ...] std deviation: [ 3.9193387e-09 3.7792732e-09 4.0062285e-09 4.6605925e-09 5.4966188e-09 7.4377745e-09 1.3797207e-08 2.6020765e-08 ...] >>> >>> print('best solutions fitness = %f' % (res[1])) best solutions fitness = 2.62435631419e-14 >>> assert res[1] < 1e-12 The above call is pretty much equivalent with the slightly more verbose call:: es = cma.CMAEvolutionStrategy([0.1] * 10, 0.5, options=options).optimize(cma.fcts.rosen) The following example calls `fmin` optimizing the Rastrigin function in 3-D with random initial solution in [-2,2], initial step-size 0.5 and the BIPOP restart strategy (that progressively increases population). The options are specified for the usage with the `doctest` module. >>> import cma >>> # cma.CMAOptions() # returns all possible options >>> options = {'seed':12345, 'verb_time':0, 'ftarget': 1e-8} >>> >>> res = cma.fmin(cma.fcts.rastrigin, '2. * np.random.rand(3) - 1', 0.5, ... options, restarts=9, bipop=True) (3_w,7)-aCMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=12345) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 7 1.633489455763566e+01 1.0e+00 4.35e-01 4e-01 4e-01 2 14 9.762462950258016e+00 1.2e+00 4.12e-01 4e-01 4e-01 3 21 2.461107851413725e+01 1.4e+00 3.78e-01 3e-01 4e-01 100 700 9.949590571272680e-01 1.7e+00 5.07e-05 3e-07 5e-07 123 861 9.949590570932969e-01 1.3e+00 3.93e-06 9e-09 1e-08 termination on tolfun=1e-11 final/bestever f-value = 9.949591e-01 9.949591e-01 mean solution: [ 9.94958638e-01 -7.19265205e-10 2.09294450e-10] std deviation: [ 8.71497860e-09 8.58994807e-09 9.85585654e-09] [...] 
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 3 (seed=12349) Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec 1 5342.0 2.114883315350800e+01 1.0e+00 3.42e-02 3e-02 4e-02 2 5351.0 1.810102940125502e+01 1.4e+00 3.79e-02 3e-02 4e-02 3 5360.0 1.340222457448063e+01 1.4e+00 4.58e-02 4e-02 6e-02 50 5783.0 8.631491965616078e-09 1.6e+00 2.01e-04 8e-06 1e-05 termination on ftarget=1e-08 after 4 restarts final/bestever f-value = 8.316963e-09 8.316963e-09 mean solution: [ -3.10652459e-06 2.77935436e-06 -4.95444519e-06] std deviation: [ 1.02825265e-05 8.08348144e-06 8.47256408e-06] In either case, the method:: cma.plot(); (based on `matplotlib.pyplot`) produces a plot of the run and, if necessary:: cma.show() shows the plot in a window. Finally:: cma.savefig('myfirstrun') # savefig from matplotlib.pyplot will save the figure in a png. We can use the gradient like >>> import cma >>> res = cma.fmin(cma.fcts.rosen, np.zeros(10), 0.1, ... options = {'ftarget':1e-8,}, ... gradf=cma.fcts.grad_rosen, ... ) >>> assert cma.fcts.rosen(res[0]) < 1e-8 >>> assert res[2] < 3600 # 1% are > 3300 >>> assert res[3] < 3600 # 1% are > 3300 :See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`, `CMAOptions`, `scipy.optimize.fmin()` """ # style guides say there should be the above empty line if 1 < 3: # try: # pass on KeyboardInterrupt if not objective_function: # cma.fmin(0, 0, 0) return CMAOptions() # these opts are by definition valid fmin_options = locals().copy() # archive original options del fmin_options['objective_function'] del fmin_options['x0'] del fmin_options['sigma0'] del fmin_options['options'] del fmin_options['args'] if options is None: options = cma_default_options CMAOptions().check_attributes(options) # might modify options # checked that no options.ftarget = opts = CMAOptions(options.copy()).complement() # BIPOP-related variables: runs_with_small = 0 small_i = [] large_i = [] popsize0 = None # to be evaluated after the first iteration maxiter0 = None # to be evaluated after the first iteration base_evals = 0 irun = 0 best = BestSolution() while True: # restart loop sigma_factor = 1 # Adjust the population according to BIPOP after a restart. if not bipop: # BIPOP not in use, simply double the previous population # on restart. if irun > 0: popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small) opts['popsize'] = popsize0 * popsize_multiplier elif irun == 0: # Initial run is with "normal" population size; it is # the large population before first doubling, but its # budget accounting is the same as in case of small # population. poptype = 'small' elif sum(small_i) < sum(large_i): # An interweaved run with small population size poptype = 'small' runs_with_small += 1 # _Before_ it's used in popsize_lastlarge sigma_factor = 0.01 ** np.random.uniform() # Local search popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small) opts['popsize'] = np.floor(popsize0 * popsize_multiplier ** (np.random.uniform() ** 2)) opts['maxiter'] = min(maxiter0, 0.5 * sum(large_i) / opts['popsize']) # print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter'])) else: # A run with large population size; the population # doubling is implicit with incpopsize. 
poptype = 'large' popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small) opts['popsize'] = popsize0 * popsize_multiplier opts['maxiter'] = maxiter0 # print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter'])) # recover from a CMA object if irun == 0 and isinstance(x0, CMAEvolutionStrategy): es = x0 x0 = es.inputargs['x0'] # for the next restarts if isscalar(sigma0) and isfinite(sigma0) and sigma0 > 0: es.sigma = sigma0 # debatable whether this makes sense: sigma0 = es.inputargs['sigma0'] # for the next restarts if options is not None: es.opts.set(options) # ignore further input args and keep original options else: # default case if irun and eval(str(fmin_options['restart_from_best'])): print_warning('CAVE: restart_from_best is often not useful', verbose=opts['verbose']) es = CMAEvolutionStrategy(best.x, sigma_factor * sigma0, opts) else: es = CMAEvolutionStrategy(x0, sigma_factor * sigma0, opts) if eval_initial_x or es.opts['CMA_elitist'] == 'initial' \ or (es.opts['CMA_elitist'] and eval_initial_x is None): x = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions) es.best.update([x], es.sent_solutions, [objective_function(x, *args)], 1) es.countevals += 1 opts = es.opts # processed options, unambiguous # a hack: fmin_opts = CMAOptions(fmin_options.copy(), unchecked=True) for k in fmin_opts: # locals() cannot be modified directly, exec won't work # in 3.x, therefore fmin_opts.eval(k, loc={'N': es.N, 'popsize': opts['popsize']}, correct_key=False) append = opts['verb_append'] or es.countiter > 0 or irun > 0 # es.logger is "the same" logger, because the "identity" # is only determined by the `filenameprefix` logger = CMADataLogger(opts['verb_filenameprefix'], opts['verb_log']) logger.register(es, append).add() # no fitness values here es.logger = logger if noise_handler: noisehandler = noise_handler noise_handling = True if fmin_opts['noise_change_sigma_exponent'] > 0: es.opts['tolfacupx'] = inf else: noisehandler = NoiseHandler(es.N, 0) noise_handling = False es.noise_handler = noisehandler # the problem: this assumes that good solutions cannot take longer than bad ones: # with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel: if 1 < 3: while not es.stop(): # iteration loop # X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1) X, fit = es.ask_and_eval(objective_function, args, gradf=gradf, evaluations=noisehandler.evaluations, aggregation=np.median) # treats NaN with resampling # TODO: check args and in case use args=(noisehandler.evaluations, ) es.tell(X, fit) # prepare for next iteration if noise_handling: # it would be better to also use these f-evaluations in tell es.sigma *= noisehandler(X, fit, objective_function, es.ask, args=args)**fmin_opts['noise_change_sigma_exponent'] es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though # es.more_to_write.append(noisehandler.evaluations_just_done) if noisehandler.maxevals > noisehandler.minevals: es.more_to_write.append(noisehandler.get_evaluations()) if 1 < 3: es.sp.cmean *= exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS)) if es.sp.cmean > 1: es.sp.cmean = 1 es.disp() logger.add(# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [], modulo=1 if es.stop() and logger.modulo else None) if (opts['verb_log'] and opts['verb_plot'] and (es.countiter % max(opts['verb_plot'], 
opts['verb_log']) == 0 or es.stop())): logger.plot(324) # end while not es.stop mean_pheno = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions) fmean = objective_function(mean_pheno, *args) es.countevals += 1 es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals) best.update(es.best, es.sent_solutions) # in restarted case # es.best.update(best) this_evals = es.countevals - base_evals base_evals = es.countevals # BIPOP stats update if irun == 0: popsize0 = opts['popsize'] maxiter0 = opts['maxiter'] # XXX: This might be a bug? Reproduced from Matlab # small_i.append(this_evals) if bipop: if poptype == 'small': small_i.append(this_evals) else: # poptype == 'large' large_i.append(this_evals) # final message if opts['verb_disp']: es.result_pretty(irun, time.asctime(time.localtime()), best.f) irun += 1 # if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \ # if irun > restarts or 'ftarget' in es.stop() \ if irun - runs_with_small > fmin_opts['restarts'] or 'ftarget' in es.stop() \ or 'maxfevals' in es.stop(check=False): break opts['verb_append'] = es.countevals opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize # TODO: use rather options? opts['seed'] += 1 # while irun # es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell if 1 < 3: if irun: es.best.update(best) # TODO: there should be a better way to communicate the overall best return es.result() + (es.stop(), es, logger) else: # previously: to be removed return (best.x.copy(), best.f, es.countevals, dict((('stopdict', _CMAStopDict(es._stopdict)) , ('mean', es.gp.pheno(es.mean)) , ('std', es.sigma * es.sigma_vec * sqrt(es.dC) * es.gp.scales) , ('out', es.out) , ('opts', es.opts) # last state of options , ('cma', es) , ('inputargs', es.inputargs) )) ) # TODO refine output, can #args be flexible? # is this well usable as it is now? 
else: # except KeyboardInterrupt: # Exception, e: if eval(str(options['verb_disp'])) > 0: print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception') raise # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit # _____________________________________________________________________ # _____________________________________________________________________ # class BaseDataLogger(object): """"abstract" base class for a data logger that can be used with an `OOOptimizer` Details: attribute `modulo` is used in ``OOOptimizer.optimize`` """ def add(self, optim=None, more_data=[]): """abstract method, add a "data point" from the state of `optim` into the logger, the argument `optim` can be omitted if it was `register()`-ed before, acts like an event handler""" raise NotImplementedError def register(self, optim): """abstract method, register an optimizer `optim`, only needed if `add()` is called without a value for the `optim` argument""" self.optim = optim def disp(self): """display some data trace (not implemented)""" print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self))) def plot(self): """plot data (not implemented)""" print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self))) def data(self): """return logged data in a dictionary (not implemented)""" print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self))) # _____________________________________________________________________ # _____________________________________________________________________ # class CMADataLogger(BaseDataLogger): """data logger for class `CMAEvolutionStrategy`. The logger is identified by its name prefix and (over-)writes or reads according data files. Therefore, the logger must be considered as *global* variable with unpredictable side effects, if two loggers with the same name and on the same working folder are used at the same time. Examples ======== :: import cma es = cma.CMAEvolutionStrategy(...) logger = cma.CMADataLogger().register(es) while not es.stop(): ... logger.add() # add can also take an argument logger.plot() # or a short cut can be used: cma.plot() # plot data from logger with default name logger2 = cma.CMADataLogger('just_another_filename_prefix').load() logger2.plot() logger2.disp() :: import cma from matplotlib.pylab import * res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0) logger = res[-1] # the CMADataLogger logger.load() # by "default" data are on disk semilogy(logger.f[:,0], logger.f[:,5]) # plot f versus iteration, see file header show() Details ======= After loading data, the logger has the attributes `xmean`, `xrecent`, `std`, `f`, `D` and `corrspec` corresponding to ``xmean``, ``xrecentbest``, ``stddev``, ``fit``, ``axlen`` and ``axlencorr`` filename trails. 
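    For instance (a minimal sketch, assuming the default ``'outcmaes'`` name
    prefix and that data files exist), ``logger.f`` holds the rows of
    ``outcmaesfit.dat`` after ``logger.load()``, where column 5 is the
    per-iteration best f-value, as used in the plotting example above::

        logger = cma.CMADataLogger().load()
        last_best_f = logger.f[-1, 5]
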
:See: `disp()`, `plot()` """ default_prefix = 'outcmaes' # names = ('axlen','fit','stddev','xmean','xrecentbest') # key_names_with_annotation = ('std', 'xmean', 'xrecent') def __init__(self, name_prefix=default_prefix, modulo=1, append=False): """initialize logging of data from a `CMAEvolutionStrategy` instance, default ``modulo=1`` means logging with each call """ # super(CMAData, self).__init__({'iter':[], 'stds':[], 'D':[], # 'sig':[], 'fit':[], 'xm':[]}) # class properties: self.name_prefix = name_prefix if name_prefix \ else CMADataLogger.default_prefix if isinstance(self.name_prefix, CMAEvolutionStrategy): self.name_prefix = self.name_prefix.opts.eval( 'verb_filenameprefix') self.file_names = ('axlen', 'axlencorr', 'fit', 'stddev', 'xmean', 'xrecentbest') """used in load, however hard-coded in add""" self.key_names = ('D', 'corrspec', 'f', 'std', 'xmean', 'xrecent') """used in load, however hard-coded in plot""" self._key_names_with_annotation = ('std', 'xmean', 'xrecent') """used in load to add one data row to be modified in plot""" self.modulo = modulo """how often to record data, allows calling `add` without args""" self.append = append """append to previous data""" self.counter = 0 """number of calls to `add`""" self.last_iteration = 0 self.registered = False self.last_correlation_spectrum = None self._eigen_counter = 1 # reduce costs def data(self): """return dictionary with data. If data entries are None or incomplete, consider calling ``.load().data()`` to (re-)load the data from files first. """ d = {} for name in self.key_names: d[name] = self.__dict__.get(name, None) return d def register(self, es, append=None, modulo=None): """register a `CMAEvolutionStrategy` instance for logging, ``append=True`` appends to previous data logged under the same name, by default previous data are overwritten. """ if not isinstance(es, CMAEvolutionStrategy): raise TypeError("only class CMAEvolutionStrategy can be " + "registered for logging") self.es = es if append is not None: self.append = append if modulo is not None: self.modulo = modulo self.registered = True return self def initialize(self, modulo=None): """reset logger, overwrite original files, `modulo`: log only every modulo call""" if modulo is not None: self.modulo = modulo try: es = self.es # must have been registered except AttributeError: pass # TODO: revise usage of es... 
that this can pass raise _Error('call register() before initialize()') self.counter = 0 # number of calls of add self.last_iteration = 0 # some lines are only written if iteration>last_iteration # write headers for output fn = self.name_prefix + 'fit.dat' strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime()) try: with open(fn, 'w') as f: f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' + 'bestever, best, median, worst objective function value, ' + 'further objective values of best", ' + strseedtime + # strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do '\n') except (IOError, OSError): print('could not open file ' + fn) fn = self.name_prefix + 'axlen.dat' try: with open(fn, 'w') as f: f.write('% columns="iteration, evaluation, sigma, ' + 'max axis length, ' + ' min axis length, all principle axes lengths ' + ' (sorted square roots of eigenvalues of C)", ' + strseedtime + '\n') except (IOError, OSError): print('could not open/write file ' + fn) fn = self.name_prefix + 'axlencorr.dat' try: with open(fn, 'w') as f: f.write('% columns="iteration, evaluation, min max(neg(.)) min(pos(.))' + ' max correlation, correlation matrix principle axes lengths ' + ' (sorted square roots of eigenvalues of correlation matrix)", ' + strseedtime + '\n') except (IOError, OSError): print('could not open file ' + fn) fn = self.name_prefix + 'stddev.dat' try: with open(fn, 'w') as f: f.write('% # columns=["iteration, evaluation, sigma, void, void, ' + ' stds==sigma*sqrt(diag(C))", ' + strseedtime + '\n') except (IOError, OSError): print('could not open file ' + fn) fn = self.name_prefix + 'xmean.dat' try: with open(fn, 'w') as f: f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' + strseedtime) f.write(' # scaling_of_variables: ') if np.size(es.gp.scales) > 1: f.write(' '.join(map(str, es.gp.scales))) else: f.write(str(es.gp.scales)) f.write(', typical_x: ') if np.size(es.gp.typical_x) > 1: f.write(' '.join(map(str, es.gp.typical_x))) else: f.write(str(es.gp.typical_x)) f.write('\n') except (IOError, OSError): print('could not open/write file ' + fn) fn = self.name_prefix + 'xrecentbest.dat' try: with open(fn, 'w') as f: f.write('% # iter+eval+sigma+0+fitness+xbest, ' + strseedtime + '\n') except (IOError, OSError): print('could not open/write file ' + fn) return self # end def __init__ def load(self, filenameprefix=None): """load (or reload) data from output files, `load()` is called in `plot()` and `disp()`. Argument `filenameprefix` is the filename prefix of data to be loaded (six files), by default ``'outcmaes'``. 
Return self with (added) attributes `xrecent`, `xmean`, `f`, `D`, `std`, 'corrspec' """ if not filenameprefix: filenameprefix = self.name_prefix assert len(self.file_names) == len(self.key_names) for i in rglen((self.file_names)): fn = filenameprefix + self.file_names[i] + '.dat' try: self.__dict__[self.key_names[i]] = _fileToMatrix(fn) except: _print_warning('reading from file "' + fn + '" failed', 'load', 'CMADataLogger') try: if self.key_names[i] in self._key_names_with_annotation: # copy last row to later fill in annotation position for display self.__dict__[self.key_names[i]].append( self.__dict__[self.key_names[i]][-1]) self.__dict__[self.key_names[i]] = \ array(self.__dict__[self.key_names[i]], copy=False) except: _print_warning('no data for %s' % fn, 'load', 'CMADataLogger') return self def add(self, es=None, more_data=[], modulo=None): """append some logging data from `CMAEvolutionStrategy` class instance `es`, if ``number_of_times_called % modulo`` equals to zero, never if ``modulo==0``. The sequence ``more_data`` must always have the same length. When used for a different optimizer class, this function can be (easily?) adapted by changing the assignments under INTERFACE in the implemention. """ mod = modulo if modulo is not None else self.modulo self.counter += 1 if mod == 0 or (self.counter > 3 and (self.counter - 1) % mod): return if es is None: try: es = self.es # must have been registered except AttributeError : raise _Error('call `add` with argument `es` or ``register(es)`` before ``add()``') elif not self.registered: self.register(es) if 1 < 3: if self.counter == 1 and not self.append and self.modulo != 0: self.initialize() # write file headers self.counter = 1 # --- INTERFACE, can be changed if necessary --- if not isinstance(es, CMAEvolutionStrategy): # not necessary _print_warning('type CMAEvolutionStrategy expected, found ' + str(type(es)), 'add', 'CMADataLogger') evals = es.countevals iteration = es.countiter eigen_decompositions = es.count_eigen sigma = es.sigma axratio = es.D.max() / es.D.min() xmean = es.mean # TODO: should be optionally phenotype? fmean_noise_free = es.fmean_noise_free fmean = es.fmean # TODO: find a different way to communicate current x and f? 
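            # the local values gathered in this INTERFACE block, together with
            # the fitness values read just below, are the only quantities
            # written to the .dat files; when logging a different optimizer
            # class, these assignments are the place to adapt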
try: besteverf = es.best.f bestf = es.fit.fit[0] worstf = es.fit.fit[-1] medianf = es.fit.fit[es.sp.popsize // 2] except: if iteration > 0: # first call without f-values is OK raise try: xrecent = es.best.last.x except: xrecent = None maxD = es.D.max() minD = es.D.min() diagD = es.D diagC = es.sigma * es.sigma_vec * sqrt(es.dC) more_to_write = es.more_to_write es.more_to_write = [] # --- end interface --- try: # fit if iteration > self.last_iteration: fn = self.name_prefix + 'fit.dat' with open(fn, 'a') as f: f.write(str(iteration) + ' ' + str(evals) + ' ' + str(sigma) + ' ' + str(axratio) + ' ' + str(besteverf) + ' ' + '%.16e' % bestf + ' ' + str(medianf) + ' ' + str(worstf) + ' ' # + str(es.sp.popsize) + ' ' # + str(10**es.noiseS) + ' ' # + str(es.sp.cmean) + ' ' + ' '.join(str(i) for i in more_to_write) + ' ' + ' '.join(str(i) for i in more_data) + ' ' + '\n') # axlen fn = self.name_prefix + 'axlen.dat' if 1 < 3: with open(fn, 'a') as f: # does not rely on reference counting f.write(str(iteration) + ' ' + str(evals) + ' ' + str(sigma) + ' ' + str(maxD) + ' ' + str(minD) + ' ' + ' '.join(map(str, diagD)) + '\n') # correlation matrix eigenvalues if 1 < 3: fn = self.name_prefix + 'axlencorr.dat' c = es.correlation_matrix() if c is not None: # accept at most 50% internal loss if self._eigen_counter < eigen_decompositions / 2: self.last_correlation_spectrum = \ sorted(es.opts['CMA_eigenmethod'](c)[0]**0.5) self._eigen_counter += 1 if self.last_correlation_spectrum is None: self.last_correlation_spectrum = len(diagD) * [1] c = c[c < 1 - 1e-14] # remove diagonal elements c[c > 1 - 1e-14] = 1 - 1e-14 c[c < -1 + 1e-14] = -1 + 1e-14 c_min = np.min(c) c_max = np.max(c) if np.min(abs(c)) == 0: c_medminus = 0 # thereby zero "is negative" c_medplus = 0 # thereby zero "is positive" else: c_medminus = c[np.argmin(1/c)] # c is flat c_medplus = c[np.argmax(1/c)] # c is flat with open(fn, 'a') as f: f.write(str(iteration) + ' ' + str(evals) + ' ' + str(c_min) + ' ' + str(c_medminus) + ' ' # the one closest to 0 + str(c_medplus) + ' ' # the one closest to 0 + str(c_max) + ' ' + ' '.join(map(str, self.last_correlation_spectrum)) + '\n') # stddev fn = self.name_prefix + 'stddev.dat' with open(fn, 'a') as f: f.write(str(iteration) + ' ' + str(evals) + ' ' + str(sigma) + ' ' + '0 0 ' + ' '.join(map(str, diagC)) + '\n') # xmean fn = self.name_prefix + 'xmean.dat' with open(fn, 'a') as f: f.write(str(iteration) + ' ' + str(evals) + ' ' # + str(sigma) + ' ' + '0 ' + str(fmean_noise_free) + ' ' + str(fmean) + ' ' # TODO: this does not make sense # TODO should be optional the phenotyp? 
+ ' '.join(map(str, xmean)) + '\n') # xrecent fn = self.name_prefix + 'xrecentbest.dat' if iteration > 0 and xrecent is not None: with open(fn, 'a') as f: f.write(str(iteration) + ' ' + str(evals) + ' ' + str(sigma) + ' ' + '0 ' + str(bestf) + ' ' + ' '.join(map(str, xrecent)) + '\n') except (IOError, OSError): if iteration <= 1: _print_warning(('could not open/write file %s: ' % fn, sys.exc_info())) self.last_iteration = iteration def closefig(self): pyplot.close(self.fighandle) def save_to(self, nameprefix, switch=False): """saves logger data to a different set of files, for ``switch=True`` also the loggers name prefix is switched to the new value """ if not nameprefix or not isinstance(nameprefix, basestring): raise _Error('filename prefix must be a nonempty string') if nameprefix == self.default_prefix: raise _Error('cannot save to default name "' + nameprefix + '...", chose another name') if nameprefix == self.name_prefix: return for name in self.file_names: open(nameprefix + name + '.dat', 'w').write(open(self.name_prefix + name + '.dat').read()) if switch: self.name_prefix = nameprefix def select_data(self, iteration_indices): """keep only data of `iteration_indices`""" dat = self iteridx = iteration_indices dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :] dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :] try: iteridx = list(iteridx) iteridx.append(iteridx[-1]) # last entry is artificial except: pass dat.std = dat.std[np.where([x in iteridx for x in dat.std[:, 0]])[0], :] dat.xmean = dat.xmean[np.where([x in iteridx for x in dat.xmean[:, 0]])[0], :] try: dat.xrecent = dat.x[np.where([x in iteridx for x in dat.xrecent[:, 0]])[0], :] except AttributeError: pass try: dat.corrspec = dat.x[np.where([x in iteridx for x in dat.corrspec[:, 0]])[0], :] except AttributeError: pass def plot(self, fig=None, iabscissa=1, iteridx=None, plot_mean=False, # was: plot_mean=True foffset=1e-19, x_opt=None, fontsize=9): """plot data from a `CMADataLogger` (using the files written by the logger). Arguments --------- `fig` figure number, by default 325 `iabscissa` ``0==plot`` versus iteration count, ``1==plot`` versus function evaluation number `iteridx` iteration indices to plot Return `CMADataLogger` itself. Examples -------- :: import cma logger = cma.CMADataLogger() # with default name # try to plot the "default logging" data (e.g. # from previous fmin calls, which is essentially what # also cma.plot() does) logger.plot() cma.savefig('fig325.png') # save current figure logger.closefig() Dependencies: matlabplotlib/pyplot. """ try: # pyplot: prodedural interface for matplotlib from matplotlib.pyplot import figure, subplot, hold, gcf except ImportError: ImportError('could not find matplotlib.pyplot module, function plot() is not available') return if fig is None: fig = 325 if iabscissa not in (0, 1): iabscissa = 1 self.load() # better load only conditionally? 
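# Added usage sketch: the `iteridx` argument of plot() (applied through
# select_data above) restricts plotting to a subset of iterations; this
# assumes the logger's data files already exist under the default prefix.
logger = CMADataLogger()             # default prefix 'outcmaes'
logger.plot(iteridx=range(10, 100))  # plot only iterations 10..99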
dat = self dat.x = dat.xmean # this is the genotyp if not plot_mean: if len(dat.x) < 2: print('not enough data to plot recent x') else: dat.x = dat.xrecent # index out some data if iteridx is not None: self.select_data(iteridx) if len(dat.f) <= 1: print('nothing to plot') return # not in use anymore, see formatter above # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.))) # dfit(dfit<1e-98) = NaN; # TODO: if abscissa==0 plot in chunks, ie loop over subsets where # dat.f[:,0]==countiter is monotonous figure(fig) self._enter_plotting(fontsize) self.fighandle = gcf() # fighandle.number subplot(2, 2, 1) self.plot_divers(iabscissa, foffset) pyplot.xlabel('') # Scaling subplot(2, 2, 3) self.plot_axes_scaling(iabscissa) # spectrum of correlation matrix figure(fig) subplot(2, 2, 2) if plot_mean: self.plot_mean(iabscissa, x_opt) else: self.plot_xrecent(iabscissa, x_opt) pyplot.xlabel('') # pyplot.xticks(xticklocs) # standard deviations subplot(2, 2, 4) self.plot_stds(iabscissa) self._finalize_plotting() return self def plot_all(self, fig=None, iabscissa=1, iteridx=None, foffset=1e-19, x_opt=None, fontsize=9): """ plot data from a `CMADataLogger` (using the files written by the logger). Arguments --------- `fig` figure number, by default 425 `iabscissa` ``0==plot`` versus iteration count, ``1==plot`` versus function evaluation number `iteridx` iteration indices to plot Return `CMADataLogger` itself. Examples -------- :: import cma logger = cma.CMADataLogger() # with default name # try to plot the "default logging" data (e.g. # from previous fmin calls, which is essentially what # also cma.plot() does) logger.plot_all() cma.savefig('fig425.png') # save current figure logger.closefig() Dependencies: matlabplotlib/pyplot. """ try: # pyplot: prodedural interface for matplotlib from matplotlib.pyplot import figure, subplot, gcf except ImportError: ImportError('could not find matplotlib.pyplot module, function plot() is not available') return if fig is None: fig = 426 if iabscissa not in (0, 1): iabscissa = 1 self.load() dat = self # index out some data if iteridx is not None: self.select_data(iteridx) if len(dat.f) == 0: print('nothing to plot') return # not in use anymore, see formatter above # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.))) # dfit(dfit<1e-98) = NaN; # TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous figure(fig) self._enter_plotting(fontsize) self.fighandle = gcf() # fighandle.number if 1 < 3: subplot(2, 3, 1) self.plot_divers(iabscissa, foffset) pyplot.xlabel('') # standard deviations subplot(2, 3, 4) self.plot_stds(iabscissa) # Scaling subplot(2, 3, 2) self.plot_axes_scaling(iabscissa) pyplot.xlabel('') # spectrum of correlation matrix subplot(2, 3, 5) self.plot_correlations(iabscissa) # x-vectors subplot(2, 3, 3) self.plot_xrecent(iabscissa, x_opt) pyplot.xlabel('') subplot(2, 3, 6) self.plot_mean(iabscissa, x_opt) self._finalize_plotting() return self def plot_axes_scaling(self, iabscissa=1): if not hasattr(self, 'D'): self.load() dat = self self._enter_plotting() pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b') pyplot.hold(True) pyplot.grid(True) ax = array(pyplot.axis()) # ax[1] = max(minxend, ax[1]) pyplot.axis(ax) pyplot.title('Principle Axes Lengths') # pyplot.xticks(xticklocs) self._xlabel(iabscissa) self._finalize_plotting() return self def plot_stds(self, iabscissa=1): if not hasattr(self, 'std'): self.load() dat = self self._enter_plotting() # remove sigma from 
stds (graphs become much better readible) dat.std[:, 5:] = np.transpose(dat.std[:, 5:].T / dat.std[:, 2].T) # ax = array(pyplot.axis()) # ax[1] = max(minxend, ax[1]) # axis(ax) if 1 < 2 and dat.std.shape[1] < 100: # use fake last entry in x and std for line extension-annotation minxend = int(1.06 * dat.std[-2, iabscissa]) # minxend = int(1.06 * dat.x[-2, iabscissa]) dat.std[-1, iabscissa] = minxend # TODO: should be ax[1] idx = np.argsort(dat.std[-2, 5:]) idx2 = np.argsort(idx) dat.std[-1, 5 + idx] = np.logspace(np.log10(np.min(dat.std[:, 5:])), np.log10(np.max(dat.std[:, 5:])), dat.std.shape[1] - 5) dat.std[-1, iabscissa] = minxend # TODO: should be ax[1] pyplot.semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-') pyplot.hold(True) ax = array(pyplot.axis()) yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1] - 5) # yyl = np.sort(dat.std[-1,5:]) idx = np.argsort(dat.std[-1, 5:]) idx2 = np.argsort(idx) # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator # vertical separator pyplot.plot(np.dot(dat.std[-2, iabscissa], [1, 1]), array([ax[2] + 1e-6, ax[3] - 1e-6]), # array([np.min(dat.std[:, 5:]), np.max(dat.std[:, 5:])]), 'k-') pyplot.hold(True) # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point for i in rglen((idx)): # text(ax[1], yy[i], ' '+str(idx[i])) pyplot.text(dat.std[-1, iabscissa], dat.std[-1, 5 + i], ' ' + str(i)) else: pyplot.semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-') pyplot.hold(True) pyplot.grid(True) pyplot.title(r'Standard Deviations $\times$ $\sigma^{-1}$ in All Coordinates') # pyplot.xticks(xticklocs) self._xlabel(iabscissa) self._finalize_plotting() return self def plot_mean(self, iabscissa=1, x_opt=None, annotations=None): if not hasattr(self, 'xmean'): self.load() self.x = self.xmean self._plot_x(iabscissa, x_opt, 'mean', annotations=annotations) self._xlabel(iabscissa) return self def plot_xrecent(self, iabscissa=1, x_opt=None, annotations=None): if not hasattr(self, 'xrecent'): self.load() self.x = self.xrecent self._plot_x(iabscissa, x_opt, 'curr best', annotations=annotations) self._xlabel(iabscissa) return self def plot_correlations(self, iabscissa=1): """spectrum of correlation matrix and largest correlation""" if not hasattr(self, 'corrspec'): self.load() if len(self.corrspec) < 2: return self x = self.corrspec[:, iabscissa] y = self.corrspec[:, 6:] # principle axes ys = self.corrspec[:, :6] # "special" values from matplotlib.pyplot import semilogy, hold, text, grid, axis, title self._enter_plotting() semilogy(x, y, '-c') hold(True) semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r') text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio') if ys is not None: semilogy(x, 1 + ys[:, 2], '-b') text(x[-1], 1 + ys[-1, 2], '1 + min(corr)') semilogy(x, 1 - ys[:, 5], '-b') text(x[-1], 1 - ys[-1, 5], '1 - max(corr)') semilogy(x[:], 1 + ys[:, 3], '-k') text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)') semilogy(x[:], 1 - ys[:, 4], '-k') text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)') grid(True) ax = array(axis()) # ax[1] = max(minxend, ax[1]) axis(ax) title('Spectrum (roots) of correlation matrix') # pyplot.xticks(xticklocs) self._xlabel(iabscissa) self._finalize_plotting() return self def plot_divers(self, iabscissa=1, foffset=1e-19): """plot fitness, sigma, axis ratio... 
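# Added illustration: the individual panels drawn by plot()/plot_all() can
# also be produced on their own via the methods above, assuming matplotlib
# is available and the logger's data files exist:
logger = CMADataLogger()       # default prefix 'outcmaes'
logger.plot_axes_scaling()     # principal axis lengths only
logger.plot_stds()             # coordinate-wise standard deviations / sigma
logger.plot_correlations()     # spectrum of the correlation matrix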
:param iabscissa: 0 means vs evaluations, 1 means vs iterations :param foffset: added to f-value :See: `plot()` """ from matplotlib.pyplot import semilogy, hold, grid, \ axis, title, text fontsize = pyplot.rcParams['font.size'] if not hasattr(self, 'f'): self.load() dat = self minfit = min(dat.f[:, 5]) dfit = dat.f[:, 5] - minfit # why not using idx? dfit[dfit < 1e-98] = np.NaN self._enter_plotting() if dat.f.shape[1] > 7: # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k') semilogy(dat.f[:, iabscissa], abs(dat.f[:, [6, 7]]) + foffset, '-k') hold(True) # (larger indices): additional fitness data, for example constraints values if dat.f.shape[1] > 8: # dd = abs(dat.f[:,7:]) + 10*foffset # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be semilogy(dat.f[:, iabscissa], np.abs(dat.f[:, 8:]) + 10 * foffset, 'y') hold(True) idx = np.where(dat.f[:, 5] > 1e-98)[0] # positive values semilogy(dat.f[idx, iabscissa], dat.f[idx, 5] + foffset, '.b') hold(True) grid(True) semilogy(dat.f[:, iabscissa], abs(dat.f[:, 5]) + foffset, '-b') text(dat.f[-1, iabscissa], abs(dat.f[-1, 5]) + foffset, r'$|f_\mathsf{best}|$', fontsize=fontsize + 2) # negative f-values, dots sgn = np.sign(dat.f[:, 5]) sgn[np.abs(dat.f[:, 5]) < 1e-98] = 0 idx = np.where(sgn < 0)[0] semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset, '.m') # , markersize=5 # lines between negative f-values dsgn = np.diff(sgn) start_idx = 1 + np.where((dsgn < 0) * (sgn[1:] < 0))[0] stop_idx = 1 + np.where(dsgn > 0)[0] if sgn[0] < 0: start_idx = np.concatenate(([0], start_idx)) for istart in start_idx: istop = stop_idx[stop_idx > istart] istop = istop[0] if len(istop) else 0 idx = xrange(istart, istop if istop else dat.f.shape[0]) if len(idx) > 1: semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset, 'm') # , markersize=5 # lines between positive and negative f-values # TODO: the following might plot values very close to zero if istart > 0: # line to the left of istart semilogy(dat.f[istart-1:istart+1, iabscissa], abs(dat.f[istart-1:istart+1, 5]) + foffset, '--m') if istop: # line to the left of istop semilogy(dat.f[istop-1:istop+1, iabscissa], abs(dat.f[istop-1:istop+1, 5]) + foffset, '--m') # mark the respective first positive values semilogy(dat.f[istop, iabscissa], abs(dat.f[istop, 5]) + foffset, '.b', markersize=7) # mark the respective first negative values semilogy(dat.f[istart, iabscissa], abs(dat.f[istart, 5]) + foffset, '.r', markersize=7) # standard deviations std semilogy(dat.std[:-1, iabscissa], np.vstack([list(map(max, dat.std[:-1, 5:])), list(map(min, dat.std[:-1, 5:]))]).T, '-m', linewidth=2) text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std', fontsize=fontsize) text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std', fontsize=fontsize) # delta-fitness in cyan idx = isfinite(dfit) if 1 < 3: idx_nan = np.where(idx == False)[0] # gaps if not len(idx_nan): # should never happen semilogy(dat.f[:, iabscissa][idx], dfit[idx], '-c') else: i_start = 0 for i_end in idx_nan: if i_end > i_start: semilogy(dat.f[:, iabscissa][i_start:i_end], dfit[i_start:i_end], '-c') i_start = i_end + 1 if len(dfit) > idx_nan[-1] + 1: semilogy(dat.f[:, iabscissa][idx_nan[-1]+1:], dfit[idx_nan[-1]+1:], '-c') text(dat.f[idx, iabscissa][-1], dfit[idx][-1], r'$f_\mathsf{best} - \min(f)$', fontsize=fontsize + 2) # overall minimum i = np.argmin(dat.f[:, 5]) semilogy(dat.f[i, iabscissa], np.abs(dat.f[i, 5]), 'ro', markersize=9) semilogy(dat.f[i, iabscissa], dfit[idx][np.argmin(dfit[idx])] + 1e-98, 'ro', 
markersize=9) # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd') # AR and sigma semilogy(dat.f[:, iabscissa], dat.f[:, 3], '-r') # AR semilogy(dat.f[:, iabscissa], dat.f[:, 2], '-g') # sigma text(dat.f[-1, iabscissa], dat.f[-1, 3], r'axis ratio', fontsize=fontsize) text(dat.f[-1, iabscissa], dat.f[-1, 2] / 1.5, r'$\sigma$', fontsize=fontsize+3) ax = array(axis()) # ax[1] = max(minxend, ax[1]) axis(ax) text(ax[0] + 0.01, ax[2], # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))), '.min($f$)=' + repr(minfit)) #'.f_recent=' + repr(dat.f[-1, 5])) # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)') # title(r'blue:$\mathrm{abs}(f)$, cyan:$f - \min(f)$, green:$\sigma$, red:axis ratio', # fontsize=fontsize - 0.0) title(r'$|f_{\mathrm{best},\mathrm{med},\mathrm{worst}}|$, $f - \min(f)$, $\sigma$, axis ratio') # if __name__ != 'cma': # should be handled by the caller self._xlabel(iabscissa) self._finalize_plotting() return self def _enter_plotting(self, fontsize=9): """assumes that a figure is open """ # interactive_status = matplotlib.is_interactive() self.original_fontsize = pyplot.rcParams['font.size'] pyplot.rcParams['font.size'] = fontsize pyplot.hold(False) # opens a figure window, if non exists pyplot.ioff() def _finalize_plotting(self): pyplot.ion() pyplot.draw() # update "screen" pyplot.show() # show figure # matplotlib.interactive(interactive_status) pyplot.rcParams['font.size'] = self.original_fontsize def _xlabel(self, iabscissa=1): pyplot.xlabel('iterations' if iabscissa == 0 else 'function evaluations') def _plot_x(self, iabscissa=1, x_opt=None, remark=None, annotations=None): """If ``x_opt is not None`` the difference to x_opt is plotted in log scale """ if not hasattr(self, 'x'): _print_warning('no x-attributed found, use methods ' + 'plot_xrecent or plot_mean', 'plot_x', 'CMADataLogger') return from matplotlib.pyplot import plot, semilogy, hold, text, grid, axis, title dat = self # for convenience and historical reasons # modify fake last entry in x for line extension-annotation if dat.x.shape[1] < 100: minxend = int(1.06 * dat.x[-2, iabscissa]) # write y-values for individual annotation into dat.x dat.x[-1, iabscissa] = minxend # TODO: should be ax[1] if x_opt is None: idx = np.argsort(dat.x[-2, 5:]) idx2 = np.argsort(idx) dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]), np.max(dat.x[:, 5:]), dat.x.shape[1] - 5) else: # y-axis is in log xdat = np.abs(dat.x[:, 5:] - np.array(x_opt, copy=False)) idx = np.argsort(xdat[-2, :]) idx2 = np.argsort(idx) xdat[-1, idx] = np.logspace(np.log10(np.min(abs(xdat[xdat!=0]))), np.log10(np.max(np.abs(xdat))), dat.x.shape[1] - 5) else: minxend = 0 self._enter_plotting() if x_opt is not None: # TODO: differentate neg and pos? 
semilogy(dat.x[:, iabscissa], abs(xdat), '-') else: plot(dat.x[:, iabscissa], dat.x[:, 5:], '-') hold(True) grid(True) ax = array(axis()) # ax[1] = max(minxend, ax[1]) axis(ax) ax[1] -= 1e-6 # to prevent last x-tick annotation, probably superfluous if dat.x.shape[1] < 100: yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5) # yyl = np.sort(dat.x[-1,5:]) if x_opt is not None: # semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1, 5:]), yy[idx2]], 'k-') # line from last data point semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]), array([ax[2] * (1+1e-6), ax[3] / (1+1e-6)]), 'k-') else: # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point plot(np.dot(dat.x[-2, iabscissa], [1, 1]), array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-') # plot(array([dat.x[-1, iabscissa], ax[1]]), # reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k') for i in rglen(idx): # TODOqqq: annotate phenotypic value!? # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]])) text(dat.x[-1, iabscissa], dat.x[-1, 5 + i] if x_opt is None else np.abs(xdat[-1, i]), ('x(' + str(i) + ')=' if annotations is None else str(i) + ':' + annotations[i] + "=") + str(dat.x[-2, 5 + i])) i = 2 # find smallest i where iteration count differs (in case the same row appears twice) while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]: i += 1 title('Object Variables (' + (remark + ', ' if remark is not None else '') + str(dat.x.shape[1] - 5) + '-D, popsize~' + (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0]))) if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA') + ')') self._finalize_plotting() def downsampling(self, factor=10, first=3, switch=True, verbose=True): """ rude downsampling of a `CMADataLogger` data file by `factor`, keeping also the first `first` entries. This function is a stump and subject to future changes. Return self. Arguments --------- - `factor` -- downsampling factor - `first` -- keep first `first` entries - `switch` -- switch the new logger to the downsampled logger original_name+'down' Details ------- ``self.name_prefix+'down'`` files are written Example ------- :: import cma cma.downsampling() # takes outcmaes* files cma.plot('outcmaesdown') """ newprefix = self.name_prefix + 'down' for name in self.file_names: f = open(newprefix + name + '.dat', 'w') iline = 0 cwritten = 0 for line in open(self.name_prefix + name + '.dat'): if iline < first or iline % factor == 0: f.write(line) cwritten += 1 iline += 1 f.close() if verbose and iline > first: print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat') if switch: self.name_prefix += 'down' return self # ____________________________________________________________ # ____________________________________________________________ # def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]): """displays selected data from (files written by) the class `CMADataLogger`. Arguments --------- `idx` indices corresponding to rows in the data file; if idx is a scalar (int), the first two, then every idx-th, and the last three rows are displayed. Too large index values are removed. 
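# Added usage sketch for downsampling() above; note that the docstring
# example calls cma.downsampling(), but downsampling is a CMADataLogger
# method, so the call goes through a logger instance:
import cma
logger = cma.CMADataLogger()      # default prefix 'outcmaes'
logger.downsampling(factor=10)    # writes 'outcmaesdown*.dat' and switches the prefix
cma.plot('outcmaesdown')          # plot the smaller, downsampled files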
Example ------- >>> import cma, numpy as np >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data >>> assert res[1] < 1e-9 >>> assert res[2] < 4400 >>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data >>> l.disp([0,-1]) # first and last >>> l.disp(20) # some first/last and every 20-th line >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last >>> l.disp(np.r_[0, -10:0]) # first and ten last >>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...) Details ------- The data line with the best f-value is displayed as last line. :See: `disp()` """ filenameprefix = self.name_prefix def printdatarow(dat, iteration): """print data of iteration i""" i = np.where(dat.f[:, 0] == iteration)[0][0] j = np.where(dat.std[:, 0] == iteration)[0][0] print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) + ' %5.1e' % (dat.f[i, 3]) + ' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:])) dat = CMADataLogger(filenameprefix).load() ndata = dat.f.shape[0] # map index to iteration number, is difficult if not all iteration numbers exist # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long # otherwise: if idx is None: idx = 100 if isscalar(idx): # idx = np.arange(0, ndata, idx) if idx: idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0] else: idx = np.r_[0, 1, -3:0] idx = array(idx) idx = idx[idx < ndata] idx = idx[-idx <= ndata] iters = dat.f[idx, 0] idxbest = np.argmin(dat.f[:, 5]) iterbest = dat.f[idxbest, 0] if len(iters) == 1: printdatarow(dat, iters[0]) else: self.disp_header() for i in iters: printdatarow(dat, i) self.disp_header() printdatarow(dat, iterbest) sys.stdout.flush() def disp_header(self): heading = 'Iterat Nfevals function value axis ratio maxstd minstd' print(heading) # end class CMADataLogger # ____________________________________________________________ # ____________________________________________________________ # last_figure_number = 324 def plot(name=None, fig=None, abscissa=1, iteridx=None, plot_mean=False, foffset=1e-19, x_opt=None, fontsize=9): """ plot data from files written by a `CMADataLogger`, the call ``cma.plot(name, **argsdict)`` is a shortcut for ``cma.CMADataLogger(name).plot(**argsdict)`` Arguments --------- `name` name of the logger, filename prefix, None evaluates to the default 'outcmaes' `fig` filename or figure number, or both as a tuple (any order) `abscissa` 0==plot versus iteration count, 1==plot versus function evaluation number `iteridx` iteration indices to plot Return `None` Examples -------- :: cma.plot(); # the optimization might be still # running in a different shell cma.savefig('fig325.png') cma.closefig() cdl = cma.CMADataLogger().downsampling().plot() # in case the file sizes are large Details ------- Data from codes in other languages (C, Java, Matlab, Scilab) have the same format and can be plotted just the same. :See: `CMADataLogger`, `CMADataLogger.plot()` """ global last_figure_number if not fig: last_figure_number += 1 fig = last_figure_number if isinstance(fig, (int, float)): last_figure_number = fig CMADataLogger(name).plot(fig, abscissa, iteridx, plot_mean, foffset, x_opt, fontsize) def disp(name=None, idx=None): """displays selected data from (files written by) the class `CMADataLogger`. The call ``cma.disp(name, idx)`` is a shortcut for ``cma.CMADataLogger(name).disp(idx)``. 
Arguments --------- `name` name of the logger, filename prefix, `None` evaluates to the default ``'outcmaes'`` `idx` indices corresponding to rows in the data file; by default the first five, then every 100-th, and the last 10 rows. Too large index values are removed. Examples -------- :: import cma, numpy # assume some data are available from previous runs cma.disp(None,numpy.r_[0,-1]) # first and last cma.disp(None,numpy.r_[0:1e9:100,-1]) # every 100-th and last cma.disp(idx=numpy.r_[0,-10:0]) # first and ten last cma.disp(idx=numpy.r_[0:1e9:1e3,-10:0]) :See: `CMADataLogger.disp()` """ return CMADataLogger(name if name else CMADataLogger.default_prefix ).disp(idx) # ____________________________________________________________ def _fileToMatrix(file_name): """rudimentary method to read in data from a file""" # TODO: np.loadtxt() might be an alternative # try: if 1 < 3: lres = [] for line in open(file_name, 'r').readlines(): if len(line) > 0 and line[0] not in ('%', '#'): lres.append(list(map(float, line.split()))) res = lres while res != [] and res[0] == []: # remove further leading empty lines del res[0] return res # except: print('could not read file ' + file_name) # ____________________________________________________________ # ____________________________________________________________ class NoiseHandler(object): """Noise handling according to [Hansen et al 2009, A Method for Handling Uncertainty in Evolutionary Optimization...] The interface of this class is yet versatile and subject to changes. The noise handling follows closely [Hansen et al 2009] in the measurement part, but the implemented treatment is slightly different: for ``noiseS > 0``, ``evaluations`` (time) and sigma are increased by ``alpha``. For ``noiseS < 0``, ``evaluations`` (time) is decreased by ``alpha**(1/4)``. The (second) parameter ``evaluations`` defines the maximal number of evaluations for a single fitness computation. If it is a list, the smallest element defines the minimal number and if the list has three elements, the median value is the start value for ``evaluations``. ``NoiseHandler`` serves to control the noise via steps-size increase and number of re-evaluations, for example via ``fmin`` or with ``ask_and_eval()``. Examples -------- Minimal example together with `fmin` on a non-noisy function: >>> import cma >>> cma.fmin(cma.felli, 7 * [1], 1, noise_handler=cma.NoiseHandler(7)) in dimension 7 (which needs to be given tice). More verbose example in the optimization loop with a noisy function defined in ``func``: >>> import cma, numpy as np >>> func = lambda x: cma.fcts.sphere(x) * (1 + 4 * np.random.randn() / len(x)) # cma.Fcts.noisysphere >>> es = cma.CMAEvolutionStrategy(np.ones(10), 1) >>> nh = cma.NoiseHandler(es.N, maxevals=[1, 1, 30]) >>> while not es.stop(): ... X, fit_vals = es.ask_and_eval(func, evaluations=nh.evaluations) ... es.tell(X, fit_vals) # prepare for next iteration ... es.sigma *= nh(X, fit_vals, func, es.ask) # see method __call__ ... es.countevals += nh.evaluations_just_done # this is a hack, not important though ... es.logger.add(more_data = [nh.evaluations, nh.noiseS]) # add a data point ... es.disp() ... # nh.maxevals = ... 
it might be useful to start with smaller values and then increase >>> print(es.stop()) >>> print(es.result()[-2]) # take mean value, the best solution is totally off >>> assert sum(es.result()[-2]**2) < 1e-9 >>> print(X[np.argmin(fit_vals)]) # not bad, but probably worse than the mean >>> # es.logger.plot() The command ``logger.plot()`` will plot the logged data. The noise options of `fmin()` control a `NoiseHandler` instance similar to this example. The command ``cma.CMAOptions('noise')`` lists in effect the parameters of `__init__` apart from ``aggregate``. Details ------- The parameters reevals, theta, c_s, and alpha_t are set differently than in the original publication, see method `__init__()`. For a very small population size, say popsize <= 5, the measurement technique based on rank changes is likely to fail. Missing Features ---------------- In case no noise is found, ``self.lam_reeval`` should be adaptive and get at least as low as 1 (however the possible savings from this are rather limited). Another option might be to decide during the first call by a quantitative analysis of fitness values whether ``lam_reeval`` is set to zero. More generally, an automatic noise mode detection might also set the covariance matrix learning rates to smaller values. :See: `fmin()`, `CMAEvolutionStrategy.ask_and_eval()` """ # TODO: for const additive noise a better version might be with alphasigma also used for sigma-increment, # while all other variance changing sources are removed (because they are intrinsically biased). Then # using kappa to get convergence (with unit sphere samples): noiseS=0 leads to a certain kappa increasing rate? def __init__(self, N, maxevals=[1, 1, 1], aggregate=np.median, reevals=None, epsilon=1e-7, parallel=False): """parameters are `N` dimension, (only) necessary to adjust the internal "alpha"-parameters `maxevals` maximal value for ``self.evaluations``, where ``self.evaluations`` function calls are aggregated for noise treatment. With ``maxevals == 0`` the noise handler is (temporarily) "switched off". If `maxevals` is a list, min value and (for >2 elements) median are used to define minimal and initial value of ``self.evaluations``. Choosing ``maxevals > 1`` is only reasonable, if also the original ``fit`` values (that are passed to `__call__`) are computed by aggregation of ``self.evaluations`` values (otherwise the values are not comparable), as it is done within `fmin()`. `aggregate` function to aggregate single f-values to a 'fitness', e.g. ``np.median``. `reevals` number of solutions to be reevaluated for noise measurement, can be a float, by default set to ``2 + popsize/20``, where ``popsize = len(fit)`` in ``__call__``. zero switches noise handling off. 
`epsilon` multiplier for perturbation of the reevaluated solutions `parallel` a single f-call with all resampled solutions :See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy.ask_and_eval()` """ self.lam_reeval = reevals # 2 + popsize/20, see method indices(), originally 2 + popsize/10 self.epsilon = epsilon self.parallel = parallel ## meta_parameters.noise_theta == 0.5 self.theta = 0.5 # 0.5 # originally 0.2 self.cum = 0.3 # originally 1, 0.3 allows one disagreement of current point with resulting noiseS ## meta_parameters.noise_alphasigma == 2.0 self.alphasigma = 1 + 2.0 / (N + 10) # 2, unit sphere sampling: 1 + 1 / (N + 10) ## meta_parameters.noise_alphaevals == 2.0 self.alphaevals = 1 + 2.0 / (N + 10) # 2, originally 1.5 ## meta_parameters.noise_alphaevalsdown_exponent == -0.25 self.alphaevalsdown = self.alphaevals** -0.25 # originally 1/1.5 # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz self.evaluations = 1 # to aggregate for a single f-evaluation self.minevals = 1 self.maxevals = int(np.max(maxevals)) if hasattr(maxevals, '__contains__'): # i.e. can deal with ``in`` if len(maxevals) > 1: self.minevals = min(maxevals) self.evaluations = self.minevals if len(maxevals) > 2: self.evaluations = np.median(maxevals) ## meta_parameters.noise_aggregate == None self.f_aggregate = aggregate if not None else {1: np.median, 2: np.mean}[ None ] self.evaluations_just_done = 0 # actually conducted evals, only for documentation self.noiseS = 0 def __call__(self, X, fit, func, ask=None, args=()): """proceed with noise measurement, set anew attributes ``evaluations`` (proposed number of evaluations to "treat" noise) and ``evaluations_just_done`` and return a factor for increasing sigma. Parameters ---------- `X` a list/sequence/vector of solutions `fit` the respective list of function values `func` the objective function, ``fit[i]`` corresponds to ``func(X[i], *args)`` `ask` a method to generate a new, slightly disturbed solution. The argument is (only) mandatory if ``epsilon`` is not zero, see `__init__()`. `args` optional additional arguments to `func` Details ------- Calls the methods ``reeval()``, ``update_measure()`` and ``treat()`` in this order. ``self.evaluations`` is adapted within the method `treat()`. """ self.evaluations_just_done = 0 if not self.maxevals or self.lam_reeval == 0: return 1.0 res = self.reeval(X, fit, func, ask, args) if not len(res): return 1.0 self.update_measure() return self.treat() def get_evaluations(self): """return ``self.evaluations``, the number of evalutions to get a single fitness measurement""" return self.evaluations def treat(self): """adapt self.evaluations depending on the current measurement value and return ``sigma_fac in (1.0, self.alphasigma)`` """ if self.noiseS > 0: self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals)) return self.alphasigma else: self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals)) return 1.0 # / self.alphasigma def reeval(self, X, fit, func, ask, args=()): """store two fitness lists, `fit` and ``fitre`` reevaluating some solutions in `X`. ``self.evaluations`` evaluations are done for each reevaluated fitness value. See `__call__()`, where `reeval()` is called. 
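# Added note (illustration): when `maxevals` is passed as a list such as
# [1, 5, 30], the minimum (1) becomes the floor for self.evaluations, the
# median (5) the starting value, and the maximum (30) the ceiling, as
# described in __init__ above.  The factor returned by __call__()/treat()
# multiplies sigma in the driving loop (es.sigma *= nh(...)): it equals
# alphasigma (> 1) while noise is detected (noiseS > 0) and 1.0 otherwise,
# while the per-fitness evaluation budget moves between minevals and maxevals.
# nh = NoiseHandler(10, maxevals=[1, 5, 30])   # hypothetical instantiation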
""" self.fit = list(fit) self.fitre = list(fit) self.idx = self.indices(fit) if not len(self.idx): return self.idx evals = int(self.evaluations) if self.f_aggregate else 1 fagg = np.median if self.f_aggregate is None else self.f_aggregate for i in self.idx: X_i = X[i] if self.epsilon: if self.parallel: self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args)) else: self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args) for _k in xrange(evals)]) else: self.fitre[i] = fagg([func(X_i, *args) for _k in xrange(evals)]) self.evaluations_just_done = evals * len(self.idx) return self.fit, self.fitre, self.idx def update_measure(self): """updated noise level measure using two fitness lists ``self.fit`` and ``self.fitre``, return ``self.noiseS, all_individual_measures``. Assumes that `self.idx` contains the indices where the fitness lists differ """ lam = len(self.fit) idx = np.argsort(self.fit + self.fitre) ranks = np.argsort(idx).reshape((2, lam)) rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1]) # compute rank change limits using both ranks[0] and ranks[1] r = np.arange(1, 2 * lam) # 2 * lam - 2 elements limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))), self.theta * 50) + Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))), self.theta * 50)) for i in self.idx] # compute measurement # max: 1 rankchange in 2*lambda is always fine s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda self.noiseS += self.cum * (np.mean(s) - self.noiseS) return self.noiseS, s def indices(self, fit): """return the set of indices to be reevaluated for noise measurement. Given the first values are the earliest, this is a useful policy also with a time changing objective. """ ## meta_parameters.noise_reeval_multiplier == 1.0 lam_reev = 1.0 * (self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20) lam_reev = int(lam_reev) + ((lam_reev % 1) > np.random.rand()) ## meta_parameters.noise_choose_reeval == 1 choice = 1 if choice == 1: # take n_first first and reev - n_first best of the remaining n_first = lam_reev - lam_reev // 2 sort_idx = np.argsort(array(fit, copy=False)[n_first:]) + n_first return np.array(list(range(0, n_first)) + list(sort_idx[0:lam_reev - n_first]), copy=False) elif choice == 2: idx_sorted = np.argsort(array(fit, copy=False)) # take lam_reev equally spaced, starting with best linsp = np.linspace(0, len(fit) - len(fit) / lam_reev, lam_reev) return idx_sorted[[int(i) for i in linsp]] # take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values. elif choice == 3: return np.argsort(array(fit, copy=False)[:2 * (lam_reev + 1)])[:lam_reev] else: raise ValueError('unrecognized choice value %d for noise reev' % choice) # ____________________________________________________________ # ____________________________________________________________ class Sections(object): """plot sections through an objective function. A first rational thing to do, when facing an (expensive) application. By default 6 points in each coordinate are evaluated. This class is still experimental. Examples -------- >>> import cma, numpy as np >>> s = cma.Sections(cma.Fcts.rosen, np.zeros(3)).do(plot=False) >>> s.do(plot=False) # evaluate the same points again, i.e. check for noise >> try: ... s.plot() ... except: ... print('plotting failed: matplotlib.pyplot package missing?') Details ------- Data are saved after each function call during `do()`. 
The filename is attribute ``name`` and by default ``str(func)``, see `__init__()`. A random (orthogonal) basis can be generated with ``cma.Rotation()(np.eye(3))``. CAVEAT: The default name is unique in the function name, but it should be unique in all parameters of `__init__()` but `plot_cmd` and `load`. If, for example, a different basis is chosen, either the name must be changed or the ``.pkl`` file containing the previous data must first be renamed or deleted. ``s.res`` is a dictionary with an entry for each "coordinate" ``i`` and with an entry ``'x'``, the middle point. Each entry ``i`` is again a dictionary with keys being different dx values and the value being a sequence of f-values. For example ``s.res[2][0.1] == [0.01, 0.01]``, which is generated using the difference vector ``s .basis[2]`` like ``s.res[2][dx] += func(s.res['x'] + dx * s.basis[2])``. :See: `__init__()` """ def __init__(self, func, x, args=(), basis=None, name=None, plot_cmd=pyplot.plot if pyplot else None, load=True): """ Parameters ---------- `func` objective function `x` point in search space, middle point of the sections `args` arguments passed to `func` `basis` evaluated points are ``func(x + locations[j] * basis[i]) for i in len(basis) for j in len(locations)``, see `do()` `name` filename where to save the result `plot_cmd` command used to plot the data, typically matplotlib pyplots `plot` or `semilogy` `load` load previous data from file ``str(func) + '.pkl'`` """ self.func = func self.args = args self.x = x self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '') self.plot_cmd = plot_cmd # or semilogy self.basis = np.eye(len(x)) if basis is None else basis try: self.load() if any(self.res['x'] != x): self.res = {} self.res['x'] = x # TODO: res['x'] does not look perfect else: print(self.name + ' loaded') except: self.res = {} self.res['x'] = x def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True): """generates, plots and saves function values ``func(y)``, where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in the ``res`` attribute and the class instance is saved in a file with (the weired) name ``str(func)``. Parameters ---------- `repetitions` for each point, only for noisy functions is >1 useful. For ``repetitions==0`` only already generated data are plotted. `locations` coordinated wise deviations from the middle point given in `__init__` """ if not repetitions: self.plot() return res = self.res for i in xrange(len(self.basis)): # i-th coordinate if i not in res: res[i] = {} # xx = np.array(self.x) # TODO: store res[i]['dx'] = self.basis[i] here? 
for dx in locations: xx = self.x + dx * self.basis[i] xkey = dx # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx if xkey not in res[i]: res[i][xkey] = [] n = repetitions while n > 0: n -= 1 res[i][xkey].append(self.func(xx, *self.args)) if plot: self.plot() self.save() return self def plot(self, plot_cmd=None, tf=lambda y: y): """plot the data we have, return ``self``""" if not plot_cmd: plot_cmd = self.plot_cmd colors = 'bgrcmyk' pyplot.hold(False) res = self.res flatx, flatf = self.flattened() minf = np.inf for i in flatf: minf = min((minf, min(flatf[i]))) addf = 1e-9 - minf if minf <= 1e-9 else 0 for i in sorted(res.keys()): # we plot not all values here if isinstance(i, int): color = colors[i % len(colors)] arx = sorted(res[i].keys()) plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-') pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i) pyplot.hold(True) plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o') pyplot.ylabel('f + ' + str(addf)) pyplot.draw() pyplot.ion() pyplot.show() # raw_input('press return') return self def flattened(self): """return flattened data ``(x, f)`` such that for the sweep through coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])`` """ flatx = {} flatf = {} for i in self.res: if isinstance(i, int): flatx[i] = [] flatf[i] = [] for x in sorted(self.res[i]): for d in sorted(self.res[i][x]): flatx[i].append(x) flatf[i].append(d) return flatx, flatf def save(self, name=None): """save to file""" import pickle name = name if name else self.name fun = self.func del self.func # instance method produces error pickle.dump(self, open(name + '.pkl', "wb")) self.func = fun return self def load(self, name=None): """load from file""" import pickle name = name if name else self.name s = pickle.load(open(name + '.pkl', 'rb')) self.res = s.res # disregard the class return self #____________________________________________________________ #____________________________________________________________ class _Error(Exception): """generic exception of cma module""" pass # ____________________________________________________________ # ____________________________________________________________ # class ElapsedTime(object): """using ``time.clock`` with overflow handling to measure CPU time. 
Example: >>> clock = ElapsedTime() # clock starts here >>> t1 = clock() # get elapsed CPU time Details: 32-bit C overflows after int(2**32/1e6) == 4294s about 72 min """ def __init__(self): self.tic0 = time.clock() self.tic = self.tic0 self.lasttoc = time.clock() self.lastdiff = time.clock() - self.lasttoc self.time_to_add = 0 self.messages = 0 reset = __init__ def __call__(self): toc = time.clock() if toc - self.tic >= self.lasttoc - self.tic: self.lastdiff = toc - self.lasttoc self.lasttoc = toc else: # overflow, reset self.tic if self.messages < 3: self.messages += 1 print(' in cma.ElapsedTime: time measure overflow, last difference estimated from', self.tic0, self.tic, self.lasttoc, toc, toc - self.lasttoc, self.lastdiff) self.time_to_add += self.lastdiff + self.lasttoc - self.tic self.tic = toc # reset self.lasttoc = toc self.elapsedtime = toc - self.tic + self.time_to_add return self.elapsedtime class Misc(object): # ____________________________________________________________ # ____________________________________________________________ # class MathHelperFunctions(object): """static convenience math helper functions, if the function name is preceded with an "a", a numpy array is returned """ @staticmethod def aclamp(x, upper): return -Misc.MathHelperFunctions.apos(-x, -upper) @staticmethod def equals_approximately(a, b, eps=1e-12): if a < 0: a, b = -1 * a, -1 * b return (a - eps < b < a + eps) or ((1 - eps) * a < b < (1 + eps) * a) @staticmethod def vequals_approximately(a, b, eps=1e-12): a, b = array(a), array(b) idx = np.where(a < 0)[0] if len(idx): a[idx], b[idx] = -1 * a[idx], -1 * b[idx] return (np.all(a - eps < b) and np.all(b < a + eps) ) or (np.all((1 - eps) * a < b) and np.all(b < (1 + eps) * a)) @staticmethod def expms(A, eig=np.linalg.eigh): """matrix exponential for a symmetric matrix""" # TODO: check that this works reliably for low rank matrices # first: symmetrize A D, B = eig(A) return np.dot(B, (np.exp(D) * B).T) @staticmethod def amax(vec, vec_or_scalar): return array(Misc.MathHelperFunctions.max(vec, vec_or_scalar)) @staticmethod def max(vec, vec_or_scalar): b = vec_or_scalar if isscalar(b): m = [max(x, b) for x in vec] else: m = [max(vec[i], b[i]) for i in rglen((vec))] return m @staticmethod def minmax(val, min_val, max_val): assert min_val <= max_val return min((max_val, max((val, min_val)))) @staticmethod def aminmax(val, min_val, max_val): return array([min((max_val, max((v, min_val)))) for v in val]) @staticmethod def amin(vec_or_scalar, vec_or_scalar2): return array(Misc.MathHelperFunctions.min(vec_or_scalar, vec_or_scalar2)) @staticmethod def min(a, b): iss = isscalar if iss(a) and iss(b): return min(a, b) if iss(a): a, b = b, a # now only b can be still a scalar if iss(b): return [min(x, b) for x in a] else: # two non-scalars must have the same length return [min(a[i], b[i]) for i in rglen((a))] @staticmethod def norm(vec, expo=2): return sum(vec**expo)**(1 / expo) @staticmethod def apos(x, lower=0): """clips argument (scalar or array) from below at lower""" if lower == 0: return (x > 0) * x else: return lower + (x > lower) * (x - lower) @staticmethod def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False): """``prctile(data, 50)`` returns the median, but p_vals can also be a sequence. Provides for small samples better values than matplotlib.mlab.prctile, however also slower. """ ps = [p_vals] if isscalar(p_vals) else p_vals if not sorted_: data = sorted(data) n = len(data) d = [] for p in ps: fi = p * n / 100 - 0.5 if fi <= 0: # maybe extrapolate? 
d.append(data[0]) elif fi >= n - 1: d.append(data[-1]) else: i = int(fi) d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1]) return d[0] if isscalar(p_vals) else d @staticmethod def sround(nb): # TODO: to be vectorized """return stochastic round: floor(nb) + (rand() 1000: n = np.random.randn() / np.random.randn() return n / 25 @staticmethod def standard_finite_cauchy(size=1): try: l = len(size) except TypeError: l = 0 if l == 0: return array([Mh.cauchy_with_variance_one() for _i in xrange(size)]) elif l == 1: return array([Mh.cauchy_with_variance_one() for _i in xrange(size[0])]) elif l == 2: return array([[Mh.cauchy_with_variance_one() for _i in xrange(size[1])] for _j in xrange(size[0])]) else: raise _Error('len(size) cannot be large than two') @staticmethod def likelihood(x, m=None, Cinv=None, sigma=1, detC=None): """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)""" # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled) # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim if m is None: dx = x else: dx = x - m # array(x) - array(m) n = len(x) s2pi = (2 * np.pi)**(n / 2.) if Cinv is None: return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n if detC is None: detC = 1. / np.linalg.linalg.det(Cinv) return exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n @staticmethod def loglikelihood(self, x, previous=False): """return log-likelihood of `x` regarding the current sample distribution""" # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled) # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim # TODO: test this!! # c=cma.fmin... # c[3]['cma'].loglikelihood(...) if previous and hasattr(self, 'lastiter'): sigma = self.lastiter.sigma Crootinv = self.lastiter._Crootinv xmean = self.lastiter.mean D = self.lastiter.D elif previous and self.countiter > 1: raise _Error('no previous distribution parameters stored, check options importance_mixing') else: sigma = self.sigma Crootinv = self._Crootinv xmean = self.mean D = self.D dx = array(x) - xmean # array(x) - array(m) n = self.N logs2pi = n * log(2 * np.pi) / 2. logdetC = 2 * sum(log(D)) dx = np.dot(Crootinv, dx) res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma) if 1 < 3: # testing s2pi = (2 * np.pi)**(n / 2.) detC = np.prod(D)**2 res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n) assert res2 < res + 1e-8 or res2 > res - 1e-8 return res # ____________________________________________________________ # ____________________________________________________________ # # C and B are arrays rather than matrices, because they are # addressed via B[i][j], matrices can only be addressed via B[i,j] # tred2(N, B, diagD, offdiag); # tql2(N, diagD, offdiag, B); # Symmetric Householder reduction to tridiagonal form, translated from JAMA package. @staticmethod def eig(C): """eigendecomposition of a symmetric matrix, much slower than `numpy.linalg.eigh`, return ``(EVals, Basis)``, the eigenvalues and an orthonormal basis of the corresponding eigenvectors, where ``Basis[i]`` the i-th row of ``Basis`` columns of ``Basis``, ``[Basis[j][i] for j in range(len(Basis))]`` the i-th eigenvector with eigenvalue ``EVals[i]`` """ # class eig(object): # def __call__(self, C): # Householder transformation of a symmetric matrix V into tridiagonal form. 
# -> n : dimension # -> V : symmetric nxn-matrix # <- V : orthogonal transformation matrix: # tridiag matrix == V * V_in * V^t # <- d : diagonal # <- e[0..n-1] : off diagonal (elements 1..n-1) # Symmetric tridiagonal QL algorithm, iterative # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations # -> n : Dimension. # -> d : Diagonale of tridiagonal matrix. # -> e[1..n-1] : off-diagonal, output from Householder # -> V : matrix output von Householder # <- d : eigenvalues # <- e : garbage? # <- V : basis of eigenvectors, according to d # tred2(N, B, diagD, offdiag); B=C on input # tql2(N, diagD, offdiag, B); # private void tred2 (int n, double V[][], double d[], double e[]) { def tred2 (n, V, d, e): # This is derived from the Algol procedures tred2 by # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding # Fortran subroutine in EISPACK. num_opt = False # factor 1.5 in 30-D for j in xrange(n): d[j] = V[n - 1][j] # d is output argument # Householder reduction to tridiagonal form. for i in xrange(n - 1, 0, -1): # Scale to avoid under/overflow. h = 0.0 if not num_opt: scale = 0.0 for k in xrange(i): scale = scale + abs(d[k]) else: scale = sum(abs(d[0:i])) if scale == 0.0: e[i] = d[i - 1] for j in xrange(i): d[j] = V[i - 1][j] V[i][j] = 0.0 V[j][i] = 0.0 else: # Generate Householder vector. if not num_opt: for k in xrange(i): d[k] /= scale h += d[k] * d[k] else: d[:i] /= scale h = np.dot(d[:i], d[:i]) f = d[i - 1] g = h**0.5 if f > 0: g = -g e[i] = scale * g h = h - f * g d[i - 1] = f - g if not num_opt: for j in xrange(i): e[j] = 0.0 else: e[:i] = 0.0 # Apply similarity transformation to remaining columns. for j in xrange(i): f = d[j] V[j][i] = f g = e[j] + V[j][j] * f if not num_opt: for k in xrange(j + 1, i): g += V[k][j] * d[k] e[k] += V[k][j] * f e[j] = g else: e[j + 1:i] += V.T[j][j + 1:i] * f e[j] = g + np.dot(V.T[j][j + 1:i], d[j + 1:i]) f = 0.0 if not num_opt: for j in xrange(i): e[j] /= h f += e[j] * d[j] else: e[:i] /= h f += np.dot(e[:i], d[:i]) hh = f / (h + h) if not num_opt: for j in xrange(i): e[j] -= hh * d[j] else: e[:i] -= hh * d[:i] for j in xrange(i): f = d[j] g = e[j] if not num_opt: for k in xrange(j, i): V[k][j] -= (f * e[k] + g * d[k]) else: V.T[j][j:i] -= (f * e[j:i] + g * d[j:i]) d[j] = V[i - 1][j] V[i][j] = 0.0 d[i] = h # end for i-- # Accumulate transformations. for i in xrange(n - 1): V[n - 1][i] = V[i][i] V[i][i] = 1.0 h = d[i + 1] if h != 0.0: if not num_opt: for k in xrange(i + 1): d[k] = V[k][i + 1] / h else: d[:i + 1] = V.T[i + 1][:i + 1] / h for j in xrange(i + 1): if not num_opt: g = 0.0 for k in xrange(i + 1): g += V[k][i + 1] * V[k][j] for k in xrange(i + 1): V[k][j] -= g * d[k] else: g = np.dot(V.T[i + 1][0:i + 1], V.T[j][0:i + 1]) V.T[j][:i + 1] -= g * d[:i + 1] if not num_opt: for k in xrange(i + 1): V[k][i + 1] = 0.0 else: V.T[i + 1][:i + 1] = 0.0 if not num_opt: for j in xrange(n): d[j] = V[n - 1][j] V[n - 1][j] = 0.0 else: d[:n] = V[n - 1][:n] V[n - 1][:n] = 0.0 V[n - 1][n - 1] = 1.0 e[0] = 0.0 # Symmetric tridiagonal QL algorithm, taken from JAMA package. # private void tql2 (int n, double d[], double e[], double V[][]) { # needs roughly 3N^3 operations def tql2 (n, d, e, V): # This is derived from the Algol procedures tql2, by # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding # Fortran subroutine in EISPACK. 
num_opt = False # using vectors from numpy makes it faster if not num_opt: for i in xrange(1, n): # (int i = 1; i < n; i++): e[i - 1] = e[i] else: e[0:n - 1] = e[1:n] e[n - 1] = 0.0 f = 0.0 tst1 = 0.0 eps = 2.0**-52.0 for l in xrange(n): # (int l = 0; l < n; l++) { # Find small subdiagonal element tst1 = max(tst1, abs(d[l]) + abs(e[l])) m = l while m < n: if abs(e[m]) <= eps * tst1: break m += 1 # If m == l, d[l] is an eigenvalue, # otherwise, iterate. if m > l: iiter = 0 while 1: # do { iiter += 1 # (Could check iteration count here.) # Compute implicit shift g = d[l] p = (d[l + 1] - g) / (2.0 * e[l]) r = (p**2 + 1)**0.5 # hypot(p,1.0) if p < 0: r = -r d[l] = e[l] / (p + r) d[l + 1] = e[l] * (p + r) dl1 = d[l + 1] h = g - d[l] if not num_opt: for i in xrange(l + 2, n): d[i] -= h else: d[l + 2:n] -= h f = f + h # Implicit QL transformation. p = d[m] c = 1.0 c2 = c c3 = c el1 = e[l + 1] s = 0.0 s2 = 0.0 # hh = V.T[0].copy() # only with num_opt for i in xrange(m - 1, l - 1, -1): # (int i = m-1; i >= l; i--) { c3 = c2 c2 = c s2 = s g = c * e[i] h = c * p r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i]) e[i + 1] = s * r s = e[i] / r c = p / r p = c * d[i] - s * g d[i + 1] = h + s * (c * g + s * d[i]) # Accumulate transformation. if not num_opt: # overall factor 3 in 30-D for k in xrange(n): # (int k = 0; k < n; k++) { h = V[k][i + 1] V[k][i + 1] = s * V[k][i] + c * h V[k][i] = c * V[k][i] - s * h else: # about 20% faster in 10-D hh = V.T[i + 1].copy() # hh[:] = V.T[i+1][:] V.T[i + 1] = s * V.T[i] + c * hh V.T[i] = c * V.T[i] - s * hh # V.T[i] *= c # V.T[i] -= s * hh p = -s * s2 * c3 * el1 * e[l] / dl1 e[l] = s * p d[l] = c * p # Check for convergence. if abs(e[l]) <= eps * tst1: break # } while (Math.abs(e[l]) > eps*tst1); d[l] = d[l] + f e[l] = 0.0 # Sort eigenvalues and corresponding vectors. # tql2 N = len(C[0]) if 1 < 3: V = [[x[i] for i in xrange(N)] for x in C] # copy each "row" d = N * [0.] e = N * [0.] tred2(N, V, d, e) tql2(N, d, e, V) return (array(d), array(V)) Mh = Misc.MathHelperFunctions # if _experimental: # from new_stuff import * def pprint(to_be_printed): """nicely formated print""" try: import pprint as pp # generate an instance PrettyPrinter # pp.PrettyPrinter().pprint(to_be_printed) pp.pprint(to_be_printed) except ImportError: if isinstance(to_be_printed, dict): print('{') for k, v in to_be_printed.items(): print("'" + k + "'" if isinstance(k, basestring) else k, ': ', "'" + v + "'" if isinstance(k, basestring) else v, sep="") print('}') else: print('could not import pprint module, appling regular print') print(to_be_printed) pp = pprint class ConstRandnShift(object): """``ConstRandnShift()(x)`` adds a fixed realization of ``stddev * randn(len(x))`` to the vector x. By default, the realized shift is the same for each instance of ``ConstRandnShift``, see ``seed`` argument. This class is used in class ``FFWrapper.ShiftedFitness`` as default transformation. 
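# Added illustration: Misc.eig above is a pure-Python fallback for
# numpy.linalg.eigh; both return the eigenvalues and an orthonormal basis of
# a symmetric matrix (up to ordering and sign of the eigenvectors), e.g.:
import numpy as np
C = np.array([[2.0, 0.5], [0.5, 1.0]])
evals, B = Mh.eig(C)                 # Mh is the alias defined above
evals_np, B_np = np.linalg.eigh(C)
assert np.allclose(sorted(evals), evals_np)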
See: class ``FFWrapper.ShiftedFitness`` """ def __init__(self, stddev=3, seed=1): """with ``seed=None`` each instance realizes a different shift""" self.seed = seed self.stddev = stddev self._xopt = {} def __call__(self, x): """return "shifted" ``x - shift`` """ try: x_opt = self._xopt[len(x)] except KeyError: if self.seed is None: shift = np.random.randn(len(x)) else: rstate = np.random.get_state() np.random.seed(self.seed) shift = np.random.randn(len(x)) np.random.set_state(rstate) x_opt = self._xopt.setdefault(len(x), self.stddev * shift) return array(x, copy=False) - x_opt def get(self, dimension): """return shift applied to ``zeros(dimension)`` >>> import numpy as np, cma >>> s = cma.ConstRandnShift() >>> assert all(s(-s.get(3)) == np.zeros(3)) >>> assert all(s.get(3) == s(np.zeros(3))) """ return self.__call__(np.zeros(dimension)) class Rotation(object): """Rotation class that implements an orthogonal linear transformation, one for each dimension. By default reach ``Rotation`` instance provides a different "random" but fixed rotation. This class is used to implement non-separable test functions, most conveniently via `FFWrapper.RotatedFitness`. Example: >>> import cma, numpy as np >>> R = cma.Rotation() >>> R2 = cma.Rotation() # another rotation >>> x = np.array((1,2,3)) >>> print(R(R(x), inverse=1)) [ 1. 2. 3.] See: `FFWrapper.RotatedFitness` """ dicMatrices = {} # store matrix if necessary, for each dimension def __init__(self, seed=None): """by default a random but fixed rotation, different for each instance""" self.seed = seed self.dicMatrices = {} # otherwise there might be shared bases which is probably not what we want def __call__(self, x, inverse=False): # function when calling an object """Rotates the input array `x` with a fixed rotation matrix (``self.dicMatrices['str(len(x))']``) """ x = np.array(x, copy=False) N = x.shape[0] # can be an array or matrix, TODO: accept also a list of arrays? if str(N) not in self.dicMatrices: # create new N-basis for once and all rstate = np.random.get_state() np.random.seed(self.seed) if self.seed else np.random.seed() B = np.random.randn(N, N) for i in xrange(N): for j in xrange(0, i): B[i] -= np.dot(B[i], B[j]) * B[j] B[i] /= sum(B[i]**2)**0.5 self.dicMatrices[str(N)] = B np.random.set_state(rstate) if inverse: return np.dot(self.dicMatrices[str(N)].T, x) # compute rotation else: return np.dot(self.dicMatrices[str(N)], x) # compute rotation # Use rotate(x) to rotate x rotate = Rotation() # ____________________________________________________________ # ____________________________________________________________ # class FFWrapper(object): """ A collection of (yet experimental) classes to implement fitness transformations and wrappers. Aliased to `FF2` below. """ class FitnessTransformation(object): """This class does nothing but serve as an interface template. Typical use-case:: f = FitnessTransformation(f, parameters_if_needed)`` See: class ``TransformSearchSpace`` """ def __init__(self, fitness_function, *args, **kwargs): """`fitness_function` must be callable (e.g. a function or a callable class instance)""" # the original fitness to be called self.inner_fitness = fitness_function # self.condition_number = ... 
def __call__(self, x, *args): """identity as default transformation""" if hasattr(self, 'x_transformation'): x = self.x_transformation(x) f = self.inner_fitness(x, *args) if hasattr(self, 'f_transformation'): f = self.f_transformation(f) return f class BookKeeping(FitnessTransformation): """a stump for experimenting with use-cases and possible extensions of book keeping use-case: f = BookKeeping(f) print(f.count_evaluations) """ def __init__(self, callable=None): self.count_evaluations = 0 self.inner_fitness = callable def __call__(self, *args): # assert len(args[0]) # x-vector self.count_evaluations += 1 return self.inner_fitness(*args) class TransformSearchSpace(FitnessTransformation): """:: f = TransformSearchSpace(f, ConstRandnShift()) constructs the composed function f <- f o shift. Details: to some extend this is a nice shortcut for:: f = lambda x, *args: f_in(ConstRandnShift()(x), *args) however the `lambda` definition depends on the value of ``f_in`` even after ``f`` has been assigned. See: `ShiftedFitness`, `RotatedFitness` """ def __init__(self, fitness_function, transformation): """``TransformSearchSpace(f, s)(x) == f(s(x))`` >>> import cma >>> f0 = lambda x: sum(x) >>> shift_fct = cma.ConstRandnShift() >>> f = cma.FF2.TransformSearchSpace(f0, shift_fct) >>> x = [1, 2, 3] >>> assert f(x) == f0(shift_fct(x)) """ self.inner_fitness = fitness_function # akin to FitnessTransformation.__init__(self, fitness_function) # akin to super(TransformSearchSpace, self).__init__(fitness_function) self.x_transformation = transformation # will be used in base class class ScaleCoordinates(TransformSearchSpace): """define a scaling of each variable """ def __init__(self, fitness_function, multipliers=None): """ :param fitness_function: a callable object :param multipliers: recycling is not implemented, i.e. the dimension must fit to the `fitness_function` argument when called """ super(FFWrapper.ScaleCoordinates, self).__init__( fitness_function, self.transformation) # TransformSearchSpace.__init__(self, fitness_function, # self.transformation) self.multiplier = multipliers if self.multiplier is not None and hasattr(self.multiplier, 'len'): self.multiplier = array(self.multiplier, copy=True) def transformation(x, *args): if self.multiplier is None: return array(x, copy=False) return self.multiplier * array(x, copy=False) class ShiftedFitness(TransformSearchSpace): """``f = cma.ShiftedFitness(cma.fcts.sphere)`` constructs a shifted sphere function, by default the shift is computed from class ``ConstRandnShift`` with std dev 3. """ def __init__(self, f, shift=None): """``shift(x)`` must return a (stable) shift of x. Details: this class solely provides as default second argument to TransformSearchSpace a shift in search space. ``shift=lambda x: x`` would provide "no shift", ``None`` expands to ``cma.ConstRandnShift()``. """ self.inner_fitness = f self.x_transformation = shift if shift else ConstRandnShift() # alternatively we could have called super class RotatedFitness(TransformSearchSpace): """``f = cma.RotatedFitness(cma.fcts.elli)`` constructs a rotated ellipsoid function """ def __init__(self, f, rotate=rotate): """``rotate(x)`` must return a (stable) rotation of x. Details: this class solely provides a default second argument to TransformSearchSpace, namely a search space rotation. 
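            A brief composition sketch (added annotation, illustration only;
            assumes ``import cma``; ``FF2`` and ``fcts`` are defined later in
            this module)::

                frot  = cma.FF2.RotatedFitness(cma.fcts.elli)   # rotated ellipsoid
                fboth = cma.FF2.ShiftedFitness(frot)            # additionally shifted
                fboth([1., 2., 3.])                             # evaluate as usual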
""" super(FFWrapper.RotatedFitness, self).__init__(f, rotate) # self.x_transformation = rotate class FixVariables(TransformSearchSpace): """fix variables to given values, thereby reducing the dimensionality of the preimage. The constructor takes ``index_value_pairs`` as dict or list of pairs as input and returns a function with smaller preimage space than `f`. Details: this might replace the fixed_variables option in CMAOptions in future, but hasn't been tested yet. """ def __init__(self, f, index_value_pairs): """`f` has """ super(FFWrapper.FixVariables, self).__init__(f, self.insert_variables) # same as TransformSearchSpace.__init__(f, self.insert_variables) self.index_value_pairs = dict(index_value_pairs) def insert_variables(self, x): y = np.zeros(len(x) + len(self.index_value_pairs)) assert len(y) > max(self.index_value_pairs) j = 0 for i in xrange(len(y)): if i in self.index_value_pairs: y[i] = self.index_value_pairs[i] else: y[i] = x[j] j += 1 return y class SomeNaNFitness(FitnessTransformation): def __init__(self, fitness_function, probability_of_nan=0.1): self.p = probability_of_nan self.inner_fitness = fitness_function def __call__(self, x, *args): if np.random.rand(1) <= self.p: return np.NaN else: return self.inner_fitness(x, *args) class NoisyFitness(FitnessTransformation): """apply noise via f += rel_noise(dim) * f + abs_noise()""" def __init__(self, fitness_function, rel_noise=lambda dim: 1.1 * np.random.randn() / dim, abs_noise=lambda: 1.1 * np.random.randn()): self.rel_noise = rel_noise self.abs_noise = abs_noise self.inner_fitness = fitness_function def __call__(self, x, *args): f = self.inner_fitness(x, *args) if self.rel_noise: f += f * self.rel_noise(len(x)) assert isscalar(f) if self.abs_noise: f += self.abs_noise() return f class GlueArguments(FitnessTransformation): """``f = cma.FF2.GlueArguments(cma.fcts.elli, cond=1e4)`` >>> import cma >>> f = cma.FF2.GlueArguments(cma.fcts.elli, cond=1e1) >>> f([1, 2]) # == 1**2 + 1e1 * 2**2 41.0 """ def __init__(self, fitness_function, *args, **kwargs): self.inner_fitness = fitness_function self.args = args self.kwargs = kwargs def __call__(self, x, *args): return self.inner_fitness(array(x, copy=False), *(args + self.args), **self.kwargs) class UnknownFF(object): """search in [-10, 10] for the unknown (optimum)""" def __init__(self, seed=2): self.seed = seed self._x_opt_ = {} self.rotate = Rotation(seed) self.count_evaluations = 0 def _x_opt(self, dim): rstate = np.random.get_state() np.random.seed(self.seed) x = self._x_opt_.setdefault(dim, 0 * 3 * np.random.randn(dim)) np.random.set_state(rstate) return x def typical_x(self, dim): off = self.rotate(np.floor(np.arange(0, 3, 3. / dim)) / np.logspace(0, 1, dim), inverse=True) off[np.s_[3:]] += 0.005 off[-1] *= 1e2 off[0] /= 2.0e3 if off[0] > 0 else 1e3 off[2] /= 3.01e4 if off[2] < 0 else 2e4 return self._x_opt(dim) + off def __call__(self, x): self.count_evaluations += 1 N = len(x) x = x - self._x_opt(N) x[-1] /= 1e2 x[0] *= 2.0e3 if x[0] > 0 else 1e3 x[2] *= 3.01e4 if x[2] < 0 else 2e4 x = np.logspace(0, 1, N) * self.rotate(x) return 10 * N - np.e**2 + \ sum(x**2 - 10 * np.cos(2 * np.pi * x)) FF2 = FFWrapper class FitnessFunctions(object): """ versatile container for test objective functions """ def __init__(self): self.counter = 0 # number of calls or any other practical use def rot(self, x, fun, rot=1, args=()): """returns ``fun(rotation(x), *args)``, ie. 
`fun` applied to a rotated argument""" if len(np.shape(array(x))) > 1: # parallelized res = [] for x in x: res.append(self.rot(x, fun, rot, args)) return res if rot: return fun(rotate(x, *args)) else: return fun(x) def somenan(self, x, fun, p=0.1): """returns sometimes np.NaN, otherwise fun(x)""" if np.random.rand(1) < p: return np.NaN else: return fun(x) def rand(self, x): """Random test objective function""" return np.random.random(1)[0] def linear(self, x): return -x[0] def lineard(self, x): if 1 < 3 and any(array(x) < 0): return np.nan if 1 < 3 and sum([ (10 + i) * x[i] for i in rglen(x)]) > 50e3: return np.nan return -sum(x) def sphere(self, x): """Sphere (squared norm) test objective function""" # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0] return sum((x + 0)**2) def grad_sphere(self, x, *args): return 2*array(x, copy=False) def grad_to_one(self, x, *args): return array(x, copy=False) - 1 def sphere_pos(self, x): """Sphere (squared norm) test objective function""" # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0] c = 0.0 if x[0] < c: return np.nan return -c**2 + sum((x + 0)**2) def spherewithoneconstraint(self, x): return sum((x + 0)**2) if x[0] > 1 else np.nan def elliwithoneconstraint(self, x, idx=[-1]): return self.ellirot(x) if all(array(x)[idx] > 1) else np.nan def spherewithnconstraints(self, x): return sum((x + 0)**2) if all(array(x) > 1) else np.nan # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz def noisysphere(self, x, noise=2.10e-9, cond=1.0, noise_offset=0.10): """noise=10 does not work with default popsize, noise handling does not help """ return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x)) + noise_offset * np.random.rand() def spherew(self, x): """Sphere (squared norm) with sum x_i = 1 test objective function""" # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0] # s = sum(abs(x)) # return sum((x/s+0)**2) - 1/len(x) # return sum((x/s)**2) - 1/len(x) return -0.01 * x[0] + abs(x[0])**-2 * sum(x[1:]**2) def partsphere(self, x): """Sphere (squared norm) test objective function""" self.counter += 1 # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0] dim = len(x) x = array([x[i % dim] for i in xrange(2 * dim)]) N = 8 i = self.counter % dim # f = sum(x[i:i + N]**2) f = sum(x[np.random.randint(dim, size=N)]**2) return f def sectorsphere(self, x): """asymmetric Sphere (squared norm) test objective function""" return sum(x**2) + (1e6 - 1) * sum(x[x < 0]**2) def cornersphere(self, x): """Sphere (squared norm) test objective function constraint to the corner""" nconstr = len(x) - 0 if any(x[:nconstr] < 1): return np.NaN return sum(x**2) - nconstr def cornerelli(self, x): """ """ if any(x < 1): return np.NaN return self.elli(x) - self.elli(np.ones(len(x))) def cornerellirot(self, x): """ """ if any(x < 1): return np.NaN return self.ellirot(x) def normalSkew(self, f): N = np.random.randn(1)[0]**2 if N < 1: N = f * N # diminish blow up lower part return N def noiseC(self, x, func=sphere, fac=10, expon=0.8): f = func(self, x) N = np.random.randn(1)[0] / np.random.randn(1)[0] return max(1e-19, f + (float(fac) / len(x)) * f**expon * N) def noise(self, x, func=sphere, fac=10, expon=1): f = func(self, x) # R = np.random.randn(1)[0] R = np.log10(f) + expon * abs(10 - np.log10(f)) * np.random.rand(1)[0] # sig = float(fac)/float(len(x)) # R = log(f) + 0.5*log(f) * random.randn(1)[0] # return max(1e-19, f + sig * (f**np.log10(f)) * np.exp(R)) # return max(1e-19, f * np.exp(sig * N / f**expon)) 
# return max(1e-19, f * normalSkew(f**expon)**sig) return f + 10**R # == f + f**(1+0.5*RN) def cigar(self, x, rot=0, cond=1e6, noise=0): """Cigar test objective function""" if rot: x = rotate(x) x = [x] if isscalar(x[0]) else x # scalar into list f = [(x[0]**2 + cond * sum(x[1:]**2)) * np.exp(noise * np.random.randn(1)[0] / len(x)) for x in x] return f if len(f) > 1 else f[0] # 1-element-list into scalar def grad_cigar(self, x, *args): grad = 2 * 1e6 * np.array(x) grad[0] /= 1e6 return grad def diagonal_cigar(self, x, cond=1e6): axis = np.ones(len(x)) / len(x)**0.5 proj = dot(axis, x) * axis s = sum(proj**2) s += cond * sum((x - proj)**2) return s def tablet(self, x, rot=0): """Tablet test objective function""" if rot and rot is not fcts.tablet: x = rotate(x) x = [x] if isscalar(x[0]) else x # scalar into list f = [1e6 * x[0]**2 + sum(x[1:]**2) for x in x] return f if len(f) > 1 else f[0] # 1-element-list into scalar def grad_tablet(self, x, *args): grad = 2 * np.array(x) grad[0] *= 1e6 return grad def cigtab(self, y): """Cigtab test objective function""" X = [y] if isscalar(y[0]) else y f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X] return f if len(f) > 1 else f[0] def twoaxes(self, y): """Cigtab test objective function""" X = [y] if isscalar(y[0]) else y N2 = len(X[0]) // 2 f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X] return f if len(f) > 1 else f[0] def ellirot(self, x): return fcts.elli(array(x), 1) def hyperelli(self, x): N = len(x) return sum((np.arange(1, N + 1) * x)**2) def halfelli(self, x): l = len(x) // 2 felli = self.elli(x[:l]) return felli + 1e-8 * sum(x[l:]**2) def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False): """Ellipsoid test objective function""" if not isscalar(x[0]): # parallel evaluation return [self.elli(xi, rot) for xi in x] # could save 20% overall if rot: x = rotate(x) N = len(x) if actuator_noise: x = x + actuator_noise * np.random.randn(N) ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2) alpha = 0.49 + 1. / N beta = 1 felli = np.random.rand(1)[0]**beta * ftrue * \ max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0])) # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 + # np.abs(np.random.randn(1)[0]))**0 if both: return (felli, ftrue) else: # return felli # possibly noisy value return ftrue # + np.random.randn() def grad_elli(self, x, *args): cond = 1e6 N = len(x) return 2 * cond**(np.arange(N) / (N - 1.)) * array(x, copy=False) def fun_as_arg(self, x, *args): """``fun_as_arg(x, fun, *more_args)`` calls ``fun(x, *more_args)``. 
Use case:: fmin(cma.fun_as_arg, args=(fun,), gradf=grad_numerical) calls fun_as_args(x, args) and grad_numerical(x, fun, args=args) """ fun = args[0] more_args = args[1:] if len(args) > 1 else () return fun(x, *more_args) def grad_numerical(self, x, func, epsilon=None): """symmetric gradient""" eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon grad = np.zeros(len(x)) ei = np.zeros(len(x)) # float is 1.6 times faster than int for i in rglen(x): ei[i] = eps[i] grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i]) ei[i] = 0 return grad def elliconstraint(self, x, cfac=1e8, tough=True, cond=1e6): """ellipsoid test objective function with "constraints" """ N = len(x) f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2) cvals = (x[0] + 1, x[0] + 1 + 100 * x[1], x[0] + 1 - 100 * x[1]) if tough: f += cfac * sum(max(0, c) for c in cvals) else: f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals) return f def rosen(self, x, alpha=1e2): """Rosenbrock test objective function""" x = [x] if isscalar(x[0]) else x # scalar into list f = [sum(alpha * (x[:-1]**2 - x[1:])**2 + (1. - x[:-1])**2) for x in x] return f if len(f) > 1 else f[0] # 1-element-list into scalar def grad_rosen(self, x, *args): N = len(x) grad = np.zeros(N) grad[0] = 2 * (x[0] - 1) + 200 * (x[1] - x[0]**2) * -2 * x[0] i = np.arange(1, N - 1) grad[i] = 2 * (x[i] - 1) - 400 * (x[i+1] - x[i]**2) * x[i] + 200 * (x[i] - x[i-1]**2) grad[N-1] = 200 * (x[N-1] - x[N-2]**2) return grad def diffpow(self, x, rot=0): """Diffpow test objective function""" N = len(x) if rot: x = rotate(x) return sum(np.abs(x)**(2. + 4.*np.arange(N) / (N - 1.)))**0.5 def rosenelli(self, x): N = len(x) return self.rosen(x[:N / 2]) + self.elli(x[N / 2:], cond=1) def ridge(self, x, expo=2): x = [x] if isscalar(x[0]) else x # scalar into list f = [x[0] + 100 * np.sum(x[1:]**2)**(expo / 2.) for x in x] return f if len(f) > 1 else f[0] # 1-element-list into scalar def ridgecircle(self, x, expo=0.5): """happy cat by HG Beyer""" a = len(x) s = sum(x**2) return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a def happycat(self, x, alpha=1. / 8): s = sum(x**2) return ((s - len(x))**2)**alpha + (s / 2 + sum(x)) / len(x) + 0.5 def flat(self, x): return 1 return 1 if np.random.rand(1) < 0.9 else 1.1 return np.random.randint(1, 30) def branin(self, x): # in [0,15]**2 y = x[1] x = x[0] + 5 return (y - 5.1 * x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 + 10 * (1 - 1 / 8 / np.pi) * np.cos(x) + 10 - 0.397887357729738160000 def goldsteinprice(self, x): x1 = x[0] x2 = x[1] return (1 + (x1 + x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * ( 30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3 def griewank(self, x): # was in [-600 600] x = (600. / 5) * x return 1 - np.prod(np.cos(x / sqrt(1. 
+ np.arange(len(x))))) + sum(x**2) / 4e3 def rastrigin(self, x): """Rastrigin test objective function""" if not isscalar(x[0]): N = len(x[0]) return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x] # return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x), axis=1) N = len(x) return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x)) def schaffer(self, x): """ Schaffer function x0 in [-100..100]""" N = len(x) s = x[0:N - 1]**2 + x[1:N]**2 return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1)) def schwefelelli(self, x): s = 0 f = 0 for i in rglen(x): s += x[i] f += s**2 return f def schwefelmult(self, x, pen_fac=1e4): """multimodal Schwefel function with domain -500..500""" y = [x] if isscalar(x[0]) else x N = len(y[0]) f = array([418.9829 * N - 1.27275661e-5 * N - sum(x * np.sin(np.abs(x)**0.5)) + pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y]) return f if len(f) > 1 else f[0] def optprob(self, x): n = np.arange(len(x)) + 1 f = n * x * (1 - x)**(n - 1) return sum(1 - f) def lincon(self, x, theta=0.01): """ridge like linear function with one linear constraint""" if x[0] < 0: return np.NaN return theta * x[1] + x[0] def rosen_nesterov(self, x, rho=100): """needs exponential number of steps in a non-increasing f-sequence. x_0 = (-1,1,...,1) See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function" """ f = 0.25 * (x[0] - 1)**2 f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2) return f def powel_singular(self, x): # ((8 * np.sin(7 * (x[i] - 0.9)**2)**2 ) + (6 * np.sin())) res = np.sum((x[i - 1] + 10 * x[i])**2 + 5 * (x[i + 1] - x[i + 2])**2 + (x[i] - 2 * x[i + 1])**4 + 10 * (x[i - 1] - x[i + 2])**4 for i in xrange(1, len(x) - 2)) return 1 + res def styblinski_tang(self, x): """in [-5, 5] """ # x_opt = N * [-2.90353402], seems to have essentially # (only) 2**N local optima return (39.1661657037714171054273576010019 * len(x))**1 + \ sum(x**4 - 16*x**2 + 5*x) / 2 def trid(self, x): return sum((x-1)**2) - sum(x[:-1] * x[1:]) def bukin(self, x): """Bukin function from Wikipedia, generalized simplistically from 2-D. http://en.wikipedia.org/wiki/Test_functions_for_optimization""" s = 0 for k in xrange((1+len(x)) // 2): z = x[2 * k] y = x[min((2*k + 1, len(x)-1))] s += 100 * np.abs(y - 0.01 * z**2)**0.5 + 0.01 * np.abs(z + 10) return s fcts = FitnessFunctions() Fcts = fcts # for cross compatibility, as if the functions were static members of class Fcts FF = fcts def felli(x): """unbound test function, needed to test multiprocessor""" return sum(1e6**(np.arange(len(x)) / (len(x) - 1)) * (np.array(x, copy=False))**2) # ____________________________________________ # ____________________________________________________________ def _test(module=None): # None is fine when called from inside the module import doctest print(doctest.testmod(module)) # this is pretty coool! 
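# ----------------------------------------------------------------------
# Added sketch (hypothetical helper, not referenced anywhere else in this
# module): it only illustrates how the ``FF2`` wrappers and the test
# functions in ``fcts`` defined above can be composed.
def _ffwrapper_composition_example(dim=5):
    """evaluate a noisy, shifted sphere at the origin (illustration only)"""
    f = FF2.NoisyFitness(FF2.ShiftedFitness(fcts.sphere))
    return f(np.zeros(dim))
# ----------------------------------------------------------------------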
def process_doctest_output(stream=None): """ """ import fileinput s1 = "" s2 = "" s3 = "" state = 0 for line in fileinput.input(stream): # takes argv as file or stdin if 1 < 3: s3 += line if state < -1 and line.startswith('***'): print(s3) if line.startswith('***'): s3 = "" if state == -1: # found a failed example line s1 += '\n\n*** Failed Example:' + line s2 += '\n\n\n' # line # state = 0 # wait for 'Expected:' line if line.startswith('Expected:'): state = 1 continue elif line.startswith('Got:'): state = 2 continue elif line.startswith('***'): # marks end of failed example state = 0 elif line.startswith('Failed example:'): state = -1 elif line.startswith('Exception raised'): state = -2 # in effect more else: if state == 1: s1 += line + '' if state == 2: s2 += line + '' # ____________________________________________________________ # ____________________________________________________________ # def main(argv=None): """to install and/or test from the command line use:: python cma.py [options | func dim sig0 [optkey optval][optkey optval]...] with options being ``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity. ``install`` to install cma.py (uses setup from distutils.core). ``--doc`` for more infos. Or start Python or (even better) ``ipython`` and:: import cma cma.main('--test') help(cma) help(cma.fmin) res = fmin(cma.fcts.rosen, 10 * [0], 1) cma.plot() Examples ======== Testing with the local python distribution from a command line in a folder where ``cma.py`` can be found:: python cma.py --test And a single run on the Rosenbrock function:: python cma.py rosen 10 1 # dimension initial_sigma python cma.py plot In the python shell:: import cma cma.main('--test') """ if argv is None: argv = sys.argv # should have better been sys.argv[1:] else: if isinstance(argv, list): argv = ['python'] + argv # see above else: argv = ['python'] + [argv] # uncomment for unit test # _test() # handle input arguments, getopt might be helpful ;-) if len(argv) >= 1: # function and help if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'): print(main.__doc__) fun = None elif argv[1].startswith('-t') or argv[1].startswith('--test'): import doctest if len(argv) > 2 and (argv[2].startswith('--v') or argv[2].startswith('-v')): # verbose print('doctest for cma.py: due to different platforms and python versions') print('and in some cases due to a missing unique random seed') print('many examples will "fail". This is OK, if they give a similar') print('to the expected result and if no exception occurs. ') # if argv[1][2] == 'v': doctest.testmod(sys.modules[__name__], report=True) # this is quite cool! else: # was: if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')): print('doctest for cma.py: launching...') # not anymore: (it might be necessary to close the pop up window to finish) fn = '_cma_doctest_.txt' stdout = sys.stdout try: with open(fn, 'w') as f: sys.stdout = f clock = ElapsedTime() doctest.testmod(sys.modules[__name__], report=True) # this is quite cool! 
t_elapsed = clock() finally: sys.stdout = stdout process_doctest_output(fn) # clean up try: import os for name in os.listdir('.'): if (name.startswith('bound_method_FitnessFunctions.rosen_of_cma.FitnessFunctions_object_at_') and name.endswith('.pkl')): os.remove(name) except: pass print('doctest for cma.py: finished (no other output should be seen after launching, more in file _cma_doctest_.txt)') print(' elapsed time [s]:', t_elapsed) return elif argv[1] == '--doc': print(__doc__) print(CMAEvolutionStrategy.__doc__) print(fmin.__doc__) fun = None elif argv[1] == '--fcts': print('List of valid function names:') print([d for d in dir(fcts) if not d.startswith('_')]) fun = None elif argv[1] in ('install', '--install'): from distutils.core import setup setup(name="cma", long_description=__doc__, version=__version__.split()[0], description="CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python", author="Nikolaus Hansen", author_email="hansen at lri.fr", maintainer="Nikolaus Hansen", maintainer_email="hansen at lri.fr", url="https://www.lri.fr/~hansen/cmaes_inmatlab.html#python", license="BSD", classifiers = [ "Intended Audience :: Science/Research", "Intended Audience :: Education", "Intended Audience :: Other Audience", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Operating System :: OS Independent", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Development Status :: 4 - Beta", "Environment :: Console", "License :: OSI Approved :: BSD License", # "License :: OSI Approved :: MIT License", ], keywords=["optimization", "CMA-ES", "cmaes"], py_modules=["cma"], requires=["numpy"], ) fun = None elif argv[1] in ('plot',): plot(name=argv[2] if len(argv) > 2 else None) raw_input('press return') fun = None elif len(argv) > 3: fun = eval('fcts.' + argv[1]) else: print('try -h option') fun = None if fun is not None: if len(argv) > 2: # dimension x0 = np.ones(eval(argv[2])) if len(argv) > 3: # sigma sig0 = eval(argv[3]) opts = {} for i in xrange(5, len(argv), 2): opts[argv[i - 1]] = eval(argv[i]) # run fmin if fun is not None: tic = time.time() fmin(fun, x0, sig0, opts) # ftarget=1e-9, tolfacupx=1e9, verb_log=10) # plot() # print ' best function value ', res[2]['es'].best[1] print('elapsed time [s]: + %.2f', round(time.time() - tic, 2)) elif not len(argv): fmin(fcts.elli, np.ones(6) * 0.1, 0.1, {'ftarget':1e-9}) # ____________________________________________________________ # ____________________________________________________________ # # mainly for testing purpose # executed when called from an OS shell if __name__ == "__main__": # for i in xrange(1000): # how to find the memory leak # main(["cma.py", "rastrigin", "10", "5", "popsize", "200", "maxfevals", "24999", "verb_log", "0"]) main() csg-1.4.1/share/scripts/inverse/cma_processor.py000077500000000000000000000102061315264121600217510ustar00rootroot00000000000000#!/usr/bin/env python2 # # Copyright 2009-2012 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #option parsing #TODO optparse is depreceated since 2.7 #switch to argparse (NOT supported in 2.6) from optparse import OptionParser from sys import exit import sys import os from re import match import pickle try: import numpy except: exit("Could not import numpy modules used by cma") class state: """state class""" def __init__(self): self.state="Undefined" self.parameters=[] self.solutions=[] self.comments="" def read(self,filename): statefile = open(filename) for line in statefile: m=match("^#State = \s*(\S*)",line) if m: self.state=m.group(1) elif match("^#",line): self.comments += line else: if len(line.strip()) == 0: continue try: li=line.split() array=numpy.array([numpy.float64(i) for i in line.split()[0:-1]]) except: exit("paramter set ("+line.strip()+") contains a non-numerical value") self.parameters.append(array[0:-1]) self.solutions.append(array[-1]) if self.state != "Initialization" and not match("^(complete|try)$",li[-1]): exit("We can only handle parameter sets flagged with Complete or Try and we found '"+li[-1]+"'") statefile.close() self.comments=self.comments.strip() if self.state == "Undefined": exit("Could not fetch state from :"+filename) l = len(self.parameters[0]) for i in range(1,len(self.parameters)): if len(self.parameters[i]) != l: exit("Length of parameter set "+str(i+1)+" mismatched previous one") def write(self,filename): statefile= open (filename,"w+") statefile.write("#State = "+self.state+"\n") statefile.write(self.comments+"\n") for i in range(len(self.parameters)): for j in range(len(self.parameters[i])): statefile.write('%e'%self.parameters[i][j]+" ") statefile.write(str(self.solutions[i])+" pending\n") statefile.close() try: import cma except: exit("cma module could not be imported, please make sure to cma.py in your PYTHONPATH. 
The cma.py is available from http://www.lri.fr/~hansen/cmaes_inmatlab.html, get with 'csg_call cma get'") usage = "usage: %prog [options] statefile-in statefile-out" parser = OptionParser(usage=usage) parser.add_option("--eps", dest="eps", metavar="EPS", help="tolerance for initialization", default=0.1) (options, args) = parser.parse_args() if len(args) != 2: exit("two statefile required as parameters") current_state=state() current_state.read(args[0]) print "We are in State '",current_state.state, "' with parameters\n",current_state.parameters,"solutions: ",current_state.solutions if current_state.state == "Initialization": if len(current_state.parameters) != 1: exit("In Initialization step the state file should contain only one set (line)") es=cma.CMAEvolutionStrategy(current_state.parameters[0],options.eps) else: [es, X ] = pickle.load(open("cma.internal_state.cur")) if not numpy.allclose(X,current_state.parameters): exit("Parameterfile mismatches with internally saved parameters") es.tell(X,current_state.solutions) new_state=state() new_state.state="Running" new_state.parameters=es.ask() new_state.solutions=[ 0 for i in range(len(new_state.parameters))] new_state.comments=current_state.comments print "We going to State '",new_state.state, "' with parameters\n",new_state.parameters,"solutions: ",new_state.solutions new_state.write(args[1]) #we need to pickle parameters as well as they are saved in a dict (string compare) #and internal precission is float64 pickle.dump([es,new_state.parameters],open("cma.internal_state.new", 'w')) csg-1.4.1/share/scripts/inverse/configuration_compare.py000077500000000000000000000042761315264121600235010ustar00rootroot00000000000000#!/usr/bin/env python2 # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
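# Added note (annotation only): this helper compares two configurations in a
# simple column-based text format -- the first line is skipped, the second
# line (the number of atoms) must match exactly, and for each following line
# with six columns the first three columns are compared verbatim while the
# last three are compared numerically within the tolerance given by --eps
# (default 1e-2); on any mismatch it prints the offending line/column and
# exits with code 254, e.g.
#   configuration_compare.py --eps 1e-2 conf_old conf_new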
# #option parsing #TODO optparse is depreceated since 2.7 #switch to argparse (NOT supported in 2.6) from optparse import OptionParser usage = "usage: %prog [options] conf1 conf2" parser = OptionParser(usage=usage) parser.add_option("--eps", dest="eps", metavar="EPS", help="tolerance for mismatch", default=1e-2) (options, args) = parser.parse_args() def die(str): print str quit(254) if len(args) != 2: die("two configurations required as parameters") #open both files try: conf1 = open(args[0]) conf2 = open(args[1]) except: die("error while opening files") #skip the first line conf1.readline() conf2.readline() #compare the second line (nat) if str(conf1.readline()).strip() != str(conf2.readline()).strip(): die("nat does not match") conf1 = conf1.readlines() conf2 = conf2.readlines() #loop through files for i in range(len(conf1)): vals1 = conf1[i].split() vals2 = conf2[i].split() #6 cols expected if len(vals1) != 6: break #compare 1st, 2nd, 3rd column without tolerance for j in range(3): if vals1[j] != vals2[j]: die("mismatch in line "+str(i+3)+" col "+str(j+1)+", "+str(vals1[j])+"!="+str(vals2[j])) #compare 4th-6th col with tolerance for j in range(3,6): if abs(float(vals1[j]) - float(vals2[j])) > options.eps: die("mismatch in line "+str(i+3)+" col "+str(j+1)+", "+str(vals1[j])+"!="+str(vals2[j])) #compare last line without tolerance for j in range(3): if vals1[j] != vals2[j]: die("mismatch in line "+str(i+3)+" col "+str(j+1)+", "+str(vals1[j])+"!="+str(vals2[j])) csg-1.4.1/share/scripts/inverse/convergence_check_default.sh000077500000000000000000000026151315264121600242400ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat <" "$limit" || touch 'stop' csg-1.4.1/share/scripts/inverse/csg_table000066400000000000000000000205041315264121600204050ustar00rootroot00000000000000# # Copyright 2009-2015 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
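# Added note (annotation only): each non-comment line of this table maps a
# two-part tag, as used by csg_call, to the script that implements it, plus
# optional extra arguments, e.g. (entries taken from further down this file):
#   table integrate table_integrate.pl
#   table compare table_combine.pl --die --op =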
# #general scripts tag file tag_file.sh dummy dummy dummy.sh #only here for building the manual (not actually used) functions common functions_common.sh csg master inverse.sh # BEGIN: these scripts have to be defined for each method # method initialization prepare ibi prepare_generic.sh prepare imc prepare_imc.sh prepare generic prepare_generic.sh prepare optimizer prepare_optimizer.sh prepare re prepare_re.sh prepare_single ibi prepare_generic_single.sh prepare_single imc prepare_generic_single.sh prepare_single optimizer prepare_optimizer_single.sh initstep ibi initialize_step_generic.sh initstep imc initialize_step_generic.sh initstep optimizer initialize_step_optimizer.sh initstep re initialize_step_re.sh #this is just to warn users from old versions prepare ibm prepare_ibm.sh update ibm update_ibm.sh # the update step # add up the potential # TODO: think about name add_pot, is other name maybe better add_pot ibi add_pot_generic.sh add_pot imc add_pot_generic.sh add_pot optimizer dummy.sh add_pot re dummy.sh # END: these scripts have to be defined for each method # pre update pre_update ibi dummy.sh pre_update imc dummy.sh pre_update optimizer dummy.sh pre_update re pre_update_re.sh # post update post_update ibi post_update_generic.sh post_update imc post_update_generic.sh post_update optimizer dummy.sh post_update re post_update_generic.sh post_update_single ibi post_update_generic_single.sh post_update_single imc post_update_generic_single.sh post_update_single re post_update_re_single.sh #post update scripts postupd scale postupd_scale.sh postupd pressure postupd_pressure.sh postupd lj postupd_addlj.sh postupd splinesmooth postupd_splinesmooth.sh postupd smooth postupd_smooth.sh postupd shift postadd_shift.sh postupd dummy postadd_dummy.sh postupd tag tag_file.sh postupd extrapolate postupd_extrapolate.sh postupd kbibi postupd_kbibi_correction.sh postupd cibi postupd_cibi_correction.sh # post add post add post_add.sh post add_single post_add_single.sh # post add scripts postadd tag tag_file.sh postadd dummy postadd_dummy.sh postadd copyback postadd_copyback.sh postadd compress postadd_compress.sh postadd convergence postadd_convergence.sh postadd acc_convergence postadd_acc_convergence.sh postadd shift postadd_shift.sh postadd overwrite postadd_overwrite.sh postadd plot postadd_plot.sh postadd average postadd_average.sh #convergence checks convergence_check default convergence_check_default.sh # table preparation resample target resample_target.sh dpot crop dpot_crop.pl # ibi specific stuff update ibi update_ibi.sh update ibi_single update_ibi_single.sh update ibi_pot update_ibi_pot.pl # imc specific stuff update imc update_imc.sh imcsolver matlab solve_matlab.sh solve matlab linsolve.m imcsolver octave solve_octave.sh solve octave linsolve.octave imcsolver numpy solve_numpy.sh solve numpy linsolve.py imc purify imc_purify.sh # optimzer(generic) stuff optimizer prepare_state optimizer_prepare_state.sh optimizer parameters_to_potential optimizer_parameters_to_potential.sh optimizer state_to_potentials optimizer_state_to_potentials.sh optimizer state_to_mapping optimizer_state_to_mapping.sh update optimizer update_optimizer.sh update optimizer_single update_optimizer_single.sh optimizer_target rdf optimizer_target_rdf.sh optimizer_target density optimizer_target_density.sh optimizer_target pressure optimizer_target_pressure.sh # simplex specific stuff simplex precede_state simplex_downhill_processor.pl # cma specific stuff cma precede_state cma_processor.py # relative entroy (re) 
specific stuff update re update_re.sh # recalculate reference calc target_rdf calc_target_rdf_generic.sh # pressure correction pressure_cor simple pressure_cor_simple.pl pressure_cor wjk pressure_cor_wjk.pl # lj addition compute_lj 12_6 lj_126.pl # kirkwood-buff correction kbibi ramp_correction kbibi_ramp_correction.pl calc kbint calc_kbint.sh # generic table manipulation table add add_POT.pl table integrate table_integrate.pl table extrapolate table_extrapolate.pl table merge merge_tables.pl table smooth table_smooth.pl table linearop table_linearop.pl #do not use table functional here as it needs gnuplot table dummy table_dummy.sh table get_value table_get_value.pl table getsubset table_getsubset.py table smooth_borders table_smooth_borders.py table switch_border table_switch_border.pl table compare table_combine.pl --die --op = table combine table_combine.pl table average table_average.sh table scale table_scale.pl table change_flag table_change_flag.sh table functional table_functional.sh # generic potential manipulation potential extrapolate potential_extrapolate.sh potential shift potential_shift.pl convert_potential tab table_to_tab.pl # generic distribution manipulation dist adjust dist_adjust.pl dist invert dist_boltzmann_invert.pl # tools needed for (atom/molecule) configuration handling configuration compare configuration_compare.py # error analysis tables jackknife tables_jackknife.pl # BEGIN: these scripts have to be defined for each simulation program # interface to gromacs initstep gromacs initialize_step_genericsim.sh run gromacs run_gromacs.sh clean gromacs clean_generic.sh presimulation gromacs run_gromacs.sh --pre pressure gromacs calc_pressure_gromacs.sh rdf gromacs calc_rdf_generic.sh imc_stat gromacs imc_stat_generic.sh density gromacs calc_density_generic.sh # gromacs scripts specific stuff convert_potential gromacs potential_to_gromacs.sh convert_potentials gromacs potentials_to_generic.sh convert_potential xvg table_to_xvg.pl #function for gromacs functions gromacs functions_gromacs.sh # Interface to ESPResSo initstep espresso initialize_step_genericsim.sh run espresso run_genericsim.sh clean espresso clean_generic.sh rdf espresso calc_rdf_generic.sh imc_stat espresso imc_stat_generic.sh density espresso calc_density_generic.sh # ESPResSo scripts convert_potential espresso potential_to_generic.sh convert_potentials espresso potentials_to_generic.sh # function for ESPResSo functions espresso functions_genericsim.sh # lammps scripts convert_potential lammps potential_to_lammps.sh convert_potentials lammps potentials_to_generic.sh # interface to lammps initstep lammps initialize_step_genericsim.sh run lammps run_genericsim.sh clean lammps clean_generic.sh rdf lammps calc_rdf_generic.sh imc_stat lammps imc_stat_generic.sh density lammps calc_density_generic.sh # function for lammps functions lammps functions_genericsim.sh # espressopp scripts convert_potential espressopp potential_to_generic.sh convert_potentials espressopp potentials_to_generic.sh # interface to espressopp initstep espressopp initialize_step_genericsim.sh run espressopp run_genericsim.sh clean espressopp clean_generic.sh rdf espressopp calc_rdf_generic.sh imc_stat espressopp imc_stat_generic.sh density espressopp calc_density_generic.sh # function for espressopp functions espressopp functions_genericsim.sh # interface to dlpoly initstep dlpoly initialize_step_genericsim.sh run dlpoly run_genericsim.sh clean dlpoly clean_generic.sh rdf dlpoly calc_rdf_generic.sh imc_stat dlpoly imc_stat_generic.sh 
density dlpoly calc_density_generic.sh # function for dlpoly functions dlpoly functions_dlpoly.sh # dlpoly scripts convert_potential dlpoly potential_to_dlpoly.sh convert_potentials dlpoly potentials_to_dlpoly.sh # hoomd-blue scripts convert_potential hoomd-blue potential_to_generic.sh convert_potentials hoomd-blue potentials_to_generic.sh # interface to hoomd-blue initstep hoomd-blue initialize_step_genericsim.sh run hoomd-blue run_genericsim.sh clean hoomd-blue clean_generic.sh rdf hoomd-blue calc_rdf_generic.sh imc_stat hoomd-blue imc_stat_generic.sh density hoomd-blue calc_density_generic.sh # function for hoomd-blue functions hoomd-blue functions_genericsim.sh # END: these scripts have to be defined for each simulation program csg-1.4.1/share/scripts/inverse/dist_adjust.pl000077500000000000000000000040251315264121600214140ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2013 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] "; my $with_errors="no"; my $with_entropie="no"; my $kbT=undef; my $from="right"; my $spherical="no"; # read program arguments while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print <2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < go and implement it\n" unless (( "$type" eq "bond" ) or ("$type" eq "dihedral") or ("$type" eq "angle") or ("$type" eq "non-bonded")); } elsif ($ARGV[0] eq "--min"){ shift(@ARGV); $dist_min = shift(@ARGV); } else { die "Unknown option '".$ARGV[0]."' !\n"; } } die "2 parameters are necessary\n" if ($#ARGV<1); die "$progname: kbT not defined specify it with --kbT option\n" unless defined($kbT); use CsgFunctions; my $infile="$ARGV[0]"; my $outfile="$ARGV[1]"; my @x; my @dist; my @flag; (readin_table($infile,@x,@dist,@flag)) || die "$progname: error at readin_table\n"; my @pot; for (my $i=0;$i<=$#x;$i++){ if ($dist[$i]>$dist_min) { my $norm=1; if ( "$type" eq "bond" ) { $norm=$x[$i]*$x[$i]; } elsif ( "$type" eq "angle" ) { $norm=sin($x[$i]); } $pot[$i]=-$kbT*log($dist[$i]/$norm); } else { $pot[$i]="nan"; $flag[$i]="u"; } } #find a valid point my $valid_i=-1; for (my $i=0;$i<=$#pot;$i++){ if ($flag[$i] eq "i") { $valid_i=$i; last; } } die "All data points from file '$infile' are invalid after Boltzmann inversion, please check if your distribution is a valid dist.\n" if ($valid_i==-1); #set point at beginning to invalid my $first=undef; for (my $i=$valid_i;$i>=0;$i--){ if ($flag[$i] eq "u") { $pot[$i]=$pot[$i+1]; 
$flag[$i]="o"; $first=$i unless defined $first; } } $first=0 unless defined $first; #set point at end to invalid my $last=undef; for (my $i=$valid_i;$i<=$#pot;$i++){ if ($flag[$i] eq "u") { $pot[$i]=$pot[$i-1]; $flag[$i]="o"; $last=$i unless defined $last; } } $last=$#pot unless defined $last; my $n=10; my $valid=$last-$first+1; die "Only $valid points are valid after Boltzmann inversion from file '$infile', please check if your distribution is a valid dist.\n" if ($valid < $n); saveto_table($outfile,@x,@pot,@flag) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/dpot_crop.pl000077500000000000000000000053071315264121600210740ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] "; #Defaults my $withflag=undef; while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < -1; $withflag = $ARGV[0]; } else { die "Unknown option '".$ARGV[0]."' !\n"; } shift(@ARGV); } #Print usage die "missing parameters\n$usage\n" unless $#ARGV >= 1; use CsgFunctions; my $infile="$ARGV[0]"; my $outfile="$ARGV[1]"; my @r; my @val; my @flag; (readin_table($infile,@r,@val,@flag)) || die "$progname: error at readin_table\n"; # find last u/o my $i_first; # TODO: look for at least 3 successive points with i for($i_first=0; ($i_first<$#r) && ($flag[$i_first] =~ /[uo]/); $i_first++) {} my $ncrop=0; while($i_first + $ncrop<=$#r-3) { my $i = $i_first + $ncrop; my $delta_1 = $val[$i] - $val[$i + 1]; my $delta_2 = $val[$i + 1 ] - $val[$i + 2]; # do both deltas have the same sign? if($delta_1 * $delta_2 > 0) { last; } elsif (abs($val[$i]) < 0.5 && abs($val[$i+1]) < 0.5) { last; } $flag[$i]='o'; $ncrop++; if($ncrop > 3) { print "error: need to crop more than 3 points in $infile. think about sampleing/grid interval."; exit 1; } } if($ncrop > 0) { print "warnng, I cropped $ncrop points at the beginning\n"; } saveto_table($outfile,@r,@val,@flag) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/dummy.sh000077500000000000000000000014551315264121600202350ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat <&4 echo -e "$*" >&2 else echo -e "${color}$*${off}" >&2 fi else [[ -z $* ]] && return if [[ -n ${CSGLOG} ]]; then echo -e "${color}$*${off}" >&3 echo -e "$*" else echo -e "${color}$*${off}" fi fi } export -f msg show_callstack() { #show the current callstack local space line if [[ -n $CSG_CALLSTACK ]]; then echo "$CSG_CALLSTACK" space="$(echo "$CSG_CALLSTACK" | sed -n '$s/[^[:space:]].*$/ /p')" else space="" fi [[ $0 = *"bash" ]] || echo "${space}${0} - linenumber ${BASH_LINENO[ $(( ${#FUNCNAME[@]} -2 ))]}" for ((c=${#FUNCNAME[*]}-1;c>0;c--)); do [[ ${FUNCNAME[$c]} = main ]] && continue #main is useless as the info was printed 2 lines above space+=" " if [[ $0 = *csg_call || $0 = *inverse.sh ]]; then echo "${space}${FUNCNAME[$c]} - linenumber ${BASH_LINENO[ $(( $c - 1 )) ]} in ${BASH_SOURCE[$c]}" else echo "${space}${FUNCNAME[$c]} - linenumber ${BASH_LINENO[ $(( $c - 1 )) ]} (see 'csg_call --cat function ${FUNCNAME[$c]}')" fi done [[ $1 = "--extra" ]] || return 0 shift for i in "$@"; do space+=" " echo "${space}${i} - linenumber ???" done } export -f show_callstack unset -f die die () { #make the iterative frame work stopp local pid pids c place #Output callstack to stderr in case die was executed in $( ) echo -e "\nCallstack:" >&2 show_callstack >&2 [[ -z $CSGLOG ]] && place="Details can be found above" || place="For details see the logfile $CSGLOG" msg --color red --to-stderr "$(csg_banner "ERROR:" "$@" "$place")" if [[ -n ${CSG_MASTER_PID} ]]; then #grabbing the pid group would be easier, but it would not work on AIX pid="$$" pids="$$" c=0 #find the parent of pid until we reach CSG_MASTER_PID until [[ ${CSG_MASTER_PID} -eq $pid ]]; do #get the parent pid using BSD style due to AIX pid=$(ps -o ppid= -p "$pid" 2>/dev/null) [[ -z $pid ]] && pids="0" && break #store them in inverse order to kill parents before the child pids="$pid $pids" ((c++)) #at max 10000 iterations if [[ $c -eq 10000 ]]; then #failback to default, see comment below pids="0" break fi done if [[ -n ${CSGLOG} ]]; then echo "${FUNCNAME[0]}: (called from $$) CSG_MASTER_PID is $CSG_MASTER_PID" >&2 echo "${FUNCNAME[0]}: pids to kill: $pids" >&2 fi kill $pids else #send kill signal to all process within the process groups kill 0 fi exit 1 } export -f die cat_external() { #takes a two tags and shows content of the according script local script script="$(source_wrapper $1 $2)" || die "${FUNCNAME[0]}: source_wrapper $1 $2 failed" if [[ $1 = "function" ]]; then type $2 | sed '1d' else cat "${script/ *}" fi } export -f cat_external do_external() { #takes two tags, find the according script and excute it local script tags quiet="no" ret [[ $1 = "-q" ]] && quiet="yes" && shift script="$(source_wrapper $1 $2)" || die "${FUNCNAME[0]}: source_wrapper $1 $2 failed" tags="$1 $2" [[ $1 != "function" && ! -x ${script/ *} ]] && die "${FUNCNAME[0]}: subscript '${script/ *}' (from tags $tags), is not executable! 
(Run chmod +x ${script/ *})" #print this message to stderr to allow $(do_external ) and do_external XX > [[ $quiet = "no" ]] && echo "Running subscript '${script##*/}${3:+ }${@:3}' (from tags $tags) dir ${script%/*}" >&2 # in debugmode we don't need to do anything special for $1 = function as set -x is already done if [[ -n $CSGDEBUG ]] && [[ -n "$(sed -n '1s@bash@XXX@p' "${script/ *}")" ]]; then CSG_CALLSTACK="$(show_callstack)" "${BASH}" -x $script "${@:3}" elif [[ -n $CSGDEBUG && -n "$(sed -n '1s@perl@XXX@p' "${script/ *}")" ]]; then local perl_debug="$(mktemp perl_debug.XXX)" ret PERLDB_OPTS="NonStop=1 AutoTrace=1 frame=2 LineInfo=$perl_debug" perl -dS $script "${@:3}" ret=$? cat "$perl_debug" 2>&1 [[ $ret -eq 0 ]] elif [[ $1 != "function" && -n "$(sed -n '1s@perl@XXX@p' "${script/ *}")" ]]; then CSG_CALLSTACK="$(show_callstack --extra "${script/ *}")" $script "${@:3}" else CSG_CALLSTACK="$(show_callstack)" $script "${@:3}" fi || die "${FUNCNAME[0]}: subscript" "$script ${*:3}" "(from tags $tags) failed" } export -f do_external critical() { #executes arguments as command and calls die if not succesful local quiet="no" [[ $1 = "-q" ]] && quiet="yes" && shift [[ -z $1 ]] && die "${FUNCNAME[0]}: missing argument" #print this message to stderr because $(critical something) is used very often [[ $quiet = "no" ]] && echo "Running critical command '$*'" >&2 "$@" || die "${FUNCNAME[0]}: '$*' failed" } export -f critical for_all (){ #do something for all interactions (1st argument) local bondtype ibondtype rbondtype bondtypes name interactions quiet="no" [[ $1 = "-q" ]] && quiet="yes" && shift [[ -z $1 || -z $2 ]] && die "${FUNCNAME[0]}: need at least two arguments" bondtypes="$1" shift interactions=( $(csg_get_interaction_property --all name) ) min=( $(csg_get_interaction_property --all min) ) [[ ${#min[@]} -ne ${#interactions[@]} ]] && die "${FUNCNAME[0]}: one interaction has no name or min" name=$(has_duplicate "${interactions[@]}") && die "${FUNCNAME[0]}: interaction name $name appears twice" for bondtype in $bondtypes; do #check that type is bonded or non-bonded is_part "$bondtype" "non-bonded bonded angle bond dihedral" || die "for_all: Argument 1 needs to be non-bonded, bonded, angle, bond or dihedral" [[ $quiet = "no" ]] && echo "For all $bondtype" >&2 #internal bondtype is_part "$bondtype" "angle bond dihedral bonded" && ibondtype="bonded" || ibondtype="non-bonded" interactions=( $(csg_get_property --allow-empty cg.$ibondtype.name) ) #filter me away for name in "${interactions[@]}"; do #check if interaction is actually angle, bond or dihedral if is_part "$bondtype" "angle bond dihedral"; then rbondtype=$(bondtype="$ibondtype" bondname="$name" csg_get_interaction_property bondtype) [[ $rbondtype = $bondtype ]] || continue fi #print this message to stderr to avoid problem with $(for_all something) [[ $quiet = no ]] && echo "for_all: run '$*' for interaction named '$name'" >&2 #we need to use bash -c here to allow things like $(csg_get_interaction_property name) in arguments #write variable defines in the front is better, that export #no need to run unset afterwards bondtype="$ibondtype" \ bondname="$name" \ CSG_CALLSTACK="$(show_callstack)" \ "${BASH}" -c "$*" || die "${FUNCNAME[0]}: ${BASH} -c '$*' failed for interaction named '$name'" done done } export -f for_all csg_get_interaction_property () { #gets an interaction property from the xml file, should only be called from inside a for_all loop or with --all option local ret allow_empty="no" for_all="no" xmltype while [[ $1 = --* ]]; do 
case $1 in --allow-empty) allow_empty="yes";; --all) for_all="yes";; *) die "${FUNCNAME[0]}: Unknow option '$1'";; esac shift done [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" if [[ $for_all = "yes" ]]; then [[ $1 = "bondtype" ]] && die "${FUNCNAME[0]}: --all + bondtype not implemented yet" local t for t in non-bonded bonded; do ret+=" $(csg_get_property --allow-empty "cg.$t.$1")" #filter me away done ret="$(echo "$ret" | trim_all)" [[ -z $ret ]] && die "${FUNCNAME[0]}: Not a single interaction has a value for the property $1" echo "$ret" return 0 fi #make these this case work even without name or type (called by csg_call) if [[ $1 = "name" ]]; then [[ -n $bondname ]] && echo "$bondname" && return 0 die "${FUNCNAME[0]}: bondname is undefined (when calling from csg_call set it by --ia-name option)" fi if [[ $1 = "bondtype" ]]; then #bondtype is special -> dirty hack - removed whenever issue 13 is fixed [[ -z "$bondtype" ]] && die "${FUNCNAME[0]}: bondtype is undefined (when calling from csg_call set it by --ia-type option)" #for_all notation for any kind of bonded interaction, find the real type if [[ $bondtype = "bonded" ]]; then [[ -z ${bondname} ]] && die "${FUNCNAME[0]}: bondtype 'bonded' needs a bondname (when calling from csg_call set it by --ia-name option) or change type to angle, bond or dihedral" [[ -n "$(type -p csg_property)" ]] || die "${FUNCNAME[0]}: Could not find csg_property" mapping="$(csg_get_property --allow-empty cg.inverse.map)" #make error message more useful [[ -z ${mapping} ]] && die "${FUNCNAME[0]}: bondtype 'bonded' needs a mapping file (cg.inverse.map in xml) to determine the actual bond type (when calling from csg_call better use --ia-type bond, angle or dihedral)" local map names=() ret= ret2 dup for map in ${mapping}; do [[ -f "$(get_main_dir)/$map" ]] || die "${FUNCNAME[0]}: Mapping file '$map' for bonded interaction not found in maindir" names+=( $(critical -q csg_property --file "$(get_main_dir)/$map" --path cg_molecule.topology.cg_bonded.*.name --print . --short) ) dup=$(has_duplicate "${names[@]}") && die "${FUNCNAME[0]}: cg_bonded name '$dup' appears twice in file(s) $mapping" ret2="$(critical -q csg_property --file "$(get_main_dir)/$map" --path cg_molecule.topology.cg_bonded.* --filter name="$bondname" --print . --with-path | trim_all)" ret2="$(echo "$ret2" | critical sed -n 's/.*cg_bonded\.\([^[:space:]]*\) .*/\1/p')" if [[ -n $ret2 ]]; then [[ -n $ret ]] && die "${FUNCNAME[0]}: Found cg_bonded type for name '$bondname' twice" ret="${ret2}" fi done [[ -z $ret ]] && die "${FUNCNAME[0]}: Could not find a bonded definition with name '$bondname' in the mapping file(s) '$mapping'. Make sure to use the same name in the settings file (or --ia-name when calling from csg_call) and the mapping file." 
echo "$ret" else echo "$bondtype" fi return 0 fi [[ -n $CSGXMLFILE ]] || die "${FUNCNAME[0]}: CSGXMLFILE is undefined (when calling from csg_call set it by --options option)" [[ -n $bondtype ]] || die "${FUNCNAME[0]}: bondtype is undefined (when calling from csg_call set it by --ia-type option)" [[ -n $bondname ]] || die "${FUNCNAME[0]}: bondname is undefined (when calling from csg_call set it by --ia-name option)" #map bondtype back to tags in xml file (for csg_call) case "$bondtype" in "non-bonded") xmltype="non-bonded";; "bonded"|"bond"|"angle"|"dihedral") xmltype="bonded";; *) msg "Unknown bondtype '$bondtype' - assume non-bonded" xmltype="non-bonded";; esac [[ -n "$(type -p csg_property)" ]] || die "${FUNCNAME[0]}: Could not find csg_property" #the --filter/--path(!=.) option will make csg_property fail if $1 does not exist #so no critical here ret="$(csg_property --file $CSGXMLFILE --short --path cg.${xmltype} --filter name=$bondname --print $1 | trim_all)" #overwrite with function call value [[ -z $ret && -n $2 ]] && ret="$2" [[ -z $ret ]] && echo "${FUNCNAME[0]}: No value for '$1' found in $CSGXMLFILE, trying $VOTCASHARE/xml/csg_defaults.xml" >&2 # if still empty fetch it from defaults file if [[ -z $ret && -f $VOTCASHARE/xml/csg_defaults.xml ]]; then ret="$(critical -q csg_property --file "$VOTCASHARE/xml/csg_defaults.xml" --short --path cg.${xmltype}.$1 --print . | trim_all)" [[ $allow_empty = "yes" && -n "$res" ]] && msg "WARNING: '${FUNCNAME[0]} $1' was called with --allow-empty, but a default was found in '$VOTCASHARE/xml/csg_defaults.xml'" #from time to time the default is only given in the non-bonded section [[ -z $ret ]] && ret="$(critical -q csg_property --file "$VOTCASHARE/xml/csg_defaults.xml" --short --path cg.non-bonded.$1 --print . | trim_all)" [[ -n $ret ]] && echo "${FUNCNAME[0]}: value for '$1' from $VOTCASHARE/xml/csg_defaults.xml: $ret" >&2 fi [[ $allow_empty = "no" && -z $ret ]] && die "${FUNCNAME[0]}: Could not get '$1' for interaction with name '$bondname' from ${CSGXMLFILE} and no default was found in $VOTCASHARE/xml/csg_defaults.xml" [[ -z $ret ]] && echo "${FUNCNAME[0]}: returning emtpy value for '$1'" >&2 echo "${ret}" } export -f csg_get_interaction_property csg_get_property () { #get an property from the xml file local ret allow_empty if [[ $1 = "--allow-empty" ]]; then shift allow_empty="yes" else allow_empty="no" fi [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" [[ -n $CSGXMLFILE ]] || die "${FUNCNAME[0]}: CSGXMLFILE is undefined (when calling from csg_call set it by --options option)" [[ -n "$(type -p csg_property)" ]] || die "${FUNCNAME[0]}: Could not find csg_property" #csg_property only fails if xml file is bad otherwise result is empty #leave the -q here to avoid flooding with messages ret="$(critical -q csg_property --file $CSGXMLFILE --path ${1} --short --print . | trim_all)" #overwrite with function call value [[ -z $ret && -n $2 ]] && ret="$2" [[ -z $ret ]] && echo "${FUNCNAME[0]}: No value for '$1' found in $CSGXMLFILE, trying $VOTCASHARE/xml/csg_defaults.xml" >&2 #if still empty fetch it from defaults file if [[ -z $ret && -f $VOTCASHARE/xml/csg_defaults.xml ]]; then ret="$(critical -q csg_property --file "$VOTCASHARE/xml/csg_defaults.xml" --path "${1}" --short --print . 
| trim_all)" [[ $allow_empty = "yes" && -n "$res" ]] && msg "WARNING: '${FUNCNAME[0]} $1' was called with --allow-empty, but a default was found in '$VOTCASHARE/xml/csg_defaults.xml'" #avoid endless recursion [[ $1 = cg.inverse.program && -n $ret ]] || sim_prog="$ret" \ sim_prog="$(csg_get_property cg.inverse.program)" #no problem to call recursively as sim_prog has a default if [[ -z $ret ]] && [[ $1 = *${sim_prog}* ]]; then local path=${1/${sim_prog}/sim_prog} ret="$(critical -q csg_property --file "$VOTCASHARE/xml/csg_defaults.xml" --path "${path}" --short --print . | trim_all)" fi [[ -n $ret ]] && echo "${FUNCNAME[0]}: value for '$1' from $VOTCASHARE/xml/csg_defaults.xml: $ret" >&2 [[ $allow_empty = "yes" && -n "$res" ]] && msg "WARNING: '${FUNCNAME[0]} $1' was called with --allow-empty, but a default was found in '$VOTCASHARE/xml/csg_defaults.xml'" fi [[ $allow_empty = "no" && -z $ret ]] && die "${FUNCNAME[0]}: Could not get '$1' from ${CSGXMLFILE} and no default was found in $VOTCASHARE/xml/csg_defaults.xml" [[ -z $ret ]] && echo "${FUNCNAME[0]}: returning emtpy value for '$1'" >&2 echo "${ret}" } export -f csg_get_property trim_all() { #make multiple lines into one and strip white space from beginning and the end, reads from stdin [[ -n "$(type -p tr)" ]] || die "${FUNCNAME[0]}: Could not find tr" tr '\n' ' ' | sed -e s'/^[[:space:]]*//' -e s'/[[:space:]]*$//' || die "${FUNCNAME[0]}: sed of argument $i failed" } export -f trim_all mark_done () { #mark a task (1st argument) as done in the restart file local file [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" file="$(get_restart_file)" is_done "$1" || echo "$1 done" >> "${file}" } export -f mark_done is_done () { #checks if something is already do in the restart file local file [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" file="$(get_restart_file)" [[ -f ${file} ]] || return 1 [[ -n "$(sed -n "/^$1 done\$/p" ${file})" ]] && return 0 return 1 } export -f is_done is_int() { #checks if all arguments are integers local i [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for i in "$@"; do [[ -n $i && -z ${i//[0-9]} ]] || return 1 done return 0 } export -f is_int to_int() { #convert all given numbers to int using awk's int function local i [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for i in "$@"; do is_num "$i" || die "${FUNCNAME[0]}: $i is not a number" awk -v x="$i" 'BEGIN{ print ( int(x) ) }' || die "${FUNCNAME[0]}: awk failed" done return 0 } export -f to_int is_part() { #checks if 1st argument is part of the set given by other arguments [[ -z $1 || -z $2 ]] && die "${FUNCNAME[0]}: Missing argument" [[ " ${@:2} " = *" $1 "* ]] } export -f is_part has_duplicate() { #check if one of the arguments is double local i j [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for ((i=1;i<$#;i++)); do for ((j=i+1;j<=$#;j++)); do [[ ${!i} = ${!j} ]] && echo ${!i} && return 0 done done return 1 } export -f has_duplicate remove_duplicate() { #remove duplicates list of arguments local i j out=() c [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for ((i=1;i<=$#;i++)); do c=0 for ((j=0;j<${#out[@]};j++)); do [[ ${!i} = ${out[j]} ]] && ((c++)) done [[ $c -eq 0 ]] && out+=( "${!i}" ) done echo "${out[@]}" } export -f remove_duplicate is_num() { #checks if all arguments are numbers local i res [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for i in "$@"; do res=$(awk -v x="$i" 'BEGIN{ print ( x+0==x ) }') || die "${FUNCNAME[0]}: awk failed" [[ $res -eq 1 ]] || return 1 unset res done return 0 } export -f is_num 
get_stepname() { #get the dir name of a certain step number (1st argument) local name [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" if [[ $1 = "--trunc" ]]; then echo "step_" return 0 fi is_int "${1}" || die "${FUNCNAME[0]}: needs an int as argument, but got $1" name="$(printf step_%03i "$1")" [[ -z $name ]] && die "${FUNCNAME[0]}: Could not get stepname" echo "$name" } export -f get_stepname update_stepnames(){ #update the current working step to a certain number (1st argument) local thisstep laststep nr [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" nr="$1" is_int "$nr" || die "${FUNCNAME[0]}: needs an int as argument, but got $nr" [[ -z $CSG_MAINDIR ]] && die "${FUNCNAME[0]}: CSG_MAINDIR is undefined" [[ -d $CSG_MAINDIR ]] || die "${FUNCNAME[0]}: $CSG_MAINDIR is not a dir" thisstep="$(get_stepname $nr)" export CSG_THISSTEP="$CSG_MAINDIR/$thisstep" if [[ $nr -gt 0 ]]; then laststep="$(get_stepname $((nr-1)) )" export CSG_LASTSTEP="$CSG_MAINDIR/$laststep" fi } export -f update_stepnames get_current_step_dir() { #print the directory of the current step [[ -z $CSG_THISSTEP ]] && die "${FUNCNAME[0]}: \$CSG_THISSTEP is undefined (when calling from csg_call export it yourself)" if [[ $1 = "--no-check" ]]; then : else [[ -d $CSG_THISSTEP ]] || die "${FUNCNAME[0]}: $CSG_THISSTEP is not a dir" fi echo "$CSG_THISSTEP" } export -f get_current_step_dir get_last_step_dir() { #print the directory of the last step [[ -z $CSG_LASTSTEP ]] && die "${FUNCNAME[0]}: CSG_LASTSTEP is undefined (when calling from csg_call export it yourself)" [[ -d $CSG_LASTSTEP ]] || die "${FUNCNAME[0]}: $CSG_LASTSTEP is not a dir" echo "$CSG_LASTSTEP" } export -f get_last_step_dir get_main_dir() { #print the main directory [[ -z $CSG_MAINDIR ]] && die "${FUNCNAME[0]}: CSG_MAINDIR is undefined" [[ -d $CSG_MAINDIR ]] || die "${FUNCNAME[0]}: $CSG_MAINDIR is not a dir" echo "$CSG_MAINDIR" } export -f get_main_dir get_current_step_nr() { #print the number of the current step local name nr name=$(get_current_step_dir) nr=$(get_step_nr $name) echo "$nr" } export -f get_current_step_nr get_step_nr() { #print the number of a certain step directory (1st argument) local nr trunc trunc=$(get_stepname --trunc) [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" nr=${1##*/} nr=${nr#$trunc} #convert to base 10 and cut leading zeros nr=$((10#$nr)) is_int "$nr" || die "${FUNCNAME[0]}: Could not fetch step nr, got $nr" echo "$nr" } export -f get_step_nr cp_from_main_dir() { #copy something from the main directory critical pushd "$(get_main_dir)" if [[ $1 = "--rename" ]]; then shift [[ $# -eq 2 && -n $1 && -n $2 ]] || die "${FUNCNAME[0]}: with the --rename option exactly 2 (non-empty) arguments are required" echo "cp_from_main_dir: '$1' to '$2'" critical cp $1 "$(dirs -l +1)/$2" else echo "cp_from_main_dir: '$@'" critical cp $@ "$(dirs -l +1)" fi critical popd } export -f cp_from_main_dir cp_from_last_step() { #copy something from the last step if [[ $1 = "--rename" ]]; then shift [[ $# -eq 2 && -n $1 && -n $2 ]] || die "${FUNCNAME[0]}: with the --rename option exactly 2 (non-empty) arguments are required" echo "cp_from_last_step: '$1' to '$2'" critical pushd "$(get_last_step_dir)" critical cp $1 "$(dirs -l +1)/$2" critical popd else echo "cp_from_last_step: '$@'" critical pushd "$(get_last_step_dir)" critical cp $@ "$(dirs -l +1)" critical popd fi } export -f cp_from_last_step get_time() { #print the current time in seconds since 1970 date +%s || die "${FUNCNAME[0]}: date +%s failed" } export -f get_time get_number_tasks() { #get the number of
possible tasks from the xml file or determine it automatically under some systems local tasks tasks="$(csg_get_property cg.inverse.simulation.tasks)" [[ $tasks = "auto" ]] && tasks=0 is_int "$tasks" || die "${FUNCNAME[0]}: cg.inverse.simulation.tasks needs to be a number or 'auto', but I got $(csg_get_property cg.inverse.simulation.tasks)" if [[ $tasks -eq 0 ]]; then #auto-detect if [[ -r /proc/cpuinfo ]]; then #linux tasks=$(sed -n '/processor/p' /proc/cpuinfo | sed -n '$=') elif [[ -x /usr/sbin/sysctl ]]; then #mac os tasks=$(/usr/sbin/sysctl -n hw.ncpu) elif [[ -x /usr/sbin/lsdev ]]; then #AIX tasks=$(/usr/sbin/lsdev | sed -n '/Processor/p' | sed -n '$=') fi is_int "${tasks}" || tasks=1 #failback in case we got non-int fi echo "$tasks" } export -f get_number_tasks get_table_comment() { #get comment lines from a table and add common information, which include the git id and other information local version co [[ -n "$(type -p csg_call)" ]] || die "${FUNCNAME[0]}: Could not find csg_call" version="$(csg_call --version)" || die "${FUNCNAME[0]}: csg_call --version failed" echo "Created on $(date) by $USER@$HOSTNAME" echo "called from $version" | sed "s/csg_call/${0##*/}/" [[ -n ${CSGXMLFILE} ]] && echo "settings file: $(globalize_file $CSGXMLFILE)" echo "working directory: $PWD" if [[ -f $1 ]]; then co=$(sed -n 's/^[#@][[:space:]]*//p' "$1") || die "${FUNCNAME[0]}: sed failed" [[ -n $co ]] && echo "Comments from $(globalize_file $1):\n$co" fi } export -f get_table_comment csg_inverse_clean() { #clean out the main directory local i files log t [[ -n $1 ]] && t="$1" || t="30" log="$(csg_get_property cg.inverse.log_file 2>/dev/null)" echo -e "So, you want to clean?\n" echo "I will remove:" files="$(ls -d done ${log} $(get_stepname --trunc)* *~ 2>/dev/null)" if [[ -z $files ]]; then echo "Nothing to clean" else msg --color red $files msg --color blue "\nCTRL-C to stop it" for ((i=$t;i>0;i--)); do echo -n "$i " sleep 1 done rm -rf $files msg --color green "\n\nDone, hope you are happy now" fi } export -f csg_inverse_clean check_path_variable() { #check if a variable contains only valid paths local old_IFS dir [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for var in "$@"; do [[ -z $var ]] && continue old_IFS="$IFS" IFS=":" for dir in ${!var}; do [[ -z $dir ]] && continue [[ $dir = *votca* ]] || continue #to many error otherwise [[ -d $dir ]] || die "${FUNCNAME[0]}: $dir from variable $var is not a directory" done IFS="$old_IFS" done } export -f check_path_variable add_to_csgshare() { #added an directory to the csg internal search directories local dir end="no" [[ $1 = "--at-the-end" ]] && end="yes" && shift [[ -z $1 ]] && die "${FUNCNAME[0]}: Missing argument" for dirlist in "$@"; do old_IFS="$IFS" IFS=":" for dir in $dirlist; do #dir maybe contains $PWD or something eval dir="$dir" [[ -d $dir ]] || die "${FUNCNAME[0]}: Could not find scriptdir $dir" dir="$(globalize_dir "$dir")" if [[ $end = "yes" ]]; then export CSGSHARE="${CSGSHARE}${CSGSHARE:+:}$dir" export PERL5LIB="${PERL5LIB}${PERL5LIB:+:}$dir" export PYTHONPATH="${PYTHONPATH}${PYTHONPATH:+:}$dir" else export CSGSHARE="$dir${CSGSHARE:+:}$CSGSHARE" export PERL5LIB="$dir${PERL5LIB:+:}$PERL5LIB" export PYTHONPATH="$dir${PYTHONPATH:+:}$PYTHONPATH" fi done IFS="$old_IFS" done check_path_variable CSGSHARE PERL5LIB PYTHONPATH } export -f add_to_csgshare globalize_dir() { #convert a local directory to a global one [[ -z $1 ]] && die "${FUNCNAME[0]}: missing argument" [[ -d $1 ]] || die "${FUNCNAME[0]}: '$1' is not a dir" cd "$1" pwd } 
export -f globalize_dir globalize_file() { #convert a local file name to a global one [[ -z $1 ]] && die "${FUNCNAME[0]}: missing argument" [[ -f $1 ]] || die "${FUNCNAME[0]}: '$1' is not a file" local dir [[ ${1%/*} = ${1} ]] && dir="." || dir="${1%/*}" echo "$(globalize_dir "$dir")/${1##*/}" } export -f globalize_file source_function() { #source an extra function file local function_file [[ -n $1 ]] || die "${FUNCNAME[0]}: Missing argument" function_file=$(source_wrapper functions $1) || die "${FUNCNAME[0]}: source_wrapper functions $1 failed" source ${function_file} || die "${FUNCNAME[0]}: source ${function_file} failed" } export -f source_function csg_banner() { #print a big banner local i l=0 list=() [[ -z $1 ]] && return 0 for i in "$@"; do while [[ -n $i && -z ${i/*\\n*} ]]; do list[$l]="${i%%\\n*}" ((l++)) i="${i#*\\n}" done list[$l]=$i ((l++)) done l="1" for i in "${list[@]}"; do [[ ${#l} -lt ${#i} ]] && l="${i}" done echo "####${l//?/#}" echo "# ${l//?/ } #" for i in "${list[@]}"; do printf "# %-${#l}s #\n" "$i" done echo "# ${l//?/ } #" echo "####${l//?/#}" } export -f csg_banner csg_calc() { #simple calculator, a + b, ... local res ret=0 err="1e-2" [[ -z $1 || -z $2 || -z $3 ]] && die "${FUNCNAME[0]}: Needs 3 arguments, but got '$*'" is_num "$1" || die "${FUNCNAME[0]}: First argument of csg_calc should be a number, but got '$1'" is_num "$3" || die "${FUNCNAME[0]}: Third argument of csg_calc should be a number, but got '$3'" [[ -n "$(type -p awk)" ]] || die "${FUNCNAME[0]}: Could not find awk" #we use awk -v because then " 1 " or "1\n" is equal to 1 case "$2" in "+"|"-"|'*'|"/"|"^") res="$(awk -v x="$1" -v y="$3" "BEGIN{print ( x $2 y ) }")" || die "${FUNCNAME[0]}: awk -v x='$1' -v y='$3' 'BEGIN{print ( x $2 y ) }' failed" true;; '>'|'<' ) res="$(awk -v x="$1" -v y="$3" "BEGIN{print ( x $2 y )}")" || die "${FUNCNAME[0]}: awk -v x='$1' -v y='$3' 'BEGIN{print ( x $2 y )}' failed" #awk return 1 for true and 0 for false, shell exit codes are the other way around ret="$((1-$res))" #return value matters res="" true;; "="|"==") #this is really tricky... 
case x=0,y=0 is catched by (x==y) after that |x-y|/max(|x|,|y|) will work expect for x,y beginng close to zero res="$(awk -v x="$1" -v y="$3" -v e="$err" \ 'function max(x,y){return (x>y)?x:y;} function abs(x){return (x<0)?-x:x;} BEGIN{if (x==y){print 1;}else{if (abs(x-y)&1 4>&2 >> "$CSGLOG" 2>&1 echo -e "\n\n#################################" echo "# Appending to existing logfile #" echo -e "#################################\n\n" msg --color blue "Appending to existing logfile ${CSGLOG##*/}" else exec 3>&1 4>&2 >> "$CSGLOG" 2>&1 msg "For a more verbose log see: ${CSGLOG##*/}" fi } export -f enable_logging get_restart_file() { #print the name of the restart file to use local file file="$(csg_get_property cg.inverse.restart_file)" [[ -z ${file/*\/*} ]] && die "${FUNCNAME[0]}: cg.inverse.restart_file has to be a local file with slash '/'" echo "$file" } export -f get_restart_file check_for_obsolete_xml_options() { #check xml file for obsolete options local i for i in cg.inverse.mpi.tasks cg.inverse.mpi.cmd cg.inverse.parallel.tasks cg.inverse.parallel.cmd \ cg.inverse.gromacs.mdrun.bin cg.inverse.espresso.bin cg.inverse.scriptdir cg.inverse.gromacs.grompp.topol \ cg.inverse.gromacs.grompp.index cg.inverse.gromacs.g_rdf.topol cg.inverse.convergence_check \ cg.inverse.convergence_check_options.name_glob cg.inverse.convergence_check_options.limit \ cg.inverse.espresso.table_end cg.inverse.gromacs.traj_type cg.inverse.gromacs.topol_out \ cg.inverse.espresso.blockfile cg.inverse.espresso.blockfile_out cg.inverse.espresso.n_steps \ cg.inverse.espresso.exclusions cg.inverse.espresso.debug cg.inverse.espresso.n_snapshots \ cg.non-bonded.inverse.espresso.index1 cg.non-bonded.inverse.espresso.index2 cg.inverse.espresso.success \ cg.inverse.espresso.scriptdir cg.non-bonded.inverse.post_update_options.kbibi.type \ cg.inverse.imc.numpy.bin \ ; do [[ -z "$(csg_get_property --allow-empty $i)" ]] && continue #filter me away new="" case $i in cg.inverse.mpi.tasks|cg.inverse.parallel.tasks) new="cg.inverse.simulation.tasks";; cg.inverse.gromacs.mdrun.bin|cg.inverse.espresso.bin) new="${i/bin/command}";; cg.inverse.scriptdir) new="${i/dir/path}";; cg.inverse.gromacs.grompp.index) new="${i/.grompp}";; cg.inverse.gromacs.grompp.topol) new="cg.inverse.gromacs.topol_in";; cg.inverse.gromacs.g_rdf.topol) new="${i/g_}";; cg.inverse.gromacs.topol_out) new="${i/_out}";; cg.inverse.gromacs.traj_type) new="";; cg.inverse.convergence_check) new="${i}.type";; cg.inverse.convergence_check_options.limit) new="cg.inverse.convergence_check.limit";; esac [[ -n $new ]] && new="has been renamed to $new" || new="has been removed" die "${FUNCNAME[0]}: The xml option $i $new\nPlease remove the obsolete options from the xmlfile" done } export -f check_for_obsolete_xml_options check_for_bug_179() { #check if shell has bug #179 simple_fct() { echo "Shell OK";} export -f simple_fct if [[ $(perl -e '$x=`bash -c "simple_fct"`; print $x' 2> /dev/null) != "Shell OK" ]]; then die "Your shell seems to exhibit bug #179 (see https://github.com/votca/csg/issues/179)\nIn short, as a workaround you want to run 'sudo ln -fs bash /bin/sh' and re-login!" 
fi unset simple_fct } check_for_bug_179 unset check_for_bug_179 command_not_found_handle() { #print and error message if a command or a function was not found die "Command/function $1 not found (when calling from csg_call you might need to add --simprog option or set cg.inverse.program in the xml file)" } export -f command_not_found_handle #in bash4 this is not needed, but for older bash we add add a failback from most important simulation functions for i in simulation_finish checkpoint_exist get_simulation_setting; do eval $i\(\) { command_not_found_handle $i\; } eval export -f $i done unset i csg-1.4.1/share/scripts/inverse/functions_dlpoly.sh000077500000000000000000000040161315264121600224710ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2013 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat <' "$equi_time" && echo "$t1" || echo "$equi_time" } export -f calc_begin_time calc_end_time() { #return dt * nsteps local dt steps dt=$(get_simulation_setting dt) steps=$(get_simulation_setting nsteps) csg_calc "$dt" "*" "$steps" } export -f calc_end_time gromacs_log() { #redirect stdin to a separate gromacs log file, 1st argument can be the name of the command to echo if redirection takes place local log log2 log="$(csg_get_property --allow-empty cg.inverse.gromacs.log)" if [[ -z $log ]]; then [[ ${CSG_RUNTEST} ]] && tee >(cat - >&4) || cat return $? fi log="${log##*/}" log2="$(csg_get_property cg.inverse.log_file)" log2="${log2##*/}" [[ $log = $log2 ]] && die "${FUNCNAME}: cg.inverse.gromacs.log is equal cg.inverse.log_file" [[ -n $* ]] && echo "Sending output of '$*' to $log (also look for errors there)" || echo "Sending stdin to $log (also look for errors there)" [[ ${CSG_RUNTEST} ]] && tee >(cat - >&4) || cat >> "$log" return $? } export -f gromacs_log csg-1.4.1/share/scripts/inverse/imc_purify.sh000077500000000000000000000036161315264121600212510ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
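#
# Worked example (hypothetical numbers, added for illustration) for the
# calc_end_time helper above: it multiplies the mdp settings dt and nsteps
# via csg_calc, so with dt = 0.002 (ps) and nsteps = 500000 it prints
#
#   csg_calc 0.002 "*" 500000   # -> 1000 (ps)
#
# calc_begin_time in turn never reports a begin time smaller than the
# configured equilibration time.
#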
# if [ "$1" = "--help" ]; then cat <" "0"; then msg --color blue --to-stderr "Automatically setting equi_time to 0, because CSG_RUNTEST was set" equi_time=0 fi first_frame="$(csg_get_property cg.inverse.$sim_prog.first_frame)" tasks=$(get_number_tasks) msg "Calculating IMC statistics using $tasks tasks" if is_done "imc_analysis"; then echo "IMC analysis is already done" else #copy+resample all target dist in $this_dir for_all "non-bonded bonded" do_external resample target '$(csg_get_interaction_property inverse.target)' '$(csg_get_interaction_property name).dist.tgt' critical csg_stat --do-imc --options "$CSGXMLFILE" --top "$topol" --trj "$traj" \ --begin $equi_time --first-frame $first_frame --nt $tasks mark_done "imc_analysis" fi csg-1.4.1/share/scripts/inverse/imcdata_from_blocks.octave000066400000000000000000000014711315264121600237260ustar00rootroot00000000000000name='CG-CG' blocks=load(strcat(name, '.blocks')); corr_avg = load(strcat(name, '_', num2str(blocks(1)), '.cor')); S_avg = load(strcat(name, '_', num2str(blocks(1)), '.S')); dS_avg = load(strcat(name, '_', num2str(blocks(1)), '.imc')); for i = 2:length(blocks) corr_file=strcat(name, '_', num2str(blocks(i)), '.cor'); S_file=strcat(name, '_', num2str(blocks(i)), '.S'); dS_file=strcat(name, '_', num2str(blocks(i)), '.imc'); corr=load(corr_file); S=load(S_file); dS=load(dS_file); corr_avg += corr; S_avg(:,2) += S(:,2); dS_avg(:,2) += S(:,2); end; corr_avg = corr_avg/length(blocks); S_avg = S_avg/length(blocks); gmc = -(corr_avg - reshape(kron(S_avg(:,2),S_avg(:,2)), size(corr_avg))); save('-ascii', strcat(name, '.gmc.block'), 'gmc'); save('-ascii', strcat(name, '.imc.block'), 'dS'); #quit csg-1.4.1/share/scripts/inverse/initialize_step_generic.sh000077500000000000000000000021411315264121600237630ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# if [ "$1" = "--help" ]; then cat <2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")){ print <0.0001); die "Different start potential point \n" if (($r_aim[0]-$r_cur[0]) > 0.0001); die "Different end potential point \n" if ( $#r_aim != $#r_cur ); die "kbibi.start is smaller than r_min\n" if ($int_start < $r_min); die "kbibi.stop is bigger than r_max\n" if ($int_stop > $r_max); my $j=0; my $avg_int=0; for (my $i=0;$i<=$#r_aim;$i++){ if (($r_aim[$i]>=$int_start) && ($r_aim[$i]<=$int_stop)) { $avg_int+=$kbint_cur[$i]-$kbint_aim[$i]; $j++; } } $avg_int/=$j; my $comment="#$progname: avg_int($int_start:$int_stop)=$avg_int ramp_factor=$ramp_factor r_ramp=$r_ramp\n"; my @dpot; my @flag; for (my $i=0;$i<=$#r_aim;$i++){ if ($r_aim[$i]> $r_ramp) { $dpot[$i]=0; #beyond r_ramp correction is 0 } else { $dpot[$i]=($avg_int*$ramp_factor*(1.0-($r_aim[$i]/$r_ramp)))*$kbt; } $flag[$i]="i"; } my $outfile="$ARGV[2]"; saveto_table($outfile,@r_aim,@dpot,@flag,$comment) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/linsolve.m000066400000000000000000000002311315264121600205430ustar00rootroot00000000000000A = load('$name.gmc'); b = load('$name.imc'); I=eye(size(A)); x(:,1)=b(:,1); x(:,2)=-(A'*inv((A'*A)+$reg*I))*b(:,2); save '$name_out' x '-ascii' quit csg-1.4.1/share/scripts/inverse/linsolve.octave000066400000000000000000000002321315264121600215710ustar00rootroot00000000000000A = load('$name.gmc'); b = load('$name.imc'); I=eye(size(A)); x(:,1)=b(:,1); x(:,2)=-(A'*inv((A'*A)+$reg*I))*b(:,2); save -ascii '$name_out' x quit csg-1.4.1/share/scripts/inverse/linsolve.py000077500000000000000000000024151315264121600207500ustar00rootroot00000000000000#!/usr/bin/env python2 # # Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from optparse import OptionParser import numpy as np import numpy.linalg as la usage = "Usage: %prog [options] group output" parser = OptionParser(usage=usage) parser.add_option("--reg", dest="reg", metavar="REG", help="regularization factor", default=0) (options, args) = parser.parse_args() if len(args) != 2: exit("two statefile required as parameters") A = np.loadtxt(args[0]+'.gmc'); b = np.loadtxt(args[0]+'.imc'); x = np.empty([len(b),2]) n, m = A.shape I = np.identity(m) x[:,0] = b[:,0] x[:,1] = -np.dot(np.dot(la.inv(np.dot(A.T, A) + float(options.reg)*I), A.T), b[:,1]); np.savetxt(args[1], x) csg-1.4.1/share/scripts/inverse/lj_126.pl000066400000000000000000000032031315264121600200660ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print <0.0){ $pot[$i]=$c12/(($r[$i])**12) - $c6/(($r[$i])**6); } else { $pot[$i]=1.0E20; # very large number } $flag[$i]="i"; } saveto_table($outfile,@r,@pot,@flag,$comment) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/merge_tables.pl000077500000000000000000000065131315264121600215340ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] "; #Defaults my $noflags='no'; my $novalues='no'; my $withflag=undef; while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < -1; $withflag = $ARGV[0]; } else { die "Unknown option '".$ARGV[0]."' !\n"; } shift(@ARGV); } #Print usage die "missing parameters\n$usage\n" unless $#ARGV > 1; use CsgFunctions; my $src="$ARGV[0]"; my $dst="$ARGV[1]"; my $out="$ARGV[2]"; print "tables $src $dst $out\n"; my @r_src; my @val_src; my @flag_src; my $comments1; (readin_table($src,@r_src,@val_src,@flag_src,$comments1)) || die "$progname: error at readin_table\n"; my @r_dst; my @val_dst; my @flag_dst; my $comments2; (readin_table($dst,@r_dst,@val_dst,@flag_dst,$comments2)) || die "$progname: error at readin_table\n"; my $idst=0; for(my $i=0; $i<=$#r_src; $i++) { # skip if flag does not match if($withflag) { if(!($flag_src[$i] =~ m/[$withflag]/)) { next; } } # advance in dst till same r while($r_dst[$idst] < $r_src[$i] - 1e-15) { $idst++; if ($idst > $#r_dst) { die "merge_tables.pl: destination table is too short"; } } my $tmp= $r_src[$i]-$r_dst[$idst]; die "error: grid mismatch" if(abs($r_dst[$idst] - $r_src[$i]) > 1e-15); if($novalues eq 'no') { $val_dst[$idst] = $val_src[$i]; } if($noflags eq 'no') { $flag_dst[$idst] = $flag_src[$i]; } } my $comments="# $progname: merged $src with $dst to $out\n"; $comments.="$comments1" if (defined($comments1)); $comments.="$comments2" if (defined($comments2)); saveto_table($out,@r_dst,@val_dst,@flag_dst,$comments) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/optimizer_parameters_to_potential.sh000077500000000000000000000040411315264121600261220ustar00rootroot00000000000000#! 
/bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat < "$1" echo "#Interactions: ${names}" >> "$1" echo "#Method: ${otype}" >> "$1" echo "#State = Initialization" >> $1 echo "#Format $parameters conv flag" >> "$1" #added conv=0 and flag=pending to all lines critical paste "${liste[@]}" | critical sed -e 's/$/ 0 pending/' >> "$1" #cma converts initial values to first population if [[ $otype = cma ]]; then tmpfile="$(critical mktemp "$1.XXX")" eps="$(csg_get_property cg.inverse.optimizer.cma.eps)" critical mv "$1" "$tmpfile" do_external cma precede_state --eps "$eps" "$tmpfile" "$1" fi csg-1.4.1/share/scripts/inverse/optimizer_state_to_mapping.sh000077500000000000000000000042051315264121600245350ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat < "$output" parameters="$(sed -n "${line}p" "$input")" for_all "non-bonded bonded" do_external optimizer parameters_to_potential "$parameters" csg-1.4.1/share/scripts/inverse/optimizer_target_density.sh000077500000000000000000000041471315264121600242320ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat < "${name}.density.conv" csg-1.4.1/share/scripts/inverse/optimizer_target_pressure.sh000077500000000000000000000030771315264121600244240ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat < use pre simulation)" p_now="${p_undef}" fi p_target="$(csg_get_interaction_property inverse.p_target)" critical awk -v x="$p_now" -v y="$p_target" 'BEGIN{print sqrt((x-y)^2)}' > "${name}.pressure.conv" csg-1.4.1/share/scripts/inverse/optimizer_target_rdf.sh000077500000000000000000000046351315264121600233300ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat < "${name}.rdf.conv" else do_external table combine --sum --op d "${name}.dist.tgt" "${name}.dist.new" > "${name}.rdf.conv" fi csg-1.4.1/share/scripts/inverse/post_add.sh000077500000000000000000000015011315264121600206670ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat <> "${name}.aconv" csg-1.4.1/share/scripts/inverse/postadd_average.sh000077500000000000000000000052231315264121600222270ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
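#
# Worked example (hypothetical values, added for illustration) for the
# pressure target in optimizer_target_pressure.sh above: the convergence
# measure is simply the absolute deviation of the current pressure from the
# target,
#
#   awk -v x=1.35 -v y=1.00 'BEGIN{print sqrt((x-y)^2)}'   # prints 0.35
#
# and that number ends up in <name>.pressure.conv for the optimizer to rank
# parameter sets.
#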
# if [ "$1" = "--help" ]; then cat <=0;step_i++)); do step_dir="$(get_stepname $step_nr)" if [[ -f $(get_main_dir)/$step_dir/${name}.${dist}.cur ]]; then tables[$step_i]="$(get_main_dir)/$step_dir/${name}.${dist}.cur" fi ((step_nr--)) done # compute the average if more than one tables found if [[ ${#tables[@]} -gt 1 ]]; then do_external table average --output ${name}.${dist}.avg "${tables[@]}" else # copy the single table to *.avg critical cp ${tables[0]} ${name}.${dist}.avg fi fi done csg-1.4.1/share/scripts/inverse/postadd_compress.sh000077500000000000000000000023721315264121600224520ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2014 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < ${name}.${dist}.conv wdiff=$(csg_calc "$weight" "*" "${diff}") echo "Convergence of $dist for ${name} was ${diff} and has weight $weight, so difference is $wdiff" sum=$(csg_calc $sum + $wdiff) done echo "$sum" > ${name}.conv csg-1.4.1/share/scripts/inverse/postadd_copyback.sh000077500000000000000000000017111315264121600224060ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat <> gnuplot_pipe.log [[ $REPLY = "exit" ]] && break fi done | $gnuplot $opts & while true; do if [[ -z $(ps -o pid= -p "${CSG_MASTER_PID}") ]]; then echo "exit" > $(get_main_dir)/gnuplot_pipe rm -rf gnuplot_pipe gnuplot_pipe.log gnuplot_pipe.lock exit fi sleep 1 #lowers the load done & sleep 1 #wait for gnuplot_pipe cd - > /dev/null ) 7> $(get_main_dir)/gnuplot_pipe.lock #gnuplot is in laststep, move to current one echo "cd '$PWD'" > $(get_main_dir)/gnuplot_pipe || die "piping to gnuplot_pipe failed" #name pipe accept only one command at the time, for i in $(cat ); do echo $i > pipe; done would do the same echo "load '$(get_main_dir)/$script'" > $(get_main_dir)/gnuplot_pipe || die "piping to gnuplot_pipe failed" else [[ -z $(type -p killall) ]] && die "${0##*/}: could not find killall needed to kill gnuplot" killall $what_to_kill $gnuplot $opts "$(get_main_dir)/$script" || true #exit code not always clear fi csg-1.4.1/share/scripts/inverse/postadd_shift.sh000077500000000000000000000017701315264121600217350ustar00rootroot00000000000000#! 
/bin/bash # # Copyright 2009-2013 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < $tmpfile spmin=$(sed -n -e '1p' $tmpfile | awk '{print $1}') spmax=$(sed -n -e '$p' $tmpfile | awk '{print $1}') spstep=$(csg_get_interaction_property inverse.post_update_options.splinesmooth.step) comment="$(get_table_comment)" critical csg_resample --in $tmpfile --out "$2" --grid $min:$step:$max --type cubic --fitgrid $spmin:$spstep:$spmax --comment "$comment" csg-1.4.1/share/scripts/inverse/potential_extrapolate.sh000077500000000000000000000053111315264121600235040ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # show_help () { cat <2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")){ print < \n" if ($#ARGV<1); use CsgFunctions; my $infile="$ARGV[0]"; my $outfile="$ARGV[1]"; # read in the current dpot my @r; my @dpot; my @flag; my $comments; (readin_table($infile,@r,@dpot,@flag,$comments)) || die "$progname: error at readin_table\n"; my $zero=undef; if ( "$type" eq "non-bonded" ) { $zero=$dpot[$#r]; } elsif (( "$type" eq "bond" ) or ("$type" eq "dihedral") or ("$type" eq "angle") or ("$type" eq "bonded")) { for(my $i=0; $i<=$#r; $i++) { $zero=$dpot[$i] if (($flag[$i] =~ /[i]/) and not defined($zero)); $zero=$dpot[$i] if (($flag[$i] =~ /[i]/) and ($dpot[$i]<$zero)); } die "No valid value found in $infile" unless defined($zero); } else{ die "$progname: Unsupported type of interatction: $type -> go and implement it\n"; } # shift potential by $zero for(my $i=0; $i<=$#r; $i++) { $dpot[$i] -= $zero; } # save to file saveto_table($outfile,@r,@dpot,@flag,$comments) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/potential_to_dlpoly.sh000077500000000000000000000116651315264121600231720ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2014 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < "${OUT}" #max 80 chars # see dlpoly manual ngrid = int(cut/delta) + 4 table_grid="$(($table_grid+4))" # nm -> Angs bin_size1="$(csg_calc "$bin_size" "*" 10)" table_end1="$(csg_calc "$table_end" "*" 10)" echo "$bin_size1 $table_end1 $table_grid" >> "${OUT}" fi elif [[ $bondtype = "bond" ]]; then OUT="TABBND" table_end="$(csg_get_property cg.inverse.dlpoly.bonds.table_end)" table_grid="$(csg_get_property cg.inverse.dlpoly.bonds.table_grid)" bin_size="$(csg_calc "$table_end" "/" $table_grid)" # make sure the TAB* file is removed externally if [[ ! -f "${OUT}" ]]; then echo "# Table for dlpoly from VOTCA with love" > "${OUT}" #max 80 chars # nm -> Angs table_end1="$(csg_calc "$table_end" "*" 10)" echo "# $table_end1 $table_grid" >> "${OUT}" fi elif [[ $bondtype = "angle" ]]; then OUT="TABANG" table_end="3.14159265359" table_grid="$(csg_get_property cg.inverse.dlpoly.angles.table_grid)" bin_size="$(csg_calc "$table_end" "/" $table_grid)" # make sure the TAB* file is removed externally if [[ ! -f "${OUT}" ]]; then echo "# Table for dlpoly from VOTCA with love" > "${OUT}" #max 80 chars echo "# $table_grid" >> "${OUT}" fi elif [[ $bondtype = "dihedral" ]]; then OUT="TABDIH" table_zero="-3.14159265359" table_end="3.14159265359" table_grid="$(csg_get_property cg.inverse.dlpoly.dihedrals.table_grid)" bin_size="$(csg_calc "$table_end" "-" $table_zero)" bin_size="$(csg_calc "$bin_size" "/" $table_grid)" # make sure the TAB* file is removed externally if [[ ! -f "${OUT}" ]]; then echo "# Table for dlpoly from VOTCA with love" > "${OUT}" #max 80 chars echo "# $table_grid" >> "${OUT}" fi else die "${0##*/}: conversion of ${bondtype} interaction to generic tables is not implemented yet!" 
fi # Yes, the dlpoly table starts at ${bin_size} table_begin="$(csg_calc "$table_zero" "+" $bin_size)" #keep the grid for now, so that extrapolate can calculate the right mean comment="$(get_table_comment)" smooth2="$(critical mktemp ${trunc}.pot.extended.XXXXX)" critical csg_resample --in ${input} --out "${smooth2}" --grid "${table_zero}:${step}:${table_end}" --comment "$comment" extrapolate="$(critical mktemp ${trunc}.pot.extrapolated.XXXXX)" do_external potential extrapolate --type "$bondtype" "${smooth2}" "${extrapolate}" smooth="$(critical mktemp ${trunc}.pot.smooth.XXXXX)" deriv="$(critical mktemp ${trunc}.pot.deriv.XXXXX)" critical csg_resample --in ${extrapolate} --out "${smooth}" --der "${deriv}" --grid "${table_begin}:${bin_size}:${table_end}" --comment "$comment" #shift does not change derivative tshift="$(critical mktemp ${trunc}.pot.shift.XXXXX)" do_external potential shift --type "$bondtype" "${smooth}" "${tshift}" do_external convert_potential tab --header dlpoly --type "${bondtype}" "${tshift}" "${deriv}" "${output}" if [[ -f $OUT ]]; then echo "Appending $output to $OUT" if [[ $bondtype = "non-bonded" ]]; then #votca non-bonded types might not correspond to dl_poly's internal types, only use a failback header="$(csg_get_interaction_property --allow-empty dlpoly.header)" [[ -z ${header} ]] && header="$(csg_get_interaction_property type1) $(csg_get_interaction_property type2)" echo "${header}" >> "$OUT" else header="$(csg_get_interaction_property dlpoly.header)" # an empty line must precede each data block (for another bond type), then the bond type (two atom types) follow echo "" >> "$OUT" echo "# ${header}" >> "$OUT" fi cat "${output}" >> "$OUT" fi csg-1.4.1/share/scripts/inverse/potential_to_generic.sh000077500000000000000000000051421315264121600232740ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2014 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
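#
# Worked example (hypothetical numbers, added for illustration) for the
# DL_POLY table headers written in potential_to_dlpoly.sh above: VOTCA tables
# are in nm while DL_POLY expects Angstrom, so for a non-bonded table with
# table_end = 1.2 (nm), table_grid = 1200 and a bin size of 0.001 (nm) the
# script effectively does
#
#   table_grid = 1200 + 4                 # ngrid = int(cut/delta) + 4, see dlpoly manual
#   bin_size1  = csg_calc 0.001 "*" 10    # 0.001 nm -> 0.01 Angstrom
#   table_end1 = csg_calc 1.2 "*" 10      # 1.2 nm   -> 12 Angstrom
#
# and writes "bin_size1 table_end1 table_grid" as the second header line of a
# freshly created table file.
#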
# if [ "$1" = "--help" ]; then cat < cg.inverse.gromacs.table_end ($tablend)" max="$(csg_get_interaction_property max)" rvdw="$(get_simulation_setting rvdw)" csg_calc "$max" ">" "$rvdw" && die "${0##*/}: rvdw ($rvdw) is smaller than max ($max)" [[ -z $tablend ]] && tablend=$(csg_calc "$rlist" + "$tabext") elif [[ -z $tablend ]]; then die "${0##*/}: cg.inverse.gromacs.table_end was not defined in xml seeting file" fi elif [[ $tabtype = "bond" ]]; then tablend="$(csg_get_property cg.inverse.gromacs.table_end)" elif [[ $tabtype = "angle" ]]; then tablend=180 elif [[ $tabtype = "dihedral" ]]; then zero="-180" tablend=180 else die "${0##*/}: Unknown interaction type $tabtype" fi [[ $step ]] || step=$(csg_get_interaction_property step) gromacs_bins="$(csg_get_property cg.inverse.gromacs.table_bins)" comment="$(get_table_comment $input)" if [[ $tabtype = "angle" || $tabtype = "dihedral" ]] && [[ $r2d != 1 ]]; then scale="$(critical mktemp ${trunc}.pot.scale.XXXXX)" do_external table linearop --on-x "${input}" "${scale}" "$r2d" "0" step=$(csg_calc $r2d "*" $step) else scale="${input}" fi #keep the grid for now, so that extrapolate can calculate the right mean smooth="$(critical mktemp ${trunc}.pot.smooth.XXXXX)" critical csg_resample --in ${scale} --out "$smooth" --grid "${zero}:${step}:${tablend}" extrapol="$(critical mktemp ${trunc}.pot.extrapol.XXXXX)" do_external potential extrapolate ${clean:+--clean} --type "$tabtype" "${smooth}" "${extrapol}" interpol="$(critical mktemp ${trunc}.pot.interpol.XXXXX)" critical csg_resample --in "${extrapol}" --out "$interpol" --grid "${zero}:${gromacs_bins}:${tablend}" --comment "$comment" if [[ $do_shift = "yes" ]]; then tshift="$(critical mktemp ${trunc}.pot.shift.XXXXX)" do_external potential shift --type "$tabtype" "${interpol}" "${tshift}" else tshift="$interpol" fi potmax="$(csg_get_property --allow-empty cg.inverse.gromacs.pot_max)" do_external convert_potential xvg ${potmax:+--max} ${potmax} --type "${xvgtype}" "${tshift}" "${output}" if [[ $clean ]]; then rm -f "${smooth}" "${interpol}" "${extrapol}" "${tshift}" "${scale}" fi csg-1.4.1/share/scripts/inverse/potential_to_lammps.sh000077500000000000000000000103111315264121600231430ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2014 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# show_help () { cat <$p_target){ $pref=-0.1*$kBT; } else { $pref=0.1*$kBT; } #Determine pressure factor my $p_factor=($p_now-$p_target)*$scale_factor; $p_factor=-$p_factor if $p_factor<0; #Only use pressure factor if not too big #max is 0.1kbT $pref*=$p_factor if $p_factor<1; my @r; my @pot; my @flag; my $outfile="$ARGV[1]"; my $comment="#$progname: p_now=$p_now, p_target=$p_target, prefactor=$pref\n"; for(my $i=$min/$delta_r;$i<=$max/$delta_r;$i++){ $r[$i]=$i*$delta_r; $pot[$i]=$pref*(1-$r[$i]/$max); $flag[$i]="i"; } saveto_table($outfile,@r,@pot,@flag,$comment) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/pressure_cor_wjk.pl000077500000000000000000000055711315264121600224740ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print < 0.1*$kBT){ if ($pref >0){ $pref=0.1*$kBT; }else{ $pref=-0.1*$kBT; } } $pref=$pref*$scale_factor; print "Pressure correction factor: A=$pref\n"; # my $prefile="${name}.pressure.prefactor"; # saveto_table($prefile,$pref) || die "$progname: error at save table\n"; my @r; my @pot; my @flag; my $outfile="$ARGV[1]"; my $comment="#$progname: p_now=$p_now, p_target=$p_target, prefactor=$pref\n"; for(my $i=$min/$delta_r;$i<=$max/$delta_r;$i++){ $r[$i]=$i*$delta_r; $pot[$i]=$pref*(1-$r[$i]/$max); $flag[$i]="i"; } saveto_table($outfile,@r,@pot,@flag,$comment) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/resample_target.sh000077500000000000000000000042121315264121600222520ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat <> $mdp msg --color blue --to-stderr "Automatically added 'cutoff-scheme = Group' to $mdp, tabulated interactions only work with Group cutoff-scheme!" 
fi if [[ ${CSG_MDRUN_STEPS} ]]; then msg --color blue --to-stderr "Appending -nsteps ${CSG_MDRUN_STEPS} to mdrun options" mdrun_opts+=" -nsteps $CSG_MDRUN_STEPS" fi if [[ ${CSG_MDRUN_OPTS} ]]; then msg --color blue --to-stderr "Appending ${CSG_MDRUN_OPTS} to mdrun options" mdrun_opts+=" ${CSG_MDRUN_OPTS}" fi #see can run grompp again as checksum of tpr does not appear in the checkpoint critical ${grompp[@]} -n "${index}" -f "${mdp}" -p "$topol_in" -o "$tpr" -c "${conf}" ${grompp_opts} 2>&1 | gromacs_log "${grompp[@]} -n "${index}" -f "${mdp}" -p "$topol_in" -o "$tpr" -c "${conf}" ${grompp_opts}" [[ -f $tpr ]] || die "${0##*/}: gromacs tpr file '$tpr' not found after runing grompp" mdrun="$(csg_get_property cg.inverse.gromacs.mdrun.command)" #no check for mdrun, because mdrun_mpi could maybe exist only computenodes if [[ -n $CSGENDING ]]; then #seconds left for the run wall_h=$(( $CSGENDING - $(get_time) )) #convert to hours wall_h=$(csg_calc $wall_h / 3600 ) echo "${0##*/}: Setting $mdrun maxh option to $wall_h (hours)" mdrun_opts="-cpi $checkpoint -maxh $wall_h ${mdrun_opts}" else echo "${0##*/}: No walltime defined, so no time limitation given to $mdrun" fi #>gmx-5.1 has new handling of bonded tables, remove this block we drop support for gmx-5.0 if [[ ${gmx_ver} = *"VERSION 5.1"* || ${gmx_ver} = *"version 2016"* ]] && [[ ${mdrun_opts} != *tableb* ]]; then tables= for i in table_[abd][0-9]*.xvg; do [[ -f $i ]] && tables+=" $i" done if [[ -n ${tables} ]]; then msg --color blue --to-stderr "Automatically added '-tableb${tables} to mdrun options (add -tableb option to cg.inverse.gromacs.mdrun.opts yourself if this is wrong)" mdrun_opts+=" -tableb${tables}" fi fi critical $mdrun -s "${tpr}" -c "${confout}" -o "${traj%.*}".trr -x "${traj%.*}".xtc ${mdrun_opts} ${CSG_RUNTEST:+-v} 2>&1 | gromacs_log "$mdrun -s "${tpr}" -c "${confout}" -o "${traj%.*}".trr -x "${traj%.*}".xtc ${mdrun_opts}" [[ -z "$(sed -n '/[nN][aA][nN]/p' ${confout})" ]] || die "${0##*/}: There is a nan in '${confout}', this seems to be wrong." csg-1.4.1/share/scripts/inverse/simplex_downhill_processor.pl000077500000000000000000000125261315264121600245640ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
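#
# Background sketch (standard Nelder-Mead notation; linop_parameter is assumed
# to form the usual linear combination): the state machine below generates new
# trial parameter sets from the centroid x_c of the better points, the worst
# point x_h and the best point x_low roughly as
#
#   reflection:  x_r = x_c + alpha * (x_c - x_h)
#   expansion:   x_e = x_c + gamma * (x_c - x_h)
#   contraction: x_k = x_h + rho   * (x_c - x_h)
#   reduction:   every x_i is replaced by x_low + sigma * (x_i - x_low)
#
# matching the $alpha/$gamma/$rho/$sigma factors used below.
#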
# use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print < $second_highest ) { $next_state="Contraction"; if ($try > $highest ) { remove_parameter_set(@simplex_table,"try"); } else { replace_parameter_flag(@simplex_table,"try","complete"); remove_parameter_set(@simplex_table,"highest"); } } else { #$try is between $lowest and $second_highest $next_state="Reflection"; replace_parameter_flag(@simplex_table,"try","complete"); remove_parameter_set(@simplex_table,"highest"); } } elsif ($state eq "Expansion") { $next_state="Reflection"; my $tryold=get_convergence_value(@simplex_table,"tryold"); if ($try < $tryold) { #tryold is the reflection point from before remove_parameter_set(@simplex_table,"tryold"); replace_parameter_flag(@simplex_table,"try","complete"); remove_parameter_set(@simplex_table,"highest"); } else { remove_parameter_set(@simplex_table,"try"); replace_parameter_flag(@simplex_table,"tryold","complete"); remove_parameter_set(@simplex_table,"highest"); } } elsif ($state eq "Contraction") { if ($try < $highest) { replace_parameter_flag(@simplex_table,"try","complete"); remove_parameter_set(@simplex_table,"highest"); $next_state="Reflection"; } else { $next_state="Reduction"; remove_parameter_set(@simplex_table,"try"); } } elsif ($state eq "Reduction") { replace_parameter_flag(@simplex_table,"try","complete"); $next_state="Reflection"; }else { die "$progname: Unknown state '$state'\n"; } if ($next_state eq "Reflection") { sort_simplex_table(@simplex_table); my @center_parameter=calc_parameter_center(@simplex_table); my @highest_parameter=@{$simplex_table[$#simplex_table]}; my @try_paramter=linop_parameter(@center_parameter,$alpha,@center_parameter,@highest_parameter); push(@simplex_table,\@try_paramter); } elsif ($next_state eq "Expansion") { my @tryold_parameter=remove_parameter_set(@simplex_table,"tryold"); #this should not go into the center sort_simplex_table(@simplex_table); my @center_parameter=calc_parameter_center(@simplex_table); my @highest_parameter=@{$simplex_table[$#simplex_table]}; my @try_paramter=linop_parameter(@center_parameter,$gamma,@center_parameter,@highest_parameter); push(@simplex_table,\@try_paramter,\@tryold_parameter); } elsif ($next_state eq "Contraction") { sort_simplex_table(@simplex_table); my @center_parameter=calc_parameter_center(@simplex_table); my @highest_parameter=@{$simplex_table[$#simplex_table]}; my @try_paramter=linop_parameter(@highest_parameter,$rho,@center_parameter,@highest_parameter); push(@simplex_table,\@try_paramter); } elsif ($next_state eq "Reduction") { sort_simplex_table(@simplex_table); my @lowest_parameter=@{$simplex_table[0]}; for (my $i=1; $i<=$#simplex_table;$i++) { my @try_paramter=linop_parameter(@lowest_parameter,$sigma,@{$simplex_table[$i]},@lowest_parameter); $simplex_table[$i]=\@try_paramter; } }else { die "$progname: Unknown state '$next_state'\n"; } print "Preparing $next_state with parameters ($parameter_names):\n"; for (my $i=0;$i<=$#simplex_table;$i++){ print "@{$simplex_table[$i]}\n"; } (saveto_simplex_state($ARGV[1],$next_state,@simplex_table,$comments)) || die "$progname: error at readin_simplex_table\n"; csg-1.4.1/share/scripts/inverse/skeleton.pl000077500000000000000000000016751315264121600207330ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print < Used external packages: matlab EOF exit 0 fi [[ -z $1 || -z $2 -z $3 ]] && die "${0##*/}: Missing arguments" # initialize & run the matlab file cat_external solve matlab | sed -e "s/\$name_out/$2/" -e "s/\$name/$1/" -e "s/\$reg/$3/" > solve_$1.m || die "${0##*/}: sed failed" matlab="$(csg_get_property cg.inverse.imc.matlab.bin)" [ -n "$(type -p $matlab)" ] || die "${0##*/}: matlab binary '$matlab' not found" #matlab does not like -_. etc in filenames critical cp solve_$1.m solve.m critical $matlab -r solve -nosplash -nodesktop rm -f solve.m [[ -f $2 ]] || die "Matlab failed" # temporary compatibility issue critical sed -ie 's/NaN/0.0/' "$2" critical sed -ie 's/Inf/0.0/' "$2" csg-1.4.1/share/scripts/inverse/solve_numpy.sh000077500000000000000000000021601315264121600214540ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < Uses external packages: numpy EOF exit 0 fi [[ -z $1 || -z $2 || -z $3 ]] && die "${0##*/}: Missing arguments" do_external solve numpy --reg "$3" "$1" "$2" [[ -f $2 ]] || die "Python failed" # temporary compatibility issue #TODO: check these lines! critical sed -ie 's/NaN/0.0/' "$2" critical sed -ie 's/Inf/0.0/' "$2" csg-1.4.1/share/scripts/inverse/solve_octave.sh000077500000000000000000000025741315264121600215760ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
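#
# Added illustration: the solve_* helpers in this directory (matlab, numpy and
# this octave one) share the same calling convention -- $1 is the input name,
# $2 the output table and $3 the regularization parameter -- and all of them
# zero out non-numeric entries after the external solver has finished, e.g.
#
#   sed -ie 's/NaN/0.0/' "$out"   # "$out" is a placeholder for the output file
#   sed -ie 's/Inf/0.0/' "$out"
#
# so that the downstream table scripts never see NaN or Inf values.
#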
# if [ "$1" = "--help" ]; then cat < Used external packages: octave EOF exit 0 fi [[ -z $1 || -z $2 || -z $3 ]] && die "${0##*/}: Missing arguments" # initialize & run the octave file cat_external solve octave | sed -e "s/\$name_out/$2/" -e "s/\$name/$1/" -e "s/\$reg/$3/" > solve_$1.octave || die "${0##*/}: sed failed" octave="$(csg_get_property cg.inverse.imc.octave.bin)" [ -n "$(type -p $octave)" ] || die "${0##*/}: octave binary '$octave' not found" critical $octave solve_$1.octave [[ -f $2 ]] || die "Octave failed" # temporary compatibility issue critical sed -ie 's/NaN/0.0/' "$2" critical sed -ie 's/Inf/0.0/' "$2" csg-1.4.1/share/scripts/inverse/start_framework.sh000077500000000000000000000032561315264121600223150ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat <&2; exit 1; } #check for VOTCASHARE [[ -n ${VOTCASHARE} ]] || die "Error: VOTCASHARE not definded" [[ -d ${VOTCASHARE} ]] || die "VOTCASHARE '$VOTCASHARE' is not a dir" [[ -d ${VOTCASHARE}/scripts/inverse ]] || die "\$VOTCASHARE/scripts/inverse is not found. Is VOTCASHARE set corectly?" [[ -f ${VOTCASHARE}/scripts/inverse/inverse.sh ]] || die "Could not find inverse.sh, \$VOTCASHARE/scripts/inverse seem to point to the wrong place!" [[ -f ${VOTCASHARE}/scripts/inverse/functions_common.sh ]] || die "Could not find default common framework functions (functions_common.sh)" source "${VOTCASHARE}/scripts/inverse/functions_common.sh" || exit 1 #this is needed by die later export CSG_MASTER_PID="$$" export CSG_MAINDIR="$PWD" #do no overwrite CSGSHARE stuff set by user from the outside add_to_csgshare --at-the-end "${VOTCASHARE}/scripts/inverse" csg-1.4.1/share/scripts/inverse/table_average.sh000077500000000000000000000064541315264121600216670ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
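#
# Added illustration: start_framework.sh just above only bootstraps the
# framework -- it verifies that VOTCASHARE points at a complete installation
# and sources the common helper functions.  A user script relying on the same
# machinery would typically repeat the pattern shown there:
#
#   [[ -n ${VOTCASHARE} ]] || die "VOTCASHARE not defined"
#   source "${VOTCASHARE}/scripts/inverse/functions_common.sh" || exit 1
#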
# show_help () { cat < $t [[ -z $l ]] && l=$(critical sed -n '$=' $t) [[ $l -eq $(critical sed -n '$=' $t) ]] || die "Number of lines (after comments have been striped) mismatches in $f from $1" tables[$c]="$t" ((c++)) done t=$(critical mktemp "table_all.XXXX") critical paste "${tables[@]}" > "${t}" #no critical here to avoid huge error message awk -v c1="$colx" -v c2="$coly" -v s="$cols" ' function isnum(x){return(x==x+0)} { sum=0; sum2=0; c=0; for (i=0;i "/dev/stderr"; exit 1; } } flag="u" if (isnum(sum)&&isnum(sum2)) { flag="i" } print $1,sum/c,sqrt((sum2-sum*sum/c)/(c*(c-1))),flag; }' $t > $out || die "${0##*/}: averaging with awk failed" if [[ $clean = "yes" ]]; then rm -f "${tables[@]}" "$t" fi csg-1.4.1/share/scripts/inverse/table_change_flag.sh000077500000000000000000000016671315264121600224740ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < $2 csg-1.4.1/share/scripts/inverse/table_combine.pl000077500000000000000000000125501315264121600216640ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
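#
# Added illustration: the awk one-liner in table_average.sh above computes,
# per grid point, the mean and the standard error of the mean over the c
# input tables:
#   mean = sum/c ,   err = sqrt( (sum2 - sum^2/c) / (c*(c-1)) )
# A tiny stand-alone Perl version of the same estimator (helper name is
# hypothetical, requires at least two values):
sub _example_mean_and_stderr {
  my @y = @_;
  my ($s, $s2) = (0, 0);
  foreach my $v (@y) { $s += $v; $s2 += $v * $v; }
  my $c = scalar @y;
  return ($s / $c, sqrt(($s2 - $s * $s / $c) / ($c * ($c - 1))));
}
#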
# use strict; ( my $progname = $0 ) =~ s#^.*/##; my $usage="Usage: $progname [OPTIONS] "; my $epsilon=1e-5; my $op=undef; my $noflags=undef; my $dosum=undef; my $die=undef; my $scale=1.0; my $withflag=undef; while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } elsif($ARGV[0] eq "--error") { shift(@ARGV); $epsilon = shift(@ARGV); } elsif($ARGV[0] eq "--no-flags") { shift(@ARGV); $noflags="yes"; } elsif($ARGV[0] eq "--op") { shift(@ARGV); $op = shift(@ARGV); } elsif($ARGV[0] eq "--scale") { shift(@ARGV); $scale = shift(@ARGV); } elsif($ARGV[0] eq "--die") { shift(@ARGV); $die = "yes"; } elsif($ARGV[0] eq "--sum") { shift(@ARGV); $dosum = "yes"; } elsif ($ARGV[0] eq "--withflag"){ shift(@ARGV); die "nothing given for --withflag" unless $#ARGV > -1; $withflag = shift(@ARGV); } elsif (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < abs($y)); return abs($x-$y)/abs($y); } sub operation($$$) { defined($_[2]) || die "operation: Missing argument\n"; my $x=$_[0]; my $op="$_[1]"; my $y=$_[2]; if ($op =~ /\+|-|\*|\/|x/) { $op="*" if ($op eq "x"); my $val = eval "$x $op $y"; die "operation: Could not calculate '$x $op $y'\n" if $@; return $val; } elsif ($op eq "=") { my $diff=&difference_relative($x,$y,$epsilon); return 1 if ($diff > $epsilon); return 0; } elsif ($op eq "d") { return abs($x-$y); } elsif ($op eq "d2") { return ($x-$y)*($x-$y); } else { die "operation: Unknown operation $op\n"; } } my $sum=0; my @table; for (my $i=0;$i<=$#r1; $i++) { # check for positions abs($r1[$i] - $r2[$i]) < $epsilon || die "$progname: first column different at position $i\n"; # check for flags unless ($noflags){ $flag1[$i] eq $flag2[$i] || die "$progname: flag different at position $i\n"; # skip if flag does not match if (($withflag) and ($flag1[$i] !~ m/[$withflag]/)) { next; } } # perform given operation my $value=&operation($pot1[$i],$op,$pot2[$i]); if (($die)&&($op eq "=")&&($value == 1)) { die "progname: second column different at position $i\n"; } $value*=$scale; $sum+=$value; $table[$i]=$value; } if ($die) { #notthing } elsif ($dosum) { print "$sum\n"; } else { my $comments="# $progname: combining $file1 $op $file2 into $ARGV[2]\n"; $comments.="#Comments from $file1\n$comments1" if (defined($comments1)); $comments.="#Comments from $file2\n$comments2" if (defined($comments2)); saveto_table($ARGV[2],@r1,@table,@flag1,$comments) || die "$progname: error at save table\n"; } csg-1.4.1/share/scripts/inverse/table_dummy.sh000077500000000000000000000042421315264121600214010ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2016 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
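#
# Added illustration: table_combine.pl above applies an element-wise operation
# (+ - * / x = d d2) to two tables with identical grids.  Hypothetical calls
# (file names are placeholders; the positional argument layout infile1 infile2
# outfile is inferred from the script):
#
#   table_combine.pl --op d2 --sum rdf.new rdf.tgt rdf.diff   # print the sum of squared differences
#   table_combine.pl --op =  --die rdf.new rdf.tgt rdf.chk    # die if the tables differ beyond --error
#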
# clean=no y1=0 y2=0 show_help () { cat < $tmpfile echo "$max ${y2}" >> $tmpfile comment="$(get_table_comment)" critical csg_resample --type linear --in ${tmpfile} --out "${2}" --grid "${1}" --comment "${comment}" if [[ $clean = "yes" ]]; then rm -f "${tmpfile}" fi csg-1.4.1/share/scripts/inverse/table_extrapolate.pl000077500000000000000000000144501315264121600226010ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # sub extrapolate_constant($$$$) { my $y0 = $_[1]; return $y0; } sub extrapolate_linear($$$$) { my $x0 = $_[0]; my $y0 = $_[1]; my $m = $_[2]; my $x = $_[3]; return $m*($x - $x0) + $y0; } sub sasha_shit($$$$) { my $x0 = $_[0]; my $y0 = $_[1]; my $m = $_[2]; my $x = $_[3]; my $a = ($m**2)/(4*$y0); my $b = $x0 - 2*$y0/$m; #my $b = $x0 + 2*$y0/$m; #my $a = $m/(2*($x0-$b)); return $a*($x-$b)**2; } sub extrapolate_quad($$$$) { my $x0 = $_[0]; my $y0 = $_[1]; my $m = $_[2]; my $x = $_[3]; # $curv is a global variable my $a = 0.5*$m/$curv - $x0; my $b = $y0 - 0.25*$m*$m/$curv; return $curv*($x + $a)**2 + $b } sub extrapolate_exp($$$$) { my $x0 = $_[0]; my $y0 = $_[1]; my $m = $_[2]; my $x = $_[3]; my $a = $y0*exp(-$m*$x0 / $y0); my $b = $m/$y0; return $a*exp($b*$x); } use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] "; my $avgpoints = 3; my $function="quadratic"; my $region = "leftright"; my $flag_update ="yes"; our $curv = 10000.0; # curvature for quadratic extrapolation # read program arguments while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if($ARGV[0] eq "--avgpoints") { $avgpoints = $ARGV[1]; shift(@ARGV); shift(@ARGV); } elsif($ARGV[0] eq "--function") { $function = $ARGV[1]; shift(@ARGV); shift(@ARGV); } elsif($ARGV[0] eq "--region") { $region = $ARGV[1]; shift(@ARGV); shift(@ARGV); } elsif($ARGV[0] eq "--curvature") { $curv = $ARGV[1]; shift(@ARGV); shift(@ARGV); } elsif($ARGV[0] eq "--no-flagupdate") { shift(@ARGV); $flag_update="no"; } elsif (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < 0; use CsgFunctions; my $infile="$ARGV[0]"; my @r; my @val; my @flag; my $comments; (readin_table($infile,@r,@val,@flag,$comments)) || die "$progname: error at readin_table\n"; my $outfile="$ARGV[1]"; #============== my ($do_left, $do_right); # parse $region: decide where to extrapolate if ($region eq "left") { $do_left = 1; $do_right = 0; } elsif ($region eq "right") { $do_left = 0; $do_right = 1; } elsif ($region eq "leftright") { $do_left = 1; $do_right = 1; } else { die "$progname: Unknown region: $region !\n"; } my $extrap_method; # parse $function: decide which method to use if ($function eq "constant") { $extrap_method = \&extrapolate_constant; } elsif ($function eq "linear") { $extrap_method = 
\&extrapolate_linear; } elsif ($function eq "quadratic") { $extrap_method = \&extrapolate_quad; } elsif ($function eq "exponential") { $extrap_method = \&extrapolate_exp; } elsif ($function eq "sasha") { $extrap_method = \&sasha_shit; } else { die "$progname: Unknown extrapolation function: $function !\n"; } # do extrapolation: left if ($do_left) { # find beginning my $first; for ($first=0;$first<=$#r;$first++) { last if($flag[$first] eq "i"); } # grad of beginning my $grad_beg; if ($function eq "constant") { $grad_beg = 0; } else { $grad_beg = ($val[$first + $avgpoints] - $val[$first])/($r[$first + $avgpoints] - $r[$first]); } print "$progname: extrapolating the left using $function with gradient $grad_beg\n"; # now extrapolate beginning for(my $i=$first-1; $i >= 0; $i--) { $val[$i] = &{$extrap_method}($r[$first], $val[$first], $grad_beg, $r[$i]); $flag[$i]="i" if ($flag_update eq "yes"); } } # do extrapolation: right if ($do_right) { # find end my $last; for ($last=$#r;$last>0;$last--) { last if($flag[$last] eq "i"); } # grad of end my $grad_end; if ($function eq "constant") { $grad_end = 0; } else { $grad_end = ($val[$last] - $val[$last - $avgpoints])/($r[$last] - $r[$last-$avgpoints]); } print "$progname: extrapolating the right using $function with gradient $grad_end\n"; # now extrapolate ends for(my $i=$last+1; $i <= $#r; $i++) { $val[$i] = &{$extrap_method}($r[$last], $val[$last], $grad_end, $r[$i]); $flag[$i]="i" if ($flag_update eq "yes"); } } #============== saveto_table($outfile,@r,@val,@flag,$comments) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/table_functional.sh000077500000000000000000000075421315264121600224160ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # show_help () { cat < "$tmpfile" echo -e "#\n#Plot script:" >> "$tmpfile" [[ -n $vars ]] && echo -e "$vars" >> "$tmpfile" for i in "${headers[@]}"; do echo "load '$i'" >> "$tmpfile" done echo "set samples $samples" >> "$tmpfile" echo "set table '$tmpfile2'" >> "$tmpfile" echo "plot [${grid[1]}:${grid[3]}] $fct" >> "$tmpfile" critical $gnuplot "$tmpfile" critical sed -e 's/^#*/#/' "$tmpfile" > "$output" echo -e "#\n# Gnuplot output:" >> "$output" critical sed -e '/^[[:space:]]*$/d' "$tmpfile2" >> "$output" if [[ $clean = "yes" ]]; then rm -f "$tmpfile" "$tmpfile2" fi csg-1.4.1/share/scripts/inverse/table_get_value.pl000077500000000000000000000036071315264121600222260ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] X infile"; # read program arguments while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < 0; use CsgFunctions; my $X="$ARGV[0]"; my $infile="$ARGV[1]"; my @x; my @y; my @flag; (readin_table($infile,@x,@y,@flag)) || die "$progname: error at readin_table\n"; my $value=$y[0]; for(my $i=1; $i<=$#x; $i++) { if($x[$i]<$X) { $value=$y[$i]; } else { $value=$y[$i] unless (($x[$i]-$X)>($X-$x[$i-1])); print "$value\n"; exit 0; } } die "$progname: value $X not found\n"; csg-1.4.1/share/scripts/inverse/table_getsubset.py000077500000000000000000000044061315264121600222730ustar00rootroot00000000000000#!/usr/bin/env python2 # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
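#
# Added illustration: the tables handled by these scripts are plain text with
# three columns -- x, y and a one-letter flag ('i' for points inside the valid
# range, 'o' for points outside it, and in some scripts 'u' for undefined
# values), which is exactly what the write loop at the bottom of this file
# produces.  A minimal stand-alone reader for that layout (the function name
# is hypothetical and not part of the VOTCA API):
def _example_read_table(filename):
    xs, ys, flags = [], [], []
    for line in open(filename):
        if line.startswith('#') or line.startswith('@') or not line.split():
            continue
        cols = line.split()
        xs.append(float(cols[0]))
        ys.append(float(cols[1]))
        flags.append(cols[2] if len(cols) > 2 else 'i')
    return xs, ys, flags
#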
# import sys import os #import pygsl.sf import getopt #from pygsl import spline #from pygsl import _numobj as numx import math xvalues = [] yvalues = [] outfile = "" options = ["xstart=", "xstop=", "infile=", "outfile=","help"] try: opts, args = getopt.getopt(sys.argv[1:], "", options) except getopt.GetoptError, err: # print help information and exit: print str(err) # will print something like "option -a not recognized" print options sys.exit(2) for o, a in opts: if o == "--help": print """%(name)s, version %(ver)s This script get the a subset of a table Usage: %(name)s Allowed options: --xstart X.X x value where the subset starts --xstop X.X x value where the subset stops --infile FILE input file --outfile FILE output file """ % {'name': os.path.basename(sys.argv[0]),'ver': '%version%'} sys.exit(0) elif o == "-v": verbose = True elif o == "--xstart": xstart = float(a) elif o == "--xstop": xstop = float(a) elif o in ("--infile"): infile = a elif o in ("--outfile"): outfile = a else: print options assert False, "unhandled option" for line in open(infile,"r").readlines(): if line[0] != "@" and line[0] != "#": values = line.split() if float(values[0]) >= xstart and float(values[0]) <= xstop: xvalues.append(float(values[0])) yvalues.append(float(values[1])) f = open(outfile,"w") i = 0 tempx = [] tempy = [] for x in xvalues: tempx.append (x) tempy.append(yvalues[i]) i=i+1 i = 0 for x in tempx: f.write('%15.10e %15.10e i\n' % (x-xstart, tempy[i])) i=i+1 csg-1.4.1/share/scripts/inverse/table_integrate.pl000077500000000000000000000120651315264121600222330ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
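#
# Added illustration: table_getsubset.py above cuts out the rows with
# xstart <= x <= xstop and writes them with the abscissa shifted so that the
# subset starts at zero (x - xstart).  A hypothetical invocation (file names
# are placeholders):
#
#   table_getsubset.py --xstart 0.3 --xstop 0.9 --infile full.dist --outfile sub.dist
#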
# use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] "; my $with_errors="no"; my $with_entropie="no"; my $kbT=undef; my $from="right"; my $spherical="no"; # read program arguments while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print <=0;$i--){ if ($r[$i]>0) { $force[$i] += 2*$kbT/$r[$i]; } } } if ("$spherical" eq "yes"){ for (my $i=0;$i<=$#r;$i++){ $force[$i] *= ($r[$i])**2; } } my $outfile="$ARGV[1]"; my @pot; my @pot_errors; my @ww; if ("$from" eq "right") { #calc pot with trapez rule #int_j= sum_i^j (r_i+1 - r_i)*(f_i+f_i+1)/2 #int_j+1= int_j + (r_i+1 - r_i)*(f_i+f_i+1)/2 #int_j= int_j+1 - (r_i+1 - r_i)*(f_i+f_i+1)/2 #begin from end to make pot(max)=0 $pot[$#r]=0; $ww[$#r]=0; for (my $i=$#r-1;$i>=0;$i--){ #hh = delta x /2 my $hh=0.5*($r[$i+1]-$r[$i]); $pot[$i]=$pot[$i+1] - $hh*($force[$i+1]+$force[$i]); $ww[$i]+= $hh; $ww[$i+1]+= $hh; } #ww contains w_i=(r_i+1-r_i-1)/2 if ("$with_errors" eq "yes") { #all error are independent(we assume that) #resort sum (only one force per summand) # U_j= sum_i ^j = sum_i^j f_i(r_i+1 - r_i-1)/2 + randterm # o^2(U_j)=sum_i o^2(f_i)*(r_i+1 - r_i-1)/2 + o^2(randterm) my $var_int = ($ww[$#r]*$force_errors[$#r])**2; $pot_errors[$#r]=sqrt($var_int); for(my $i=$#r-1; $i>=0;$i--) { my $hh = 0.5*($r[$i+1] - $r[$i]); $pot_errors[$i] = sqrt($var_int + ($hh*$force_errors[$i])**2); $var_int += ($ww[$i]*$force_errors[$i])**2; } } } else { $pot[0]=0; $ww[0]=0; for (my $i=1;$i<=$#r;$i++){ #hh = delta x /2 my $hh=0.5*($r[$i]-$r[$i-1]); $pot[$i]=$pot[$i-1] + $hh*($force[$i]+$force[$i-1]); $ww[$i]+= $hh; $ww[$i+1]+= $hh; } if ("$with_errors" eq "yes") { my $var_int = ($ww[0]*$force_errors[0])**2; $pot_errors[0]=sqrt($var_int); for(my $i=1; $i<=$#r;$i++) { my $hh = 0.5*($r[$i] - $r[$i-1]); $pot_errors[$i] = sqrt($var_int + ($hh*$force_errors[$i])**2); $var_int += ($ww[$i]*$force_errors[$i])**2; } } } if ("$with_errors" eq "yes") { (saveto_table_err($outfile,@r,@pot,@pot_errors,@flag)) || die "$progname: error at save table\n"; }else { (saveto_table($outfile,@r,@pot,@flag)) || die "$progname: error at save table\n"; } csg-1.4.1/share/scripts/inverse/table_linearop.pl000077500000000000000000000063071315264121600220640ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
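#
# Added illustration: table_integrate.pl above integrates a force table with
# the trapezoidal rule starting from the right, so that the potential vanishes
# at the largest grid point:
#   U_j = U_{j+1} - (r_{j+1} - r_j) * (f_j + f_{j+1}) / 2 ,   U_last = 0 .
# When the entropic correction is requested it first adds 2*kBT/r to the
# force, and in spherical mode it weights the force by r^2 (both taken from
# the code above).  A one-dimensional Perl sketch of the plain backward
# trapezoid sum (helper name hypothetical, arguments are array references):
sub _example_integrate_from_right {
  my ($r, $f) = @_;
  my @u;
  $u[$#{$r}] = 0;
  for (my $i = $#{$r} - 1; $i >= 0; $i--) {
    $u[$i] = $u[$i + 1] - 0.5 * ($$r[$i + 1] - $$r[$i]) * ($$f[$i + 1] + $$f[$i]);
  }
  return @u;
}
#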
# use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] "; #Defaults my $withflag=undef; my $with_errors="no"; my $col="y"; while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print < -1; $withflag = shift(@ARGV); } elsif ($ARGV[0] eq "--with-errors"){ shift(@ARGV); $with_errors="yes"; } elsif ($ARGV[0] eq "--on-x"){ shift(@ARGV); $col="x"; } else { die "Unknown option '".$ARGV[0]."' !\n"; } } #Print usage die "missing parameters\n$usage\n" unless $#ARGV >= 3; my $a = $ARGV[2]; my $b = $ARGV[3]; use CsgFunctions; my $file="$ARGV[0]"; my $outfile="$ARGV[1]"; print "$progname: $file to $outfile with $col' = $a*$col + $b \n"; my @r; my @val; my @flag; my @errors; my $comments=""; if ("$with_errors" eq "yes") { (readin_table_err($file,@r,@val,@errors,@flag,$comments)) || die "$progname: error at readin_table\n"; } else { (readin_table($file,@r,@val,@flag,$comments)) || die "$progname: error at readin_table\n"; } for(my $i=0; $i<=$#r; $i++) { # skip if flag does not match if(($withflag) and ($flag[$i] !~ m/[$withflag]/)) { next; } if ("$col" eq "x") { $r[$i]=$a*$r[$i]+$b; } else { $val[$i] = $a*$val[$i] + $b; if ("$with_errors" eq "yes") { $errors[$i] = $a*$errors[$i]; } } } $comments.="# $progname: $file -> $outfile $col' = $a*$col + $b\n"; if ("$with_errors" eq "yes") { saveto_table_err($outfile,@r,@val,@errors,@flag,$comments) || die "$progname: error at save table\n"; }else { saveto_table($outfile,@r,@val,@flag,$comments) || die "$progname: error at save table\n"; } csg-1.4.1/share/scripts/inverse/table_scale.pl000077500000000000000000000040671315264121600213430ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
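#
# Added illustration: table_linearop.pl above applies y' = a*y + b to the
# second column (or, with --on-x, x' = a*x + b to the first one); its
# positional arguments are infile outfile a b, so e.g.
#
#   table_linearop.pl pot.cur pot.half 0.5 0.0
#
# would halve every potential value (file names here are placeholders).
#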
# use strict; $_=$0; s#^.*/##; my $progname=$_; my $usage="Usage: $progname [OPTIONS] infile outfile prefactor1 prefactor2"; # read program arguments while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")) { print <=3 ; use CsgFunctions; my $infile="$ARGV[0]"; my @r; my @val; my @flag; my $comments; (readin_table($infile,@r,@val,@flag,$comments)) || die "$progname: error at readin_table\n"; my $outfile="$ARGV[1]"; my @out; my $prefactor="$ARGV[2]"; my $prefactor2 = "$ARGV[3]"; for (my $i=0;$i<=$#r;$i++){ # do a linear interpoltation between the prefactors $out[$i]=$i/$#r*$val[$i]*$prefactor2+(1-$i/$#r)*$val[$i]*$prefactor; } saveto_table($outfile,@r,@out,@flag,$comments) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/table_smooth.pl000077500000000000000000000033201315264121600215540ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print <= xstart and float(values[0]) <= xstop: xvalues.append(float(values[0])) yvalues.append(float(values[1])) f = open(outfile,"w") i = 0 tempx = [] tempy = [] for x in xvalues: tempx.append (x) if x-xstart < 0.1*(xstop-xstart): tempy.append ((1-weight(math.fabs(x-xstart)))*yvalues[i]) elif x-xstart > 0.9*(xstop-xstart): tempy.append ((1-weight(math.fabs(xstop-x)))*yvalues[i]) else: tempy.append(yvalues[i]) i=i+1 i = 0 for x in tempx: f.write('%15.10e %15.10e i\n' % (x, tempy[i])) i=i+1 csg-1.4.1/share/scripts/inverse/table_switch_border.pl000077500000000000000000000034521315264121600231070ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2016 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
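#
# Added illustration: table_scale.pl above multiplies a table by a prefactor
# that is linearly interpolated along the grid index,
#   out_i = (1 - i/N) * prefactor1 * val_i + (i/N) * prefactor2 * val_i ,
# so the first point is scaled by prefactor1 and the last one by prefactor2.
#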
# use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print < EOF exit 0; } die "3 parameters are necessary\n" if ($#ARGV<2); use CsgFunctions; my $infile="$ARGV[0]"; my @r_cur; my @pot_cur; my @flag_cur; (readin_table($infile,@r_cur,@pot_cur,@flag_cur)) || die "$progname: error at readin_table\n"; my $outfile="$ARGV[1]"; my @pot; my $a = $ARGV[2]; # TODO: think about addition rules # now I did it like that to always maintain interval of interest in all potentials # find end my $last; for ($last=$#r_cur;$last>0;$last--) { last if($flag_cur[$last] eq "i"); } use constant { PI => 4 * atan2(1,1) }; for (my $i=0;$i<=$#r_cur;$i++){ $pot[$i]=$pot_cur[$i]; if($flag_cur[$i] eq "i") { if($r_cur[$i]>$a) { $pot[$i] = $pot_cur[$i] * cos(PI*($r_cur[$i]-$a)/(2.0*($r_cur[$last]-$a))) } } } saveto_table($outfile,@r_cur,@pot,@flag_cur) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/table_to_tab.pl000077500000000000000000000127401315264121600215210ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2014 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; my $usage="Usage: $progname [OPTIONS] "; my $type="non-bonded"; my $sim_prog="none"; while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")){ print < $outfile") or die "saveto_table: could not open $outfile\n"; if ($sim_prog eq "espresso") { # espresso specific header - no other starting comments printf(OUTFILE "#%d %f %f\n", $#r+1, $r[0],$r[$#r]); for(my $i=0;$i<=$#r;$i++){ printf(OUTFILE "%15.10e %15.10e %15.10e\n",$r[$i],($r[$i]>0)?-$pot_deriv[$i]/$r[$i]:-$pot_deriv[$i], $pot[$i]); } } elsif ($sim_prog eq "lammps") { if ($type eq "non-bonded"){ printf(OUTFILE "VOTCA\n"); printf(OUTFILE "N %i R %f %f\n\n",$#r+1,$r[0],$r[$#r]); for(my $i=0;$i<=$#r;$i++){ printf(OUTFILE "%i %15.10e %15.10e %15.10e\n",$i+1,$r[$i], $pot[$i], -$pot_deriv[$i]); } } elsif ( $type eq "bond" || $type eq "angle" || $type eq "dihedral" ) { printf(OUTFILE "VOTCA\n"); printf(OUTFILE "N %i\n\n",$#r+1); for(my $i=0;$i<=$#r;$i++){ printf(OUTFILE "%i %12.5e %15.7e %15.7e\n",$i+1,$r[$i], $pot[$i], -$pot_deriv[$i]); } } else { #should never happen die "$progname: tabulated potentials/forces for lammps $type not implemented\n"; } } elsif ($sim_prog eq "dlpoly") { if ($type eq "non-bonded"){ # see dlpoly manual ngrid = cut/delta + 4 = $#r + 4 as table starts with delta (not 0) # number of lines int((ngrid+3)/4) for(my $i=0;$i<4*int(($#r+7)/4);$i++){ printf(OUTFILE "%15.7e",($i>$#r)?0:$pot[$i]); printf(OUTFILE "%s",($i%4==3)?"\n":" "); } for(my $i=0;$i<4*int(($#r+7)/4);$i++){ # no scaling factor needed 1 kJ/nm *nm = 1 (kJ/Angs)*Angs printf(OUTFILE 
"%15.7e",($i>$#r)?0:-$pot_deriv[$i]*$r[$i]); printf(OUTFILE "%s",($i%4==3)?"\n":" "); } } elsif ( $type eq "bond" ) { for(my $i=0;$i<=$#r;$i++){ #nm -> Angs: $r[$i]*10.0 printf(OUTFILE "%12.5e %15.7e %15.7e\n",$r[$i]*10.0, $pot[$i], -$pot_deriv[$i]*$r[$i]); } } elsif ( $type eq "angle" || $type eq "dihedral" ) { my $RadToDegree=180.0/3.14159265359; for(my $i=0;$i<=$#r;$i++){ #rad -> degree: $r[$i]*$RadToDegree, and $pot_deriv[$i]/$RadToDegree printf(OUTFILE "%12.5e %15.7e %15.7e\n",$r[$i]*$RadToDegree, $pot[$i], -$pot_deriv[$i]/$RadToDegree); } } else { #should never happen die "$progname: tabulated potentials/forces for dlpoly $type not implemented\n"; } } elsif ($sim_prog eq "gromacs") { printf(OUTFILE "#This is just a failback, for using different columns use table_to_xvg.pl instead!\n"); for(my $i=0;$i<=$#r;$i++){ printf(OUTFILE "%15.10e %15.10e %15.10e %15.10e %15.10e %15.10e %15.10e\n",$r[$i], ,0,0,0,0,$pot[$i], -$pot_deriv[$i]); } } else { #generic for espressopp / hoomd-blue for(my $i=0;$i<=$#r;$i++){ printf(OUTFILE "%15.10e %15.10e %15.10e\n",$r[$i], $pot[$i], -$pot_deriv[$i]); } } close(OUTFILE) or die "Error at closing $outfile\n"; csg-1.4.1/share/scripts/inverse/table_to_xvg.pl000077500000000000000000000074771315264121600215720ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# use strict; ( my $progname = $0 ) =~ s#^.*/##; my $usage="Usage: $progname [OPTIONS] "; my $type="non-bonded"; my $gmx_max=undef; while ((defined ($ARGV[0])) and ($ARGV[0] =~ /^-./)) { if (($ARGV[0] !~ /^--/) and (length($ARGV[0])>2)){ $_=shift(@ARGV); #short opt having agruments examples fo if ( $_ =~ /^-[fo]/ ) { unshift(@ARGV,substr($_,0,2),substr($_,2)); } else{ unshift(@ARGV,substr($_,0,2),"-".substr($_,2)); } } if (($ARGV[0] eq "-h") or ($ARGV[0] eq "--help")){ print <$gmx_max; $pot[$i]=-$gmx_max if $pot[$i]<-$gmx_max; } } my @force; #calc force for (my $i=1;$i<$#r;$i++){ $force[$i]=-($pot[$i+1]-$pot[$i-1])/($r[$i+1]-$r[$i-1]); } if ( "$type" eq "dihedral" ) { $force[0]=-($pot[1]-$pot[$#r-1])/($r[1]-$r[0]+$r[$#r]-$r[$#r-1]); $force[$#r]=$force[0]; } else { $force[0]=0; $force[$#r]=0.0; } open(OUTFILE,"> $outfile") or die "saveto_table: could not open $outfile\n"; my $fmt=undef; my $begin=0; my $end=undef; if (( "$type" eq "non-bonded" ) or ("$type" eq "C12" )) { $fmt=sprintf("%%15.10e %15.10e %15.10e %15.10e %15.10e %%15.10e %%15.10e\n",0,0,0,0); } elsif ( "$type" eq "C6" ){ $fmt=sprintf("%%15.10e %15.10e %15.10e %%15.10e %%15.10e %15.10e %15.10e\n",0,0,0,0); } elsif ( "$type" eq "CB" ){ $fmt=sprintf("%%15.10e %%15.10e %%15.10e %15.10e %15.10e %15.10e %15.10e\n",0,0,0,0); } elsif ( "$type" eq "bond" ){ $fmt="%15.10e %15.10e %15.10e\n"; } elsif ( "$type" eq "angle" ){ $fmt="%15.10e %15.10e %15.10e\n"; $end=180; } elsif ( "$type" eq "dihedral" ){ $fmt="%15.10e %15.10e %15.10e\n"; $begin=-180; $end=180; } else{ die "$progname: Unsupported type of interatction: $type -> go and implement it\n"; } die "$progname: table for type $type should begin with $begin, but I found $r[0]\n" if(abs($begin-$r[0]) > 1e-3); die "$progname: table for type $type should end with $end, but I found $r[$#r]\n" if(($end) and (abs($end-$r[$#r]) > 1e-3)); print OUTFILE "$comments" if (defined($comments)); for(my $i=0;$i<=$#r;$i++){ printf(OUTFILE "$fmt",$r[$i],$pot[$i], $force[$i]); } close(OUTFILE) or die "Error at closing $outfile\n"; csg-1.4.1/share/scripts/inverse/tables_jackknife.pl000077500000000000000000000043041315264121600223560ustar00rootroot00000000000000#! /usr/bin/perl -w # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use strict; ( my $progname = $0 ) =~ s#^.*/##; if (defined($ARGV[0])&&("$ARGV[0]" eq "--help")){ print < 0) { my $file_cur="$ARGV[0]"; my @r_cur; my @val_cur; my @flag_cur; (readin_table($file_cur,@r_cur,@val_cur,@flag_cur)) || die "$progname: error at readin_table\n"; #should never happen, but .... 
die "Different grids\n" if (($r_delta[1]-$r_delta[0]-$r_cur[1]+$r_cur[0])>0.0001); die "Different start potential point \n" if (($r_delta[0]-$r_cur[0]) > 0.0001); die "Different end potential point \n" if ( $#r_cur != $#r_delta ); for (my $i=0;$i<=$#r_cur;$i++) { $err[$i] += ($val_cur[$i] - $val_full[$i])**2; # is already nan or we don't change } shift @ARGV; $nblocks = $nblocks + 1; } for (my $i=0;$i<=$#r_full;$i++) { $err[$i]=sqrt(($nblocks-1)/$nblocks*$err[$i]); } saveto_table_err($outfile,@r_full,@val_full,@flag_full,@err) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/tag_file.sh000077500000000000000000000022601315264121600206470ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < "$output" || die "${0##*/}: sed failed" cat "$input" >> "$output" || die "${0##*/}: sed failed" csg-1.4.1/share/scripts/inverse/update_ibi.sh000077500000000000000000000021351315264121600212030ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat <0.0001); die "Different start potential point \n" if (($r_aim[0]-$r_cur[0]) > 0.0001); die "Different end potential point \n" if ( $#r_aim != $#r_cur ); my $outfile="$ARGV[3]"; my @dpot; my @flag; my $value=0.0; #start from the end to make the begining have the last value for (my $i=$#r_aim;$i>=0;$i--){ if (($rdf_aim[$i] > 1e-10) && ($rdf_cur[$i] > 1e-10)) { $dpot[$i]=log($rdf_cur[$i]/$rdf_aim[$i])*$pref; $flag[$i]="i"; } else { $dpot[$i]=$value; $flag[$i]="o"; } if($pot_flags_cur[$i] =~ /[u]/) { $dpot[$i]=$value; $flag[$i]="o"; } else { $value=$dpot[$i]; } } my $comment="#progname: aim_rdf=$aim_rdf_file cur_rdf=$cur_rdf_file cur_pot=$cur_rdf_file\n"; saveto_table($outfile,@r_aim,@dpot,@flag,$comment) || die "$progname: error at save table\n"; csg-1.4.1/share/scripts/inverse/update_ibi_single.sh000077500000000000000000000032521315264121600225450ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [ "$1" = "--help" ]; then cat < "${otype}.state.try" do_external ${otype} precede_state "${otype}.state.try" "${otype}.state.done" state="$(critical sed -n 's/#State = \(.*\)$/\1/p' ${otype}.state.done)" [[ ${otype} = simplex ]] && msg "Simplex state changed to $state" else pending="$(echo "$pending" | critical sed -n '$=')" msg "There are still $pending simulations to be performed before the next ${otype} state change" critical sed "${active}s/0 active$/ $conv complete/" "${otype}.state.cur" > "${otype}.state.done" if [[ ${otype} = cma ]]; then critical cp "${otype}.internal_state.cur" "${otype}.internal_state.new" fi fi do_external optimizer state_to_potentials "${otype}.state.done" "${otype}.state.new" csg-1.4.1/share/scripts/inverse/update_optimizer_single.sh000077500000000000000000000034101315264121600240200ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # if [[ $1 = "--help" ]]; then cat < "${name}.conv" tasklist=$(csg_get_interaction_property --allow-empty inverse.post_update) [[ -z $tasklist ]] || die "Postupd tasks for $name found, this is not allowed in optimizer" csg-1.4.1/share/scripts/inverse/update_re.sh000077500000000000000000000044071315264121600210520ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
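#
# Added illustration: the iterative Boltzmann inversion update performed by
# the Perl helper a few files above is
#   dU(r) = pref * ln( g_cur(r) / g_target(r) ) ,
# where the prefactor is supplied by the calling script (typically kBT); grid
# points where either distribution vanishes keep the last valid correction and
# are flagged 'o'.  The script below instead drives the relative-entropy
# update via csg_reupdate.
#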
# if [ "$1" = "--help" ]; then cat <" "0"; then msg --color blue --to-stderr "Automatically setting equi_time to 0, because CSG_RUNTEST was set" equi_time=0 fi first_frame="$(csg_get_property cg.inverse.$sim_prog.first_frame)" csg_reupdate_opts="$(csg_get_property --allow-empty cg.inverse.re.csg_reupdate.opts)" if [[ ${CSG_RUNTEST} ]] ; then msg --color blue --to-stderr "Automatically adding '--hessian-check no', because CSG_RUNTEST was set" csg_reupdate_opts+=" --hessian-check no" fi tasks=$(get_number_tasks) if is_done "re_update"; then echo "RE update is already done" else #copy+resample all target dist in $this_dir for_all "non-bonded bonded" do_external resample target '$(csg_get_interaction_property inverse.target)' '$(csg_get_interaction_property name).dist.tgt' critical csg_reupdate --nt $tasks --top ${topol} --trj $traj --options $CSGXMLFILE --begin $equi_time --first-frame $first_frame ${csg_reupdate_opts} mark_done "re_update" fi csg-1.4.1/share/scripts/still_dirty/000077500000000000000000000000001315264121600174255ustar00rootroot00000000000000csg-1.4.1/share/scripts/still_dirty/calc_rdf_gromacs.sh000077500000000000000000000060201315264121600232320ustar00rootroot00000000000000#! /bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicale law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
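#
# Added illustration: the relative-entropy step above boils down to a single
# csg_reupdate call of the form
#
#   csg_reupdate --nt $tasks --top topol.tpr --trj traj.xtc \
#                --options settings.xml --begin $equi_time --first-frame $first_frame
#
# (file names are placeholders; the real values come from csg_get_property as
# shown in update_re.sh above).
#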
# if [[ $1 = "--help" ]]; then cat < $tmp awk "{printf(\"%s %.16e %s\\n\", \$1,($n*\$2 + \$5)/($n+1),\$3);}" $tmp > $outfile n=$((n+1)) shift done rm $tmp } average_imc() { octave $CSGINVERSE/imcdata_from_blocks.octave sed -e '/^[#@]/d' $name.gmc.block > $1.gmc sed -e '/^[#@]/d' $name.imc.block > $1.imc } calc_dpot_ibm() { update_POT="$($SOURCE_WRAPPER update ibm_pot)" \ || die "${0##*/}: $SOURCE_WRAPPER update ibm_pot failed" run_or_exit ${update_POT} ${name}.dist.tgt \ $1.dist.new ${name}.pot.cur $1.dpot.new } all_dist="" all_dpot="" for block in $(seq 1 $nblocks); do echo "skipping block $block" all_dpot="$all_dpot ${name}_no_$block.dpot.new" all_dist="$all_dist ${name}_$block.dist.new" case $method in ibm) in_dist="" for i in $(seq 1 $nblocks | sed "/^${block}\$/d"); do in_dist="$in_dist ${name}_$i.dist.new" done #begin_block $block average_tables ${name}_no_$block.dist.new $in_dist calc_dpot_ibm ${name}_no_$block #end_block $block ;; imc) seq 1 $nblocks | sed "/^${block}\$/d" > $name.blocks average_imc ${name}_no_$block $CSGINVERSE/solve_octave.sh ${name}_no_$block $name.pot.cur ;; esac done case $method in ibm) average_tables ${name}.dist.new $all_dist calc_dpot_ibm ${name} ;; imc) seq 1 $nblocks > $name.blocks average_imc ${name} $CSGINVERSE/solve_octave.sh ${name} $name.pot.cur ;; esac ~/src/csg/scripts/csg_call.sh tables jackknife $name.dpot.err CG-CG.dpot.new $all_dpot #case "$method" in # ibm) # ;; # imc) # ;; #esac #run_or_exit ${update_POT} ${name}.dist.tgt ${name}.dist.new ${name}.pot.cur ${name}.dpot.tmpcsg-1.4.1/share/scripts/still_dirty/eval_errors.sh000077500000000000000000000016071315264121600223130ustar00rootroot00000000000000#!/bin/bash # # Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
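#
# Added illustration: average_tables() in calc_rdf_gromacs.sh above keeps a
# running mean while looping over the block files -- after pasting the current
# average next to the next block it updates column 2 as (n*old + new)/(n+1),
# roughly
#
#   paste avg.dist block.dist | awk -v n=$n '{printf "%s %.16e %s\n", $1, (n*$2+$5)/(n+1), $3}'
#
# (file and variable names are illustrative).  The surrounding loop builds the
# leave-one-block-out averages that tables_jackknife.pl turns into error bars.
#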
# rm errors.ibm rm errors.imc for i in $(seq 1 250); do sed -e '/[uo]/d' ibm.err.$i | sed -e '1d' | awk "{sum+=\$4;n++;}END{print $i*16, sum/n}" >> errors.ibm sed -e '/[uo]/d' imc.err.$i | sed -e '1d' | awk "{sum+=\$4;n++;}END{print $i*16, sum/n}" >> errors.imc done csg-1.4.1/share/template/000077500000000000000000000000001315264121600152075ustar00rootroot00000000000000csg-1.4.1/share/template/CMakeLists.txt000066400000000000000000000007341315264121600177530ustar00rootroot00000000000000include_directories(${CMAKE_SOURCE_DIR}/include) foreach(PROG template_serial template_threaded) file(GLOB ${PROG}_SOURCES ${PROG}.cc) add_executable(${PROG} ${${PROG}_SOURCES}) target_link_libraries(${PROG} votca_csg) install(FILES ${${PROG}_SOURCES} DESTINATION ${DATA}/template) endforeach(PROG) install(FILES CMakeLists.txt.template DESTINATION ${DATA}/template RENAME CMakeLists.txt) install(FILES Makefile.template DESTINATION ${DATA}/template RENAME Makefile) csg-1.4.1/share/template/CMakeLists.txt.template000066400000000000000000000016471315264121600215710ustar00rootroot00000000000000#change the name here or run cmake -DNAME="XXX" set(NAME "template_serial" CACHE STRING "Name of the csg app") cmake_minimum_required(VERSION 2.6) project(${NAME}) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules) enable_language(CXX) include(CheckCXXCompilerFlag) find_package(Boost 1.39.0 REQUIRED COMPONENTS program_options ) include_directories(${Boost_INCLUDE_DIRS}) set (BOOST_LIBRARIES ${Boost_PROGRAM_OPTIONS_LIBRARY}) find_package(VOTCA_TOOLS REQUIRED) include_directories(${VOTCA_TOOLS_INCLUDE_DIRS}) find_package(VOTCA_CSG REQUIRED) include_directories(${VOTCA_CSG_INCLUDE_DIRS}) file(GLOB ${NAME}_SOURCES ${NAME}*.cc) #add extra source files here add_executable(${NAME} ${${NAME}_SOURCES}) target_link_libraries(${NAME} ${VOTCA_CSG_LIBRARIES} ${VOTCA_TOOLS_LIBRARIES} ${BOOST_LIBRARIES}) install(TARGETS ${NAME} RUNTIME DESTINATION bin) set_target_properties(${NAME} PROPERTIES OUTPUT_NAME csg_${NAME}) csg-1.4.1/share/template/Makefile.template000066400000000000000000000021121315264121600204550ustar00rootroot00000000000000#change the name of the program here NAME=template_threaded #add extra cc file to compile here EXTRA_SRC= ###############################################################3 #below only boring default stuff #only change it if you know what you are doing ;-) #what should be done by default all: $(NAME) #if VOTCALDLIB is defined we add it to PKG_CONFIG_PATH ifeq "$(origin VOTCASHARE)" "undefined" $(error "VOTCASHARE not found, please source VOTCARC") endif #get CPPFLAGS and LDFLAGS from pkg-config CPPFLAGS=`pkg-config --cflags libvotca_csg` LIBS=`pkg-config --libs libvotca_csg` #for static binaries uncomment the next 3 lines #CPPFLAGS=`pkg-config --static --cflags libvotca_csg` #LDFLAGS=-static #LIBS=`pkg-config --static --libs libvotca_csg` #generate a list of object (.o) files OBJS=$(patsubst %.cc,%.o,$(NAME).cc $(EXTRA_SRC)) #main program depend on all objects, rest is done by implicit rules $(NAME): $(OBJS) ${CXX} ${LDFLAGS} -o ${NAME} ${OBJS} ${LIBS} #clean up rule clean: rm -f $(NAME) $(OBJS) #all, clean are phony rules, e.g. they should always run .PHONY: all clean csg-1.4.1/share/template/template_serial.cc000066400000000000000000000041731315264121600206750ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include #include #include #include #include #include //using namespace votca::tools; using namespace std; using namespace votca::csg; class CsgTestApp : public CsgApplication { string ProgramName() { return "template_nblist"; } void HelpText(ostream &out) { out << "rough template for rdf calculations"; } void Initialize(); bool DoTrajectory() {return true;} void BeginEvaluate(Topology *top, Topology *top_ref); void EvalConfiguration(Topology *top, Topology *top_ref); void EndEvaluate(); protected: HistogramNew _rdf; double _cut_off; }; int main(int argc, char** argv) { CsgTestApp app; return app.Exec(argc, argv); } void CsgTestApp::EvalConfiguration(Topology *top, Topology *top_ref) { BeadList b; b.Generate(*top, "*"); NBListGrid nb; nb.setCutoff(_cut_off); nb.Generate(b); NBList::iterator i; for(i=nb.begin(); i!=nb.end(); ++i) _rdf.Process((*i)->dist()); } void CsgTestApp::Initialize() { CsgApplication::Initialize(); AddProgramOptions("RDF options") ("c", boost::program_options::value()->default_value(1.0), "the cutoff"); } void CsgTestApp::BeginEvaluate(Topology *top, Topology *top_ref) { _cut_off = OptionsMap()["c"].as(); _rdf.Initialize(0, _cut_off, 50); } void CsgTestApp::EndEvaluate() { _rdf.data().Save("rdf.dat"); } csg-1.4.1/share/template/template_threaded.cc000066400000000000000000000141741315264121600212000ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include #include #include #include #include #include using namespace std; using namespace votca::csg; // comments were mainly added to explain the "overhead" needed for threaded // calculations/analyzations // to sum it up: instead of having one "thread" doing all your work (the whole tracetory), // you may split it into single frames and distribute it among many "workers". // a solid choice is: number of cores = number of workers. // you, as the user, are required to define how to initialize and merge your workers. 
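// -------------------------------------------------------------------------
// Added illustration: the serial template above is installed as
// csg_template_serial (see the template CMakeLists/Makefile earlier, which
// link against libvotca_csg found via pkg-config).  A typical run might look
// roughly like
//   csg_template_serial --top topol.tpr --trj traj.xtc --c 1.2
// and writes the histogram to rdf.dat; the topology/trajectory options are
// provided by CsgApplication and may differ, so treat this line as a sketch.
// -------------------------------------------------------------------------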
// the main part of the program, EvalConfiguration, is shifted to the Worker class // but other than that stays untouched compared to a non-threaded version class CsgTestApp : public CsgApplication { string ProgramName() { return "template_threaded_rdf"; } void HelpText(ostream &out) { out << "template for threaded rdf calculations"; } void Initialize(); bool DoTrajectory() { return true; } // explicitly turn on threaded mode by overriding DoThreaded() and returning true // note that threads will be started and merged in an ordered way by default // this has the disadvantage of slowing everything down a bit (you will likely not // notice a decrease of performance), but the advantage of processing frames in // their original order // in most cases, you want that // in some cases, where reading and writing/merging does not have to occur in order, // you may consider switching SynchronizeThreads() off // in this example, where an rdf-like value is calculated, ordered reading/writing is not // neccessary. however, leave it untouched to prevent future mistakes bool DoThreaded() { return true; } // are you sure? really? // bool SynchronizeThreads() { // return false; // } void BeginEvaluate(Topology *top, Topology *top_ref); void EndEvaluate(); // ForkWorker is the function you need to override and initialize your workers CsgApplication::Worker *ForkWorker(void); // MergeWorker needs you to define how to merge different workers and their data void MergeWorker(Worker *worker); protected: // data belonging to the main class CsgTestApp HistogramNew _rdf; double _cut_off; }; // derive from CsgApplication::Worker and define your worker class RDFWorker : public CsgApplication::Worker { public: ~RDFWorker(){}; // override EvalConfiguration with your analysis routine void EvalConfiguration(Topology *top, Topology *top_ref); // data belonging to this particular worker HistogramNew _rdf; double _cut_off; }; int main(int argc, char** argv) { CsgTestApp app; return app.Exec(argc, argv); } void CsgTestApp::Initialize() { CsgApplication::Initialize(); AddProgramOptions("RDF options") ("c", boost::program_options::value()->default_value(1.0), "the cutoff"); } void CsgTestApp::BeginEvaluate(Topology *top, Topology *top_ref) { _cut_off = OptionsMap()["c"].as(); _rdf.Initialize(0, _cut_off, 50); } // create and initialize single workers // ForkWorker() will be called as often as the parameter '--nt NTHREADS' // it creates a new worker and the user is required to initialize variables etc. 
// (if needed) CsgApplication::Worker * CsgTestApp::ForkWorker() { RDFWorker *worker; worker = new RDFWorker(); // initialize worker->_cut_off = OptionsMap()["c"].as(); worker->_rdf.Initialize(0, worker->_cut_off, 50); return worker; } // EvalConfiguration does the actual calculation // you won't see any explicit threaded stuff here void RDFWorker::EvalConfiguration(Topology *top, Topology *top_ref) { BeadList b; b.Generate(*top, "*"); NBListGrid nb; nb.setCutoff(_cut_off); nb.Generate(b); NBList::iterator i; for (i = nb.begin(); i != nb.end(); ++i) { _rdf.Process((*i)->dist()); } } // the user is required to define how to merge the single data // belonging to each thread into the main data belonging to CsgTestApp void CsgTestApp::MergeWorker(Worker *worker) { RDFWorker * myRDFWorker; // cast generel Worker into your derived worker class(here RDFWorker) myRDFWorker = dynamic_cast (worker); // the next comment block explains how mutexes are used internally for this function: // mutexes are used to exclusively work on data // e.g., if you read or write global data, make sure that nobody else (i.e. no other worker) // works on that very same piece of data at the same time; otherwise, // you will end up with wrong results that you struggle to understand // the parent class handles a "merging mutex" for you internally; this is what happens: // first, a mutex is created, e.g. // Mutex rdfMutex; // then, for each worker, the mutex is first locked // rdfMutex.Lock()) // and MergeWorker(worker) is called (i.e. the code you define here is executed) // after MergeWorker exits, the mutex is unlocked // rdfMutex.Unlock(); // and allows other threads to get a lock and start merging // now follows your code // merging of data in this simple example is easy and does not have to follow // the original order of frames (since plain summing is commutative) _rdf.data().y() = _rdf.data().y() + myRDFWorker->_rdf.data().y(); } void CsgTestApp::EndEvaluate() { _rdf.data().y() = element_div(_rdf.data().y(), element_prod(_rdf.data().x(), _rdf.data().x()) ); _rdf.data().Save("rdf.dat"); } csg-1.4.1/share/xml/000077500000000000000000000000001315264121600141745ustar00rootroot00000000000000csg-1.4.1/share/xml/CMakeLists.txt000066400000000000000000000007211315264121600167340ustar00rootroot00000000000000set(CSG_DEFAULTS csg_defaults.xml) set(MDRUN "gmx mdrun") set(G_ENERGY "gmx energy") set(GROMPP "gmx grompp") configure_file(${CSG_DEFAULTS}.in ${CMAKE_CURRENT_BINARY_DIR}/${CSG_DEFAULTS} @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${CSG_DEFAULTS} DESTINATION ${DATA}/xml) file(GLOB_RECURSE VOTCA_XML mapping.xml) install(FILES ${VOTCA_XML} DESTINATION ${DATA}/xml) file(GLOB_RECURSE TOPOL_XML topol.xml) install(FILES ${TOPOL_XML} DESTINATION ${DATA}/xml) csg-1.4.1/share/xml/csg_defaults.xml.in000066400000000000000000000757741315264121600200120ustar00rootroot00000000000000 Section containing the all coarse-graining options Force matching options boolean variable: false - simple least squares, true - constrained least squares. For details see the VOTCA paper. Practically, both algorithms give the same results, but simple least squares is faster. If you are a mathematician and you think that a spline can only then be called a spline if it has continuous first and second derivatives, use constrained least squares. 1e-5 Accuracy for evaluating the difference in bead positions. Default is 1e-5 number of frames, being used for block averaging. 
Atomistic trajectory, specified with --trj option, is divided into blocks and the force matching equations are solved separately for each block. Coarse-grained force-field, which one gets on the output is averaged over those blocks. general options for inverse script these files are removed after each iteration 0 lower convergency limit to stop none type of convergence check to do general dlpoly specific options DLPOLY.Z command to run dlpoly (name or absolute path or 'mpirun dlpoly' or such) .dlpf Gromacs binary topology (tpr) file to be written by grompp and used for the simlation .dlph Name of the output dlpoly trajectory file REVIVE REVCON Names of the dlpoly checkpoint files dlpoly internal grid end point for tabulated non-bonded potentials (applied to all non-bonded) dlpoly internal grid number for tabulated non-bonded potentials (applied to all non-bonded) dlpoly specs for tabulated bonded potentials (applied to all bonds) dlpoly internal grid end point for tabulated potentials dlpoly internal grid number for tabulated potentials dlpoly specs for tabulated bonded potentials (applied to all angles) dlpoly internal grid number for tabulated potentials dlpoly specs for tabulated bonded potentials (applied to all dihedrals) dlpoly internal grid number for tabulated potentials Espresso Command to run espresso (name or absolute path or mpirun espresso..) ${script} option to be given to espresso program, use ${script} in there 0 trash the given number of frames at the beginning of trajectory espresso internal grid for tabulated potentials Name of the output Espresso trajectory file python2 Command to run espresso (name or absolute path or mpirun espresso..) ${script} option to be given to espresso program, use ${script} in there 0 trash the given number of frames at the beginning of trajectory these files are copied to each iteration step gnuplot gnuplot binary to use gromacs specific options conf.gro Name of the coordinate file read by grompp confout.gro Name of the original outcome coordinate written by mdrun Length of the block for the error analysis no calculate error on the density: yes/no 0 begin analysis after this time when using gromacs (max of this and first_frame is used) 0 trash the given number of frames at the beginning of trajectory (max of this and first_frame is used) @G_ENERGY@ Name (or absolute path) of the g_energy binary Additional options to Gromacs g_energy (e.g. -P 1) options for pressure calculation using g_energy no is nan an allowed result: yes/no Gromacs binary topol (tpr) file to use by g_energy GMXRC to source at the startup @GROMPP@ Name (or absolute path) of the grompp binary Additional options to Gromacs grompp (e.g. -maxwarn 1) index.ndx Gromacs grompp index file to used by grompp Separate log file for gromacs programs (useful with mdrun -v) grompp.mdp Gromacs mdp file to be used by grompp state.cpt Name of the checkpint to use in case of restarted simulation @MDRUN@ Command to run mdrun (name or absolute path or mpirun mdrun..) Additional options to Gromacs mdrun (e.g. -nosum) 1000000 cut the potential at this value (gromacs bug) no A pre simulation (e.g. 
minimization / equilibration ) is a simulation with a different mdp/topol/index Gromacs grompp index file to used by grompp in the pre simulation Gromacs mdp file to be used by grompp in the pre simulation Gromacs text topol (top) file to use by grompp in the pre simulation Options for the case that calculation of reference system is needed 0 begin analysis after this time when using gromacs (max of this and first_frame is used) 0 trash the given number of frames at the beginning of trajectory (max of this and first_frame is used) Mapping to apply on the coarse-grained topology, use autogenerated ones (cg.inverse.optimizer.mapping.output) and given ones (map other components) Contains options for Reference rdf calculation Extra options to give to csg_stat (e.g. --nframes 100) Reference binary topology(global or local path) Reference trajectory(global or local path) Length of the block for the error analysis Space separated list of special mapping file(s) for rdf calculations needed for bonded interactions Gromacs binary topol (tpr) file to be used for csg_stat no calculate error on the rdf: yes/no 0.002 grid for gromacs xvg table extend the gromacs xvg tables to this value yes check kBT against t_ref in mdp file: yes/no topol.top Gromacs text topology (top) file read by grompp topol.tpr Gromacs binary topology (tpr) file to be written by grompp and used for the simlation traj.xtc Gromacs trajectory file to use hoomd Command to run hoomd-blue (name or absolute path or mpirun ..) ${script} option to be given to hoomd-blue program, use ${script} in there general imc specific options matlab Name (or absolute path) of the matlab binary octave Name (or absolute path) of the octave binary solver for solving a linear equation system: octave/numpy/matlab general lammps specific options lmp command to run lammps (name or absolute path or mpirun lammps..) -in ${script} option to be given to lammps program, use ${script} in there trajectory file to be created by lammps, use a format like xyz, which can be read by csg_stat options, which apply to all simulation programs Command to run for the simulation (name or absolute path or mpirun XXX ..) Name of the coordinate file read by the simulation program (if needed) Name of the original outcome coordinate written by simulation program (if any) 0 begin analysis after this time (max of this and first_frame is used) Length of the block for the error analysis no calculate error on the density: yes/no 0 trash the given number of frames at the beginning of trajectory (max of this and first_frame is used) Special topology file to be used for csg_reupdate Length of the block for the error analysis Space separated list of special mapping file(s) for rdf calculations needed for bonded interactions Special topology file to be used for csg_stat n calculate error on the rdf: yes/no General topology file to be use if no special one is specified trajectory file to be created by the simulation program option to be given to simulation program, use ${script} in there general options for realtive entropy method options for the csg_reupdate command 1 number of steps to be used for average computation. For relative entropy method, these many last iteration steps are used to compute average CG potentials or parameters or both. maindir what initial configuration to use in every step: maindir/laststep/nowhere. 
do the given number of iterations (0=inf) kBT in simulation program units (XXX K *0.00831451 for gromacs) inverse.log name of the log file Special mapping file(s) for rdf calculations needed for bonded interactions method to be performed: ibi/imc/ft/optimizer general options for the cma optimizer standard epsilon, in which the best solution is searched Type of optimizer to be used gromacs simulation package to be used (gromacs/espresso/lammps) restart_points.log Name of the restart file in case a step has to be resumed 1e-10 minimal value for the rdf to consider for the initial guess of the potential list of directories for user scripts (e.g. $PWD) separated by a colon (like PATH) simulation options no tells csg_inverse that the simulation was sent to the background auto number of threads to use for csg_stat grid Grid search algorithm, simple (N square search) or grid Interaction specific options for bonded interactions, see the cg.non-bonded section for all options
Header of the interaction in dlpoly TABBND or TABANG file. The header should be a unique set of the interaction-site names, and these should match the corresponding names specified in the mapping file.
Name of the bonded interaction. The name can be arbitrary but should be unique. For bonded interactions, this should match the name specified in the mapping file. 0 set to 1 when calculating bond dihedral potentials with csg_fmatch -> enforces periodicity of potential. (default is 0)
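The XML element names belonging to the descriptions above are stripped in this listing. As a rough, hedged sketch only (tag names inferred from the descriptions; the periodic flag under fmatch is an assumption, not confirmed by this dump), a bonded entry in a user settings file could look like:

<cg>
  <bonded>
    <!-- name must match the interaction name used in the mapping file -->
    <name>dihedral</name>
    <fmatch>
      <!-- assumed tag: enforce periodicity of dihedral potentials in csg_fmatch -->
      <periodic>1</periodic>
    </fmatch>
  </bonded>
</cg>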
Interaction specific options for non-bonded interactions
Header of the interaction in dlpoly TABLE file. The header should be a unique pair of the interaction-site names, and these should match the corresponding names specified in the mapping file.
Name of the interaction. The name can be arbitrary but should be unique. For bonded interactions, this should match the name specified in the mapping file. **Bead** type 1 of non-bonded interaction. **Bead** type 2 of non-bonded interaction. Internal alias for "non-bonded" or "bonded", set automatically Lower bound of interval for potential table in which calculations are performed. Should be set based on reference distributions. Upper bound of interval for potential table in which calculations are performed. Should be set based on reference distributions. Step size of interval for potential table in which calculations are performed. If the step size is too small, lots of statistics is needed (long runs). If it is too big, features in the distribution/potentials might get lost. Force matching options Minimum value of interval for distribution sampled in atomistic MD simulation. One can get this number by looking at the distribution function for this interaction. For non-bonded interactions it's the distance to the rdf start. For CG bonds and angles the variable has a similar meaning (note that for angles it is specified in radians). Maximum value of interval for distribution sampled in atomistic MD simulation. One can get this number by looking at the distribution function for this interaction. For non-bonded interactions it's the cut-off of the interaction. grid spacing for the spline, which represents the interaction. This parameter should not be too big, otherwise you might lose some features of the interaction potential, and not too small either, otherwise you will have unsampled bins which result in an ill-defined equation system and NaNs in the output. Grid spacing for the output grid. Normally, one wants to have this parameter smaller than fmatch.step, to have a smooth curve, without additional spline interpolation. As a rule of thumb we normally use fmatch.out_step which is approximately 5 times smaller than fmatch.step. Relative entropy options Functional form for the potential. Available functional forms: lj126 (Lennard-Jones 12-6), ljg (Lennard-Jones 12-6 plus Gaussian), and cbspl (uniform cubic B-splines). options specific to the cbspl functional form Number of knot values to be used for the cbspl functional form. The uniform grid size of the CBSPL depends on this parameter; for a fixed potential range, the more knots, the smaller the grid spacing. Make sure the grid spacing is sufficiently large and enough CG simulation steps are performed such that the bins at distances greater than the minimum distance are sampled sufficiently, otherwise an ill-defined system of equations would give NaNs in the output. Contains all information relevant to the iterative process target distribution (e.g. rdf) which one tries to match during the iterations to match the pressure contribution of this interaction particle density of this species (for the wjk pressure correction) 1 Update cycle for the potential update. 1 means update, 0 means don't update. 1 1 0 means update for 2 iterations, then skip one iteration, then repeat. Espresso specific options for this interaction Name of file for tabulated potential of this interaction. This file will be created from the internal tabulated potential format in every step. Note, though, that the original espresso script needs to contain the name of that table as the tabulated interaction (see tutorial methanol ibi_espresso for details).
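As above, the element names are only implied by this dump; a minimal non-bonded entry using the table and force-matching options just described might look like the following sketch (fmatch.step and fmatch.out_step are named explicitly in the text, the remaining tag names are inferred and should be checked against csg_defaults.xml):

<cg>
  <non-bonded>
    <name>CG-CG</name>       <!-- unique interaction name -->
    <type1>CG</type1>        <!-- bead type 1 -->
    <type2>CG</type2>        <!-- bead type 2 -->
    <min>0</min>             <!-- lower bound of the potential table -->
    <max>1.2</max>           <!-- upper bound, typically the cut-off -->
    <step>0.01</step>        <!-- grid of the potential table -->
    <fmatch>
      <min>0.3</min>             <!-- distance where the rdf starts -->
      <max>1.2</max>             <!-- cut-off of the interaction -->
      <step>0.05</step>          <!-- spline grid -->
      <out_step>0.01</out_step>  <!-- output grid, roughly 5x finer than step -->
    </fmatch>
  </non-bonded>
</cg>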
Gromacs specific options for this interaction Name of file for tabulated potential of this interaction. This file will be created from the internal tabulated potential format in every step.
Interaction specific options, which apply to all simulation programs Name of file for tabulated potential of this interaction. This file will be created from the internal tabulated potential format in every step. Note, though, that the original simulation script needs to contain the name of that table as the tabulated interaction (see tutorial methanol ibi_espresso for details).
Start of the tabulated potential of this interaction. (Automatic for gromacs) Bin size of the tabulated potential of this interaction. (gromacs uses a non-interaction-specific option)
Section containing inverse Monte Carlo specific options. Group of interaction. Cross-correlations of all members of a group are taken into account for calculating the update. If no cross correlations should be calculated, interactions have to be put into different groups. 0 magnitude for regularization parameter, default = 0 lammps specific options for this interaction 1 scaling factor for the potential output, can be used to convert VOTCA units, nm, to other units, e.g. angstroms Name of file for tabulated potential of this interaction. This file will be created from the internal tabulated potential format in every step. Note, though, that the lammps script needs to contain the name of that table as the tabulated interaction and the interaction is stored in the VOTCA section of the file.
Additional post-processing of U after dU is added to the potential. This is a list of scripts separated by spaces which are called. See the section on the iterative framework for details. Contains all options of the post add scripts Contains all options of the postadd compress scripts Files to be compressed -9 Option to give to the compression command gzip Compression command to run 1 weight factors for the convergence of this interaction, should be a list of the same length as inverse.post_add_options.convergence.what dist list of what to calculate the convergence from: dist, pot, .. tgt what base values are used to compute the convergence error: tgt, cur, .. 1 which norm to use to compute the error: 1 first norm, 2 second norm Contains all options of the postadd copyback scripts File to be copied back to the maindir Contains all options of the overwrite postadd scripts 1 Cycle for the overwrite postadd script (1 do, 0 do not), like do_potential. Contains all options of the plot postadd scripts 8 file descriptor to use, make it unique if you want to plot multiple things extra options to give to gnuplot_bin like -persist or -geometry kill all processes with that name before plotting (e.g. gnuplot_x11), this is more reliable than using named pipes list for which averages of the last few steps are to be computed: param, pot, ... For the relative entropy method, specify param before pot. Additional post-processing of dU before it is added to the potential. This is a list of scripts separated by spaces which are called. See the section on the iterative framework for details. Contains all options of the post update scripts Contains all options of the Kirkwood-Buff integral correction scripts 1 Update cycle for the Kirkwood-Buff integral correction (1 do, 0 do not). To do the correction every third step specify "0 0 1", similar to do_potential no calculate errors on the Kirkwood-Buff integral: yes/no no 5 Number of points to calculate the average from for the extrapolation Contains all options of the Kirkwood-Buff ramp correction scripts 1 Update cycle for the Kirkwood-Buff ramp correction (1 do, 0 do not). To do the correction every third step specify "0 0 1", similar to do_potential scaling factor for the ramp correction no calculate errors on the Kirkwood-Buff integral: yes/no no cutoff of the ramp Where to start averaging the Kirkwood-Buff integral for the ramp Where to stop averaging the Kirkwood-Buff integral for the ramp Contains all options of the pressure correction scripts 1 Update cycle for the pressure correction (1 do, 0 do not). To do the pressure correction every third step specify "0 0 1", similar to do_potential Contains all options of the simple pressure correction slope of the simple pressure correction simple Pressure correction type, can be simple or wjk Contains all options of the wjk pressure correction 1.0 extra scaling factor of the wjk pressure correction 1.0 scale factor for the update Contains all options of the post_update smooth script 1 number of triangular smoothing passes to be performed Contains all options of the post_update spline smooth script grid spacing for spline fit when doing spline smoothing Contains all options for the density calculation of the optimizer x Axis along which the density is calculated Lower bound of interval in which density calculation is performed. Upper bound of interval in which density calculation is performed. Step size of interval in which density calculation is performed. 
1.0 Scaling factor for density * The molname of this interaction Filename of the target density distribution in the maindir Functional form of the interaction, using parameters in here If the function is very complicated it can be defined in this file, which is used as a header options related to mapping changes no Does the mapping change in optimization: yes/no no Output file name for mapping Parameters to be fitted by the optimizer for this interaction. Note that the parameter names are global Contains all options for the pressure calculation of the optimizer Pressure to use if pressure from the simulation was nan (use a big number) Contains all options for the rdf calculation of the optimizer Filename of the target rdf in the maindir Weighting function for calculating the convergence of the rdf File with the weighting function definition calculating the rdf rdf Targets to be fitted by the optimizer 1 Weight of the targets; the number of entries has to match the number of targets
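The defaults above are normally overridden per interaction in the user's own settings XML. As a hedged sketch only (do_potential appears literally in the descriptions above; the target and table tag names are inferred from the descriptions and should be verified against the installed csg_defaults.xml), an iterative-inverse section for one interaction might look like:

<cg>
  <non-bonded>
    <name>CG-CG</name>
    <inverse>
      <target>CG-CG.dist.tgt</target>   <!-- reference distribution to match during iterations -->
      <do_potential>1</do_potential>    <!-- update the potential every iteration -->
      <gromacs>
        <table>table_CG_CG.xvg</table>  <!-- tabulated potential written from the internal format each step -->
      </gromacs>
    </inverse>
  </non-bonded>
</cg>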
csg-1.4.1/share/xml/mapping.xml000066400000000000000000000053241315264121600163550ustar00rootroot00000000000000 Name of molecule in coarse-grained representation. Molecule name in reference topology. Section defining coarse grained beads of molecule. Section defining coarse grained beads of molecule. Definition of a coarse grained bead. Name of coarse grained bead. Type of coarse grained bead. Mapping scheme to be used for this bead (specified in section mapping) to map from reference system. The beads section lists all atoms of the reference system that are mapped to this particular coarse grained bead. The syntax is RESID:RESNAME:ATOMNAME, the beads are separated by spaces. The cg_bonded section contains all bonded interaction of the molecule. Those can be bond, angle or dihedral. An entry for each group of bonded interaction can be specified, e.g. several groups (types) of bonds can be specified. A specific bonded interaction can be later on addressed by MOLECULE:NAME:NUMBER, where MOLECULE is the molecule ID in the whole topology, NAME the name of the interaction group and NUMBER addresses the interaction in the group. Definition of a group of bonds. Name of the bond. List of pair of beads that define a bond. Names specified in cg_beads Definition of a group of angles. Name of the angle List of triples of beads that define a bond. Names specified in cg_beads Definition of a group of dihedrals. Since the exact functional form does not matter, this combines proper as well as improper dihedrals. Name of the dihedral List of quadruples of beads that define a bond. Names specified in cg_beads Section containing definitions of mapping schemes. Section for a mapping for 1 bead. Name of the mapping Weights of the mapping matrix. Entries are normalized to 1, number of entries must match the number of reference beads in a coarse-grained bead. csg-1.4.1/share/xml/topol.xml000066400000000000000000000061211315264121600160530ustar00rootroot00000000000000 The XML topology root element, the base for the topology can be defined by the "name" attribute Attribute name holds the name of particles group in H5MD file The the molecules in the trajectory or other operation on the molecules. Definition of the molecule, with attributes: name, nmols and nbeads. The name defines residue name, nmols tells how many times this molecule has to be replicated to match with trajectory file and nbeads defines number of beads in every molecule. Define the bead in the molecule. Attributes are: name - the name of bead, type - the type of bead, mass - the mass of bead and q - the value of charge. 
Clear the information about the molecules Rename the molecules; attributes: "name" - the new name, "range" - the range where the new name will be set in the format start_range:end_range Define the molecules; attributes: "name" - the name of molecule, "first" - the id of first molecule, "nbeads" - the number of beads in the molecule, "nmols" - the number of molecules This section defines the topology of the molecules, it is used to generate proper exclusions for calculating rdfs Describes the bond The name of the bond The pair of the beads in the format MOLECULE_NAME:BEAD_NAME Describes the angle The name of the angle The triplet of the beads in the format MOLECULE_NAME:BEAD_NAME Describes the dihedrals The name of the dihedral The quadruplet of the beads in the format MOLECULE_NAME:BEAD_NAME Allows defining bead types Rename the bead type; attributes: "name" - the old name, "newname" - the new name Define the mass of the bead type; attributes: "name" - the bead type name, "value" - the new mass csg-1.4.1/src/000077500000000000000000000000001315264121600130615ustar00rootroot00000000000000csg-1.4.1/src/CMakeLists.txt000066400000000000000000000001211315264121600156130ustar00rootroot00000000000000add_subdirectory(libcsg) add_subdirectory(tools) add_subdirectory(csg_boltzmann) csg-1.4.1/src/csg_boltzmann/000077500000000000000000000000001315264121600157215ustar00rootroot00000000000000csg-1.4.1/src/csg_boltzmann/CMakeLists.txt000066400000000000000000000017101315264121600204600ustar00rootroot00000000000000file(GLOB CSG_BO_SOURCES *.cc) add_executable(csg_boltzmann ${CSG_BO_SOURCES}) target_link_libraries(csg_boltzmann votca_csg) install(TARGETS csg_boltzmann RUNTIME DESTINATION bin) if (TXT2TAGS_FOUND AND BASH) add_custom_command(OUTPUT csg_boltzmann.man COMMAND ${CMAKE_CURRENT_BINARY_DIR}/csg_boltzmann --help > csg_boltzmann.help DEPENDS csg_boltzmann COMMAND ${BASH} ${CMAKE_CURRENT_BINARY_DIR}/../../scripts/help2t2t csg_boltzmann.help > csg_boltzmann.t2t COMMAND ${TXT2TAGS_EXECUTABLE} -q -t man -i csg_boltzmann.t2t -o csg_boltzmann.man DEPENDS help2t2t_build csg_boltzmann) add_custom_target(csg_boltzmann_manpage DEPENDS csg_boltzmann.man) add_dependencies(manpages csg_boltzmann_manpage) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/csg_boltzmann.man DESTINATION ${MAN}/man1 RENAME csg_boltzmann.1) set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES csg_boltzmann.help csg_boltzmann.t2t) endif(TXT2TAGS_FOUND AND BASH) csg-1.4.1/src/csg_boltzmann/Makefile_nb000066400000000000000000000043351315264121600200450ustar00rootroot00000000000000# # There exist several targets which are by default empty and which can be # used for execution of your targets. These targets are usually executed # before and after some main targets. They are: # # .build-pre: called before 'build' target # .build-post: called after 'build' target # .clean-pre: called before 'clean' target # .clean-post: called after 'clean' target # .clobber-pre: called before 'clobber' target # .clobber-post: called after 'clobber' target # .all-pre: called before 'all' target # .all-post: called after 'all' target # .help-pre: called before 'help' target # .help-post: called after 'help' target # # Targets beginning with '.' are not intended to be called on their own. 
# # Main targets can be executed directly, and they are: # # build build a specific configuration # clean remove built files from a configuration # clobber remove all built files # all build all configurations # help print help mesage # # Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and # .help-impl are implemented in nbproject/makefile-impl.mk. # # NOCDDL # Environment MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib # build build: .build-pre .build-impl .build-post .build-pre: # Add your pre 'build' code here... .build-post: # Add your post 'build' code here... # clean clean: .clean-pre .clean-impl .clean-post .clean-pre: # Add your pre 'clean' code here... .clean-post: # Add your post 'clean' code here... # clobber clobber: .clobber-pre .clobber-impl .clobber-post .clobber-pre: # Add your pre 'clobber' code here... .clobber-post: # Add your post 'clobber' code here... # all all: .all-pre .all-impl .all-post .all-pre: # Add your pre 'all' code here... .all-post: # Add your post 'all' code here... # help help: .help-pre .help-impl .help-post .help-pre: # Add your pre 'help' code here... .help-post: # Add your post 'help' code here... # include project implementation makefile include nbproject/Makefile-impl.mk csg-1.4.1/src/csg_boltzmann/analysistool.h000066400000000000000000000024561315264121600206220ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _analasystool_H #define _analasystool_H #include #include #include #include "bondedstatistics.h" using namespace std; /** \brief base class for all analasys tools This is the base class for all analasys tool. \todo do option functions!!! */ class AnalysisTool { public: AnalysisTool() {} virtual ~AnalysisTool() {} virtual void Register(map &lib) {} virtual void Command(BondedStatistics &bs, string cmd, vector &args) {}; virtual void Help(string cmd, vector &args) {}; private: // map _options; }; #endif /* _analasystool_H */ csg-1.4.1/src/csg_boltzmann/bondedstatistics.cc000066400000000000000000000026601315264121600216020ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #include "bondedstatistics.h" void BondedStatistics::BeginCG(Topology *top, Topology *top_atom) { InteractionContainer &ic = top->BondedInteractions(); InteractionContainer::iterator ia; _bonded_values.clear(); for(ia=ic.begin(); ia!=ic.end(); ++ia) { _bonded_values.CreateArray((*ia)->getName()); } } void BondedStatistics::EndCG() { } void BondedStatistics::EvalConfiguration(Topology *conf, Topology *conv_atom) { InteractionContainer &ic = conf->BondedInteractions(); InteractionContainer::iterator ia; DataCollection::container::iterator is; for(ia=ic.begin(), is = _bonded_values.begin(); ia != ic.end(); ++ia, ++is) { // const string &name = (*ia)->getName(); (*is)->push_back((*ia)->EvaluateVar(*conf)); } } csg-1.4.1/src/csg_boltzmann/bondedstatistics.h000066400000000000000000000022711315264121600214420ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _BONDEDSTATISTICS_H #define _BONDEDSTATISTICS_H #include #include using namespace votca::tools; using namespace votca::csg; class BondedStatistics : public CGObserver { public: void BeginCG(Topology *top, Topology *top_atom = 0); void EndCG(); void EvalConfiguration(Topology *conf, Topology *conf_atom = 0); DataCollection &BondedValues() { return _bonded_values; } protected: DataCollection _bonded_values; }; #endif /* _BOLZMANNINVERSION_H */ csg-1.4.1/src/csg_boltzmann/main.cc000066400000000000000000000154121315264121600171570ustar00rootroot00000000000000/* * Copyright 2009-2015 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // TODO: This code need lots of cleaning up! please do not look at anything in here! 
// #include #include #include #include #include #include #include #include "analysistool.h" #include "bondedstatistics.h" #include "tabulatedpotential.h" #include "stdanalysis.h" #include using namespace std; class CsgBoltzmann : public CsgApplication { public: string ProgramName() { return "csg_boltzmann"; } void HelpText(ostream &out) { out << "Performs tasks that are needed for simple boltzmann\n" "inversion in an interactive environment."; } bool DoTrajectory() { return true; } bool DoMapping() { return true; } void Initialize(); bool EvaluateOptions(); void Run(); void InteractiveMode(); bool EvaluateTopology(Topology *top, Topology *top_ref); protected: ExclusionList *CreateExclusionList(Molecule &atomistic, Molecule &cg); BondedStatistics _bs; }; void CsgBoltzmann::Initialize() { CsgApplication::Initialize(); AddProgramOptions("Special options") ("excl", boost::program_options::value(), "write atomistic exclusion list to file"); AddObserver(&_bs); } bool CsgBoltzmann::EvaluateOptions() { CsgApplication::EvaluateOptions(); if (OptionsMap().count("excl")) { CheckRequired("cg", "excl options needs a mapping file"); } return true; } bool CsgBoltzmann::EvaluateTopology(Topology *top, Topology *top_ref) { if (OptionsMap().count("excl")) { ExclusionList *ex; if (top_ref->MoleculeCount() > 1) cout << "WARNING: cannot create exclusion list for topology with" "multiple molecules, using only first molecule\n"; cout << "Writing exclusion list for atomistic molecule " << top_ref->MoleculeByIndex(0)->getName() << " in coarse grained representation " << top_ref->MoleculeByIndex(0)->getName() << endl; ex = CreateExclusionList(*top_ref->MoleculeByIndex(0), *top->MoleculeByIndex(0)); ofstream fl; fl.open(OptionsMap()["excl"].as ().c_str()); fl << "# atomistic: " << top_ref->MoleculeByIndex(0)->getName() << " cg: " << top_ref->MoleculeByIndex(0)->getName() << " cgmap: " << OptionsMap()["cg"].as () << endl; fl << *ex; fl.close(); delete ex; return false; } return true; } ExclusionList *CsgBoltzmann::CreateExclusionList(Molecule &atomistic, Molecule &cg) { ExclusionList *ex = new ExclusionList(); //exclude all with all { list excl_list; for(int i=0; iExcludeList(excl_list); } //remove exclusions from inside a mapped bead Topology *at_top = atomistic.getParent(); for(int i=0; i &w = cg.getBead(i)->ParentBeads(); list excl_list; for(std::vector::iterator it = w.begin(); it != w.end(); ++it){ excl_list.push_back(at_top->getBead(*it)); } ex->Remove(excl_list); } //remove exclusion which come from atomistic topology and hence bonds and angles Topology *cg_top = cg.getParent(); for(int i=0; igetExclusions().IsExcluded(cg.getBead(i),cg.getBead(j))){ vector &w = cg.getBead(i)->ParentBeads(); vector &v = cg.getBead(j)->ParentBeads(); for(std::vector::iterator itw = w.begin(); itw != w.end(); ++itw){ for(std::vector::iterator itv = v.begin(); itv != v.end(); ++itv){ ex->RemoveExclusion(at_top->getBead(*itw),at_top->getBead(*itv)); } } } } } return ex; } void CsgBoltzmann::Run() { CsgApplication::Run(); if (OptionsMap().count("excl")) return; InteractiveMode(); } void CsgBoltzmann::InteractiveMode() { std::map cmds; TabulatedPotential tab; StdAnalysis std; tab.Register(cmds); std.Register(cmds); string help_text = "Interactive mode, expecting commands:\n" "help: show this help\n" "q: quit\n" "list: list all available bonds\n" "vals : write values to file\n" "hist : create histogram\n" "tab : create tabulated potential\n" "autocor : calculate autocorrelation, only one row allowed in selection!\n" "cor : calculate 
correlations, first row is correlated with all other rows"; cout << help_text << endl; while(1) { string line; cout << "> "; getline(cin, line); boost::trim(line); vector args; Tokenizer tok(line, " \t"); tok.ToVector(args); if(args.size() == 0) continue; string cmd = args.front(); args.erase(args.begin()); if(cmd == "q") break; std::map::iterator tool; if(cmd == "help") { if(args.size() == 0) { cout << help_text << endl; continue; } cmd = args.front(); args.erase(args.begin()); tool = cmds.find(cmd); if(tool == cmds.end()) { cout << "error, no help item found" << endl; continue; } tool->second->Help(cmd, args); cout << endl; continue; } tool = cmds.find(cmd); if(tool == cmds.end()) { cout << "error, command not found" << endl; continue; } tool->second->Command(_bs, cmd, args); } } int main(int argc, char **argv) { CsgBoltzmann app; app.Exec(argc, argv); } csg-1.4.1/src/csg_boltzmann/nbproject/000077500000000000000000000000001315264121600177075ustar00rootroot00000000000000csg-1.4.1/src/csg_boltzmann/nbproject/Makefile-Debug.mk000066400000000000000000000057001315264121600230030ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/main.o \ ${OBJECTDIR}/bondedstatistics.o \ ${OBJECTDIR}/stdanalysis.o \ ${OBJECTDIR}/tabulatedpotential.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS=-L/people/thnfs/homes/ruehle/gmx/lib ../../netbeans/libcsg/../../src/libcsg/libcsg.a ../../../tools/netbeans/libtools/../../src/libtools/libtools.a -lgmx -lexpat -lm -lfftw3 -lboost_program_options # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Debug.mk csg_boltzmann csg_boltzmann: ../../netbeans/libcsg/../../src/libcsg/libcsg.a csg_boltzmann: ../../../tools/netbeans/libtools/../../src/libtools/libtools.a csg_boltzmann: ${OBJECTFILES} ${LINK.cc} -o csg_boltzmann ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/main.o: main.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -g -I/people/thnfs/homes/ruehle/gmx/install/include/gromacs -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/main.o main.cc ${OBJECTDIR}/bondedstatistics.o: bondedstatistics.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -g -I/people/thnfs/homes/ruehle/gmx/install/include/gromacs -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/bondedstatistics.o bondedstatistics.cc ${OBJECTDIR}/stdanalysis.o: stdanalysis.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -g -I/people/thnfs/homes/ruehle/gmx/install/include/gromacs -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/stdanalysis.o stdanalysis.cc ${OBJECTDIR}/tabulatedpotential.o: tabulatedpotential.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -g -I/people/thnfs/homes/ruehle/gmx/install/include/gromacs -I../../include -I../../../include -MMD -MP -MF $@.d -o ${OBJECTDIR}/tabulatedpotential.o 
tabulatedpotential.cc # Subprojects .build-subprojects: cd ../../netbeans/libcsg && ${MAKE} -f Makefile_nb CONF=Debug cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Debug ${RM} csg_boltzmann # Subprojects .clean-subprojects: cd ../../netbeans/libcsg && ${MAKE} -f Makefile_nb CONF=Debug clean cd ../../../tools/netbeans/libtools && ${MAKE} -f Makefile_nb CONF=Debug clean # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/src/csg_boltzmann/nbproject/Makefile-Release.mk000066400000000000000000000037441315264121600233430ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a -pre and a -post target defined where you can add customized code. # # This makefile implements configuration specific macros and targets. # Environment MKDIR=mkdir CP=cp GREP=grep NM=nm CCADMIN=CCadmin RANLIB=ranlib CC=gcc CCC=g++ CXX=g++ FC=gfortran AS=as # Macros CND_PLATFORM=GNU-Linux-x86 CND_CONF=Release CND_DISTDIR=dist # Include project Makefile include Makefile_nb # Object Directory OBJECTDIR=build/${CND_CONF}/${CND_PLATFORM} # Object Files OBJECTFILES= \ ${OBJECTDIR}/main.o \ ${OBJECTDIR}/bondedstatistics.o \ ${OBJECTDIR}/stdanalysis.o \ ${OBJECTDIR}/tabulatedpotential.o # C Compiler Flags CFLAGS= # CC Compiler Flags CCFLAGS= CXXFLAGS= # Fortran Compiler Flags FFLAGS= # Assembler Flags ASFLAGS= # Link Libraries and Options LDLIBSOPTIONS= # Build Targets .build-conf: ${BUILD_SUBPROJECTS} "${MAKE}" -f nbproject/Makefile-Release.mk csg_boltzmann csg_boltzmann: ${OBJECTFILES} ${LINK.cc} -o csg_boltzmann ${OBJECTFILES} ${LDLIBSOPTIONS} ${OBJECTDIR}/main.o: main.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/main.o main.cc ${OBJECTDIR}/bondedstatistics.o: bondedstatistics.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/bondedstatistics.o bondedstatistics.cc ${OBJECTDIR}/stdanalysis.o: stdanalysis.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/stdanalysis.o stdanalysis.cc ${OBJECTDIR}/tabulatedpotential.o: tabulatedpotential.cc ${MKDIR} -p ${OBJECTDIR} ${RM} $@.d $(COMPILE.cc) -O2 -MMD -MP -MF $@.d -o ${OBJECTDIR}/tabulatedpotential.o tabulatedpotential.cc # Subprojects .build-subprojects: # Clean Targets .clean-conf: ${CLEAN_SUBPROJECTS} ${RM} -r build/Release ${RM} csg_boltzmann # Subprojects .clean-subprojects: # Enable dependency checking .dep.inc: .depcheck-impl include .dep.inc csg-1.4.1/src/csg_boltzmann/nbproject/Makefile-impl.mk000066400000000000000000000101611315264121600227130ustar00rootroot00000000000000# # Generated Makefile - do not edit! # # Edit the Makefile in the project folder instead (../Makefile). Each target # has a pre- and a post- target defined where you can add customization code. # # This makefile implements macros and targets common to all configurations. # # NOCDDL # Building and Cleaning subprojects are done by default, but can be controlled with the SUB # macro. If SUB=no, subprojects will not be built or cleaned. 
The following macro # statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf # and .clean-reqprojects-conf unless SUB has the value 'no' SUB_no=NO SUBPROJECTS=${SUB_${SUB}} BUILD_SUBPROJECTS_=.build-subprojects BUILD_SUBPROJECTS_NO= BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} CLEAN_SUBPROJECTS_=.clean-subprojects CLEAN_SUBPROJECTS_NO= CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} # Project Name PROJECTNAME=csg_boltzmann # Active Configuration DEFAULTCONF=Debug CONF=${DEFAULTCONF} # All Configurations ALLCONFS=Debug Release # build .build-impl: .build-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf # clean .clean-impl: .clean-pre .validate-impl .depcheck-impl @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf # clobber .clobber-impl: .clobber-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ done # all .all-impl: .all-pre .depcheck-impl @#echo "=> Running $@..." for CONF in ${ALLCONFS}; \ do \ "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ done # build tests .build-tests-impl: .build-impl .build-tests-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf # run tests .test-impl: .build-tests-impl .test-pre @#echo "=> Running $@... Configuration=$(CONF)" "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf # dependency checking support .depcheck-impl: @echo "# This code depends on make tool being used" >.dep.inc @if [ -n "${MAKE_VERSION}" ]; then \ echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ echo "include \$${DEPFILES}" >>.dep.inc; \ echo "endif" >>.dep.inc; \ else \ echo ".KEEP_STATE:" >>.dep.inc; \ echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ fi # configuration validation .validate-impl: @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ echo ""; \ echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ echo "See 'make help' for details."; \ echo "Current directory: " `pwd`; \ echo ""; \ fi @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ then \ exit 1; \ fi # help .help-impl: .help-pre @echo "This makefile supports the following configurations:" @echo " ${ALLCONFS}" @echo "" @echo "and the following targets:" @echo " build (default target)" @echo " clean" @echo " clobber" @echo " all" @echo " help" @echo "" @echo "Makefile Usage:" @echo " make [CONF=] [SUB=no] build" @echo " make [CONF=] [SUB=no] clean" @echo " make [SUB=no] clobber" @echo " make [SUB=no] all" @echo " make help" @echo "" @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," @echo " also clean subprojects." @echo "Target 'clobber' will remove all built files from all configurations and," @echo " unless 'SUB=no', also from subprojects." @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," @echo " also build subprojects." @echo "Target 'help' prints this message." 
@echo "" csg-1.4.1/src/csg_boltzmann/nbproject/configurations.xml000066400000000000000000000077651315264121600235020ustar00rootroot00000000000000 analysistool.h bondedstatistics.h stdanalysis.h tabulatedpotential.h version.h bondedstatistics.cc main.cc stdanalysis.cc tabulatedpotential.cc Makefile_nb Makefile_nb localhost GNU|GNU 2 /people/thnfs/homes/ruehle/gmx/install/include/gromacs ../../include ../../../include csg_boltzmann /people/thnfs/homes/ruehle/gmx/lib gmx expat Mathematics fftw3 boost_program_options localhost GNU|GNU 2 5 5 5 csg_boltzmann csg-1.4.1/src/csg_boltzmann/nbproject/project.properties000066400000000000000000000000001315264121600234610ustar00rootroot00000000000000csg-1.4.1/src/csg_boltzmann/nbproject/project.xml000066400000000000000000000016601315264121600221020ustar00rootroot00000000000000 org.netbeans.modules.cnd.makeproject csg_boltzmann 0 cc h UTF-8 ../../netbeans/libcsg ../../../tools/netbeans/libtools Debug Release csg-1.4.1/src/csg_boltzmann/stdanalysis.cc000066400000000000000000000102411315264121600205640ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include #include #include #include "analysistool.h" #include #include #include "bondedstatistics.h" #include "stdanalysis.h" void StdAnalysis::Register(map &lib) { lib["list"] = this; lib["vals"] = this; lib["cor"] = this; lib["autocor"] = this; } void StdAnalysis::Command(BondedStatistics &bs, string cmd, vector &args) { if(cmd == "vals") WriteValues(bs, args); if(cmd == "cor") WriteCorrelations(bs, args); if(cmd == "autocor") WriteAutocorrelation(bs, args); if(cmd == "list") { DataCollection::selection *sel = bs.BondedValues().select("*"); DataCollection::selection::iterator i; cout << "Available bonded interactions:" << endl; for(i=sel->begin(); i!=sel->end(); ++i) cout << (*i)->getName() << " "; // << "[" << (*i).second->size() << "]" << " "; cout << endl; delete sel; } } void StdAnalysis::Help(string cmd, vector &args) { if(cmd == "vals") { cout << "vals \n" << "write values to file. The first row is the frame number, then one " << "row for each interaction specified. The output can be used to generate " << "2D correlation plots.\n\n" << "example: vals angle *angle*\n"; } if(cmd == "cor") { cout << "cor \n" << "Calculate linear correlation coefficient of the first item in selection with all the other items\n" << "WARNING: for evaluating correlations in the system, it is not sufficient to calculate the " << "linear correlation coefficient, 2D histograms with data from the vals command should be used instead!\n"; } if(cmd == "autocor") { cout << "autocor \n" << "calculate autocorrelation function of first item in selection. 
The output is periodic since FFTW3 is used to " "calcualte correlations.\n"; } if(cmd == "list") { cout << "list\nlists all available interactions\n"; } } void StdAnalysis::WriteValues(BondedStatistics &bs, vector &args) { ofstream out; DataCollection::selection *sel = NULL; for(size_t i=1; isize() << " data rows to " << args[0] << endl; delete sel; } void StdAnalysis::WriteAutocorrelation(BondedStatistics &bs, vector &args) { ofstream out; DataCollection::selection *sel = NULL; for(size_t i=1; isize() << " data rows, written to " << args[0] << endl; delete sel; } void StdAnalysis::WriteCorrelations(BondedStatistics &bs, vector &args) { ofstream out; DataCollection::selection *sel = NULL; for(size_t i=1; isize() << " rows, written to " << args[0] << endl; delete sel; } csg-1.4.1/src/csg_boltzmann/stdanalysis.h000066400000000000000000000025561315264121600204400ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef _STDANALYSIS_H #define _STDANALYSIS_H #include "bondedstatistics.h" #include using namespace std; using namespace votca::tools; using namespace votca::csg; class StdAnalysis : public AnalysisTool { public: StdAnalysis() {}; ~StdAnalysis() {}; void Register(map &lib); void Command(BondedStatistics &bs, string cmd, vector &args); void Help(string cmd, vector &args); void WriteValues(BondedStatistics &bs, vector &args); void WriteCorrelations(BondedStatistics &bs, vector &args); void WriteAutocorrelation(BondedStatistics &bs, vector &args); private: }; #endif /* _STDANALYSIS_H */ csg-1.4.1/src/csg_boltzmann/tabulatedpotential.cc000066400000000000000000000300421315264121600221140ustar00rootroot00000000000000/* * Copyright 2009-2011 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ #include #include #include #include #include #include #include "analysistool.h" #include #include #include "bondedstatistics.h" #include "tabulatedpotential.h" using namespace std; using namespace boost; TabulatedPotential::TabulatedPotential() { _tab_smooth1 = _tab_smooth2 = 0; _T = 300; } void TabulatedPotential::Register(map &lib) { lib["tab"] = this; lib["hist"] = this; } void TabulatedPotential::Command(BondedStatistics &bs, string cmd, vector &args) { if(args[0] == "set") { if(cmd == "hist") SetOption(_hist_options, args); else if(cmd == "tab") { if(!SetOption(_tab_options, args)) { if(args.size() >2) { if(args[1] == "smooth_pdf") _tab_smooth1 = lexical_cast(args[2]); else if(args[1] == "smooth_pot") _tab_smooth2 = lexical_cast(args[2]); else if(args[1] == "T") _T = lexical_cast(args[2]); else { cout << "unknown option " << args[2] << endl; return; } } } if(args.size() <=2) { cout << "smooth_pdf: " << _tab_smooth1 << endl; cout << "smooth_pot: " << _tab_smooth2 << endl; cout << "T: " << _T << endl; } } } else if(args.size() >= 2) { if(cmd == "hist") WriteHistogram(bs, args); else if(cmd == "tab") WritePotential(bs, args); } else cout << "wrong number of arguments" << endl; } void TabulatedPotential::Help(string cmd, vector &args) { if(args.size() == 0) { if(cmd == "tab") { cout << "tab \n" << "Calculate tabulated potential by inverting the distribution function. " "Statistics is calculated using all interactions in selection.\n" "see also: help tab set\n\n" "example:\ntab set scale bond\ntab U_bond.txt *:bond:*\n"; } if(cmd == "hist") { cout << "hist \n" << "Calculate distribution function for selection. " "Statistics is calculated using all interactions in selection.\n" "see also: help hist set\n\n" "example:hist U_bond.txt *:bond:*\n"; } return; } if(args[0] == "set") { if(args.size() == 1) { cout << cmd << " set