Shark-3.1.4/.gitignore
# /
/CMakeFiles
/cmake
/CTestTestfile.cmake
/CPackSourceConfig.cmake
/CPackConfig.cmake
/CMakeCache.txt
/cmake_install.cmake
/bin
/.settings
/autom4te.cache
/lib
/.cdtproject
/.project
/config.status
/configure
/Makefile
/Makefile.in
/SharkConfig
/*.dsw
/share
SharkConfig.cmake
SharkConfigVersion.cmake
SharkTargets.cmake
/Testing
/build

# /Test/
/Test/test_output
/Test/check_sparse.csv
/Test/check_kernelmatrix_sparse_c_none.libsvm
/Test/check_kernelmatrix_r_traceN.libsvm
/Test/check_kernelmatrix_mcc_traceN.libsvm
/Test/check_kernelmatrix_c_traceN.libsvm
/Test/check_kernelmatrix_r_var.libsvm
/Test/check_kernelmatrix_r_trace.libsvm
/Test/check_kernelmatrix_r_none.libsvm
/Test/check_kernelmatrix_r_center_tr.libsvm
/Test/check_kernelmatrix_r_center.libsvm
/Test/check_kernelmatrix_mcc_var.libsvm
/Test/check_kernelmatrix_mcc_trace.libsvm
/Test/check_kernelmatrix_mcc_none.libsvm
/Test/check_kernelmatrix_mcc_center_tr.libsvm
/Test/check_kernelmatrix_mcc_center.libsvm
/Test/check_kernelmatrix_c_var.libsvm
/Test/check_kernelmatrix_c_trace.libsvm
/Test/check_kernelmatrix_c_none.libsvm
/Test/check_kernelmatrix_c_center_tr.libsvm
/Test/check_kernelmatrix_c_center.libsvm
/Test/check_kernelmatrix_r.libsvm
/Test/check_kernelmatrix_mcc.libsvm
/Test/check_kernelmatrix_c.libsvm
/Test/Testing
/Test/new_shark_format
/Test/CMakeCache.txt
/Test/warm.txt
/Test/hot.txt
/Test/curve.dat
/Test/cold.txt
/Test/SteadyStateMOCMA
/Test/SMSEMOA
/Test/MOCMA
/Test/HypRealCodedNSGAII
/Test/EpsRealCodedNSGAII
/Test/EpsilonSteadyStateMOCMA
/Test/EpsilonMOCMA
/Test/AGE
/Test/CMSA_Rosenbrock.html
/Test/CMSA_Ellipsoid.html
/Test/CMA_Rosenbrock.html
/Test/CMA_Ellipsoid.html
/Test/CMA_Discus.html
/Test/CMA_Cigar.html
/Test/OnePlusOneES_Rosenbrock.html
/Test/OnePlusOneES_Ellipsoid.html
/Test/OnePlusOneES_Discus.html
/Test/OnePlusOneES_Cigar.html
/Test/ElitistCMA_Rosenbrock.html
/Test/ElitistCMA_Ellipsoid.html
/Test/ElitistCMA_Discus.html
/Test/ElitistCMA_Cigar.html
/Test/CMSA_Discus.html
/Test/CMSA_Cigar.html
/Test/bin
/Test/Models_Softmax
/Test/Models_SigmoidModel
/Test/Models_RNNet
/Test/Models_RBFNet
/Test/Models_OnlineRNNet
/Test/Models_LinearNorm
/Test/Models_LinearModel
/Test/Models_KernelNearestNeighbor
/Test/Models_KernelMeanClassifier
/Test/Models_KernelFunction
/Test/Models_FFNet
/Test/Models_ConcatenatedModel
/Test/Models_CMAC
/Test/LP_LinearProgram
/Test/LinAlg_VectorStatistics
/Test/LinAlg_simple
/Test/GradDesc_SteepestDescent
/Test/GradDesc_Rprop
/Test/GradDesc_Quickprop
/Test/GradDesc_NoisyRprop
/Test/GradDesc_CG
/Test/GradDesc_BFGS
/Test/Data_Libsvm
/Test/Data_Dataset
/Test/Data_CVDatasetTools
/Test/Data_Csv
/Test/Algorithm_GridSearch
/Test/CMakeFiles
/Test/Makefile
/Test/CTestTestfile.cmake
/Test/cmake_install.cmake
/Test/check.csv

# /Test/test_data/
/Test/test_data/splitfile_test2.split
/Test/test_data/splitfile_test.split
/Test/test_data/set_from_file.split
/Test/test_data/my_random_set.split
/Test/test_data/my_new_set.split
/Test/test_data/warm.txt
/Test/test_data/hot.txt
/Test/test_data/curve.dat
/Test/test_data/cold.txt
/Test/test_data/check.csv
/Test/test_data/OnePlusOneES_Ellipsoid.html
/Test/test_data/OnePlusOneES_Discus.html
/Test/test_data/OnePlusOneES_Cigar.html
/Test/test_data/ElitistCMA_Rosenbrock.html
/Test/test_data/ElitistCMA_Ellipsoid.html
/Test/test_data/ElitistCMA_Discus.html
/Test/test_data/ElitistCMA_Cigar.html
/Test/test_data/CMSA_Rosenbrock.html
/Test/test_data/CMSA_Ellipsoid.html
/Test/test_data/CMSA_Discus.html
/Test/test_data/CMSA_Cigar.html
/Test/test_data/CMA_Rosenbrock.html
/Test/test_data/CMA_Ellipsoid.html
/Test/test_data/CMA_Discus.html
/Test/test_data/CMA_Cigar.html
/Test/test_data/thyroid.csv
/Test/test_data/thyroid.shark-data
/Test/test_data/new_shark_format

# /Test/testing_files/
/Test/testing_files/check_kernelmatrix_sparse_c_none.libsvm
/Test/testing_files/check_kernelmatrix_r_var.libsvm
/Test/testing_files/check_kernelmatrix_r_traceN.libsvm
/Test/testing_files/check_kernelmatrix_r_trace.libsvm
/Test/testing_files/check_kernelmatrix_r_none.libsvm
/Test/testing_files/check_kernelmatrix_r_center_tr.libsvm
/Test/testing_files/check_kernelmatrix_r_center.libsvm
/Test/testing_files/check_kernelmatrix_mcc_var.libsvm
/Test/testing_files/check_kernelmatrix_mcc_traceN.libsvm
/Test/testing_files/check_kernelmatrix_mcc_trace.libsvm
/Test/testing_files/check_kernelmatrix_mcc_none.libsvm
/Test/testing_files/check_kernelmatrix_mcc_center_tr.libsvm
/Test/testing_files/check_kernelmatrix_mcc_center.libsvm
/Test/testing_files/check_kernelmatrix_c_var.libsvm
/Test/testing_files/check_kernelmatrix_c_traceN.libsvm
/Test/testing_files/check_kernelmatrix_c_trace.libsvm
/Test/testing_files/check_kernelmatrix_c_none.libsvm
/Test/testing_files/check_kernelmatrix_c_center_tr.libsvm
/Test/testing_files/check_kernelmatrix_c_center.libsvm
/Test/testing_files/encoding_filters_dos2pos.txt
/Test/testing_files/check_sparse.csv
/Test/testing_files/check.csv

# /contrib/mathjax/
/contrib/mathjax/README.md
/contrib/mathjax/MathJax.js
/contrib/mathjax/LICENSE
/contrib/mathjax/unpacked
/contrib/mathjax/test
/contrib/mathjax/jax
/contrib/mathjax/images
/contrib/mathjax/fonts
/contrib/mathjax/extensions
/contrib/mathjax/docs
/contrib/mathjax/config
/contrib/mathjax/.gitignore
/contrib/mathjax/.git

# /doc/
/doc/Shark.dox
/doc/CMakeCache.txt
/doc/latex
/doc/Makefile
/doc/CTestTestfile.cmake
/doc/cmake_install.cmake
/doc/CMakeFiles
/doc/index.html
/doc/EALib
/doc/Array
/doc/ReClaM
/doc/Mixture
/doc/Fuzzy
/doc/Rng
/doc/LinAlg
/doc/MOO-EALib
/doc/FileUtil
/doc/html

# /doc/doxygen_pages/
/doc/doxygen_pages/html

# /doc/doxygen_pages/tag_files/
/doc/doxygen_pages/tag_files/all.tag

# /doc/sphinx_pages/
/doc/sphinx_pages/conf.py
/doc/sphinx_pages/build

# /doc/sphinx_pages/rest_sources
doc/sphinx_pages/rest_sources/tutorials/algorithms/*.rst
doc/sphinx_pages/rest_sources/tutorials/first_steps/hello_shark.rst
doc/sphinx_pages/rest_sources/tutorials/first_steps/general_optimization_tasks.rst
doc/sphinx_pages/rest_sources/tutorials/first_steps/when_to_stop.rst
doc/sphinx_pages/rest_sources/quickref/datasets.rst
doc/sphinx_pages/rest_sources/tutorials/concepts/data/dataset_subsets.rst
doc/sphinx_pages/rest_sources/tutorials/concepts/data/datasets.rst
doc/sphinx_pages/rest_sources/tutorials/concepts/data/import_data.rst
doc/sphinx_pages/rest_sources/tutorials/concepts/data/normalization.rst
doc/sphinx_pages/rest_sources/tutorials/concepts/misc/random_numbers.rst
doc/sphinx_pages/rest_sources/tutorials/concepts/misc/versatile_classification.rst

# /examples/
/examples/CMakeFiles
/examples/bin
/examples/Makefile
/examples/CTestTestfile.cmake
/examples/cmake_install.cmake
examples/Core/*.cpp
examples/EA/MOO/*.cpp
examples/EA/SOO/*.cpp
examples/Statistics/*.cpp
examples/Supervised/*.cpp
examples/Unsupervised/*.cpp

# /examples/ExampleProject/
/examples/ExampleProject/CMakeLists.txt
# /examples/Supervised/
/examples/Supervised/quickstartData.shark-labeled

# /examples/Unsupervised/
/examples/Unsupervised/clc.csv
/examples/Unsupervised/cl2.csv
/examples/Unsupervised/cl1.csv

# /include/shark/Core/
/include/shark/Core/Shark.h

# /src/
/src/*.dsp
/src/*.dsw
/src/Makefile
/src/Makefile.in

Shark-3.1.4/.travis.yml
language: cpp

compiler:
  - gcc
  - clang

os:
  - linux
  - osx

matrix:
  exclude:
    - os: osx
      compiler: gcc

env:
  - OMP_NUM_THREADS=2

before_install:
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo add-apt-repository -y ppa:boost-latest/ppa; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo add-apt-repository -y ppa:kubuntu-ppa/backports; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get update; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install cmake; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install libboost1.55-dev libboost-date-time1.55-dev libboost-filesystem1.55-dev; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install libboost-graph1.55-dev libboost-iostreams1.55-dev libboost-math1.55-dev; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install libboost-program-options1.55-dev; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install libboost-random1.55-dev libboost-regex1.55-dev libboost-serialization1.55-dev; fi
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install libboost-system1.55-dev libboost-test1.55-dev libboost-thread1.55-dev; fi

before_script:
  - mkdir build
  - cd build
  - cmake ..

script:
  - make -j3 && make test
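# The CI steps above can be reproduced locally; a rough sketch (assuming a
# Linux machine on which CMake and the Boost development packages from
# before_install are already present):
#
#   mkdir build && cd build
#   cmake ..
#   make -j3 && make test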
Shark-3.1.4/CMakeLists.txt
#####################################################################
#	Shark Machine Learning Library
#	Top-Level CMake driver file
#	Optionally included sub-projects:
#	* Test/CMakeLists.txt
#	* examples/CMakeLists.txt
#	* doc/CMakeLists.txt
#####################################################################
project( shark )
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
INCLUDE (CheckFunctionExists)

cmake_minimum_required( VERSION 2.8 )
cmake_policy(SET CMP0003 NEW)
if(POLICY CMP0042)
	cmake_policy(SET CMP0042 NEW)
endif()
if(POLICY CMP0053)
	cmake_policy(SET CMP0053 NEW)
endif()

#=========================================================
# Output directories.
if(NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)
	set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${shark_BINARY_DIR}/bin")
endif()
if(NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY)
	if(UNIX)
		set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${shark_BINARY_DIR}/lib")
	else()
		set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${shark_BINARY_DIR}/bin")
	endif()
endif()
if(NOT CMAKE_ARCHIVE_OUTPUT_DIRECTORY)
	set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${shark_BINARY_DIR}/lib")
endif()
mark_as_advanced(
	CMAKE_RUNTIME_OUTPUT_DIRECTORY
	CMAKE_LIBRARY_OUTPUT_DIRECTORY
	CMAKE_ARCHIVE_OUTPUT_DIRECTORY
)

#####################################################################
#	Static/Shared libraries
#####################################################################
option(BUILD_SHARED_LIBS "Compile shark into a dynamic library instead of a static one." OFF)
if(BUILD_SHARED_LIBS)
	set(SHARK_USE_DYNLIB 1)
endif()
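# A sketch of how the configuration switches defined in this file are
# typically set at configure time (option names are taken from this file;
# everything else on the command line is a placeholder):
#
#   cmake -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release \
#         -DENABLE_OPENMP=ON -DENABLE_CBLAS=ON ..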
#####################################################################
#	Version information
#####################################################################
option(BUILD_OFFICIAL_RELEASE "Is this an official Shark release." OFF)
mark_as_advanced(BUILD_OFFICIAL_RELEASE)

set(SHARK_VERSION_MAJOR 3)
set(SHARK_VERSION_MINOR 0)
set(SHARK_VERSION_PATCH 0)
set(SHARK_VERSION ${SHARK_VERSION_MAJOR}.${SHARK_VERSION_MINOR}.${SHARK_VERSION_PATCH})
set(SHARK_SOVERSION 0)

#####################################################################
#	Adjustments for cpack and deb package generation
#####################################################################
set(CPACK_GENERATOR TGZ DEB)
set(CPACK_PACKAGE_NAME "libshark")
set(CPACK_BUNDLE_NAME "libshark")
set(CPACK_PACKAGE_VENDOR "Institut fur Neuroinformatik, Ruhr-Universitaet Bochum")
set(CPACK_PACKAGE_VERSION_MAJOR ${SHARK_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${SHARK_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${SHARK_VERSION_PATCH})
set(CPACK_PACKAGE_VERSION ${SHARK_VERSION_MAJOR}.${SHARK_VERSION_MINOR}.${SHARK_VERSION_PATCH})
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/COPYING.LESSER)

# Debian-compatible naming by default
if(CMAKE_SIZEOF_VOID_P MATCHES "8")
	set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE amd64)
else()
	set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE i386)
endif()
set(CPACK_PACKAGE_FILE_NAME ${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE})
set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-all-dev (>=1.54)")
set(CPACK_DEBIAN_PACKAGE_DESCRIPTION "SHARK is a modular C++ library for the design and optimization of adaptive systems. It provides methods for linear and nonlinear optimization, in particular evolutionary and gradient-based algorithms, kernel-based learning algorithms and neural networks, and various other machine learning techniques. SHARK serves as a toolbox to support real world applications as well as research in different domains of computational intelligence and machine learning. The sources are compatible with the following platforms: Windows, Solaris, MacOS X, and Linux.")
set( CPACK_DEBIAN_PACKAGE_MAINTAINER "Christian Igel " )

#####################################################################
#	Adjust include, lib, example and doc paths for installation
#####################################################################
if( UNIX )
	set( SHARK_INSTALL_INCLUDE_DIR include/ )
	set( SHARK_INSTALL_LIB_DIR lib/ )
	set( SHARK_INSTALL_CONTRIB_DIR share/shark/contrib/ )
	set( SHARK_INSTALL_EXAMPLE_DIR share/shark/examples/ )
	set( SHARK_INSTALL_DOC_DIR share/shark/doc/ )
else()
	set( SHARK_INSTALL_INCLUDE_DIR include/shark/ )
	set( SHARK_INSTALL_LIB_DIR lib/ )
	set( SHARK_INSTALL_CONTRIB_DIR contrib/ )
	set( SHARK_INSTALL_EXAMPLE_DIR examples/ )
	set( SHARK_INSTALL_DOC_DIR doc )
endif()

#####################################################################
#	Enable installer and package generation
#####################################################################
include( CPack )

#####################################################################
#	Explicit macro setup for debug configuration
#####################################################################
# enable or disable debugging, default is Release
if(NOT CMAKE_BUILD_TYPE)
	set(CMAKE_BUILD_TYPE "Release")
endif()
if (UNIX)
	add_compile_options("$<$:-Wall>")
endif()
list(APPEND COMPILE_DEFINITIONS_RELEASE NDEBUG)
message(STATUS "Will build: " ${CMAKE_BUILD_TYPE})

#####################################################################
#	Boost configuration
#####################################################################
set(Boost_USE_STATIC_LIBS OFF CACHE BOOL "use static libraries from Boost")
set(Boost_USE_MULTITHREADED ON)
add_definitions(-DBOOST_PARAMETER_MAX_ARITY=15)
add_definitions(-DBOOST_FILESYSTEM_VERSION=3)
# Should we link the boost test dynamically
if(NOT Boost_USE_STATIC_LIBS)
	add_definitions(-DBOOST_TEST_DYN_LINK)
	add_definitions(-DBOOST_ALL_DYN_LINK)
endif()
find_package(
	Boost 1.48.0 REQUIRED COMPONENTS
	system date_time filesystem program_options serialization thread unit_test_framework
)
if(NOT Boost_FOUND)
	message(FATAL_ERROR "Please make sure Boost 1.48.0 is installed on your system")
endif()
if (WIN32)
	# disable autolinking in boost
	add_definitions( -DBOOST_ALL_NO_LIB )
endif()
include_directories(SYSTEM ${Boost_INCLUDE_DIR} )
link_directories( ${Boost_LIBRARY_DIR} )
if(UNIX)
	find_library( PTHREAD_LIBRARY pthread )
endif()
# Set the libraries needed by Shark
list(APPEND LINK_LIBRARIES ${Boost_LIBRARIES} ${PTHREAD_LIBRARY})
mark_as_advanced(Boost_DIR PTHREAD_LIBRARY)
message( STATUS "Using boost from " ${Boost_LIBRARY_DIR} )
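# If Boost lives in a non-standard location, FindBoost can be pointed at it.
# A sketch (BOOST_ROOT is a standard FindBoost hint variable; the path is a
# placeholder):
#
#   cmake -DBOOST_ROOT=/opt/boost_1_55_0 ..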
) endif() ##################################################################### # HDF5 configuration ##################################################################### find_package(HDF5 COMPONENTS C CXX HL QUIET) mark_as_advanced(HDF5_DIR) if(HDF5_FOUND) if(HDF5_C_COMPILER_EXECUTABLE AND HDF5_CXX_COMPILER_EXECUTABLE) message(STATUS "Checking HDF5 installation: HDF5 installation seems ok.") include_directories( ${HDF5_INCLUDE_DIRS} ) list(APPEND LINK_LIBRARIES ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES}) else() message(STATUS "Checking HDF5 installation:HDF5 package might be broken.") if(NOT( HDF5_C_COMPILER_EXECUTABLE)) message(STATUS " C Compiler Extension not found.") endif() if(NOT( HDF5_CXX_COMPILER_EXECUTABLE)) message(STATUS " CXX Compiler Extension not found.") endif() message(STATUS "Disabling HDF5.") unset( HDF5_FOUND ) endif() else() message(STATUS "HDF5 not found, skip") endif() ##################################################################### # ATLAS configuration ##################################################################### option( ENABLE_CBLAS "Use Installed Linear Algebra Library" ON ) if( ENABLE_CBLAS ) set(CBLAS_VENDOR FALSE) if( APPLE ) set(CBLAS_VENDOR "Accelerate") set(CBLAS_INCLUDES "") set(CBLAS_LIBRARIES "-framework Accelerate" ) else() #todo: do a propper vendor check find_library(OPENBLAS_LIBRARY openblas HINTS ${CBLAS_ROOT}/lib /opt/local/lib ) find_library(CBLAS_LIBRARY cblas HINTS ${ATLAS_ROOT}/lib ${CBLAS_ROOT}/lib /opt/local/lib /usr/lib64/atlas/ ) find_library(CLAPACK_LIBRARY lapack HINTS ${ATLAS_ROOT}/lib ${CBLAS_ROOT}/lib /opt/local/lib /usr/lib64/atlas/ ) find_library(ATLAS_LIBRARY atlas HINTS ${ATLAS_ROOT}/lib ${CBLAS_ROOT}/lib /opt/local/lib /usr/lib64/atlas/ ) mark_as_advanced( OPENBLAS_LIBRARY CBLAS_LIBRARY CLAPACK_LIBRARY ATLAS_LIBRARY ) #find the cblas.h include path if(CBLAS_LIBRARY ) get_filename_component(CBLAS_LIB_PATH ${CBLAS_LIBRARY} PATH ) elseif( OPENBLAS_LIBRARY) get_filename_component(CBLAS_LIB_PATH ${OPENBLAS_LIBRARY} PATH ) endif() if(CBLAS_LIB_PATH) find_file(CBLAS_INCLUDES cblas.h PATHS ${CBLAS_LIB_PATH} ${CBLAS_LIB_PATH}/../include ) get_filename_component(CBLAS_INCLUDES ${CBLAS_INCLUDES} PATH ) endif() if(ATLAS_LIBRARY) get_filename_component(ATLAS_LIBRARY_PATH ${ATLAS_LIBRARY} PATH ) find_file(CLAPACK_INCLUDES clapack.h PATHS ${ATLAS_LIBRARY_PATH} ${ATLAS_LIBRARY_PATH}/../include ${ATLAS_LIBRARY_PATH}/../include/atlas ${ATLAS_LIBRARY_PATH}/../../include/atlas ) get_filename_component(CLAPACK_INCLUDES ${CLAPACK_INCLUDES} PATH ) set(CBLAS_INCLUDES ${CBLAS_INCLUDES} ${CLAPACK_INCLUDES}) endif() if( OPENBLAS_LIBRARY AND CBLAS_INCLUDES) set(CBLAS_VENDOR "OpenBLAS") set(CBLAS_LIBRARIES ${OPENBLAS_LIBRARY}) elseif( CBLAS_LIBRARY AND CLAPACK_LIBRARY AND ATLAS_LIBRARY AND CBLAS_INCLUDES) set(CBLAS_VENDOR "ATLAS") set(CBLAS_LIBRARIES ${CLAPACK_LIBRARY} ${CBLAS_LIBRARY} ${ATLAS_LIBRARY}) elseif( CBLAS_LIBRARY AND CBLAS_INCLUDES) #check that we can compile a basic program with the libraries we have found #vendor versions might come with additional libraries which would be bad. try_compile(CBLAS_COMPILE "${PROJECT_BINARY_DIR}/cBlasCheck" "${CMAKE_SOURCE_DIR}/cBlasCheck.cpp" CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CBLAS_INCLUDES}" LINK_LIBRARIES ${CBLAS_LIBRARY} ) if(CBLAS_COMPILE) set(CBLAS_VENDOR "GENERIC") set(CBLAS_LIBRARIES ${CBLAS_LIBRARY}) else() message(WARNING "Unknown CBLAS. 
#####################################################################
#	Static Code Analysis
#####################################################################
option(USE_CPPCHECK "Use CPPCheck Static Code Analysis" OFF)
mark_as_advanced(USE_CPPCHECK)
if(USE_CPPCHECK)
	find_program(CPP_CHECK cppcheck)
	mark_as_advanced(CPP_CHECK)
	if(CPP_CHECK)
		message(STATUS "cppcheck available for static code analysis." )
		add_custom_target(code_analysis
			${CPP_CHECK} --enable=all --xml --force -I${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR} 2> StaticAnalysis.xml
		)
	endif()
endif()

#####################################################################
#	GCov Code Analysis
#####################################################################
option(USE_GCOV_CHECK "GCov Coverage Check" OFF)
mark_as_advanced(USE_GCOV_CHECK)
if(USE_GCOV_CHECK)
	find_program(GCOV_CHECK gcov)
	if(CMAKE_BUILD_TYPE MATCHES "Release")
		message( FATAL_ERROR
			"\nYou specified a Release build. "
			"This does not make sense if coverage is enabled. "
			"Please change the build type to Debug or turn off coverage."
		)
	endif()
	message(STATUS "GCov available for coverage analysis. Configuring for coverage check." )
	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fprofile-arcs -ftest-coverage")
	set(CFLAGS "${CFLAGS} -g -O0 -Wall -W -fprofile-arcs -ftest-coverage")
	set(LDFLAGS "${LDFLAGS} -fprofile-arcs -ftest-coverage")
endif()
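# Both checks are driven from the configure line; a sketch of typical
# invocations (option and target names as defined above):
#
#   cmake -DUSE_CPPCHECK=ON .. && make code_analysis
#   cmake -DCMAKE_BUILD_TYPE=Debug -DUSE_GCOV_CHECK=ON ..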
#####################################################################
#	Adjust compiler flags and settings for MSVC
#####################################################################
if( MSVC )
	#4250: inherit via dominance
	add_definitions(/wd4250)
	#4251: needs to have dll-interface
	add_definitions(/wd4251)
	#4275: non-dll interface used as base for dll-interface class
	add_definitions(/wd4275)
	#4800: forcing value to bool (performance warning)
	add_definitions(/wd4800)
	#4308: negative integral constant converted to unsigned type
	add_definitions(/wd4308)
	string( REPLACE "/O2" "/Od" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" )
	set( CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" CACHE STRING "compiler-flags" FORCE )
	add_definitions(-D_USE_MATH_DEFINES)
	add_definitions(-DNOMINMAX)
	add_definitions(-D_CRT_SECURE_NO_WARNINGS)
	add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
	add_definitions(-D_SCL_SECURE_NO_WARNINGS)
	add_definitions(-D_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES=1)
endif()

#####################################################################
#	General Path settings
#####################################################################
include_directories( ${shark_SOURCE_DIR}/include )
include_directories( ${shark_BINARY_DIR}/include )
add_subdirectory( include )
add_subdirectory( src )

#####################################################################
#	Include Examples
#####################################################################
option( BUILD_EXAMPLES "Build example programs." ON )
add_subdirectory( examples )

#####################################################################
#	Include Unit Tests
#####################################################################
# Enable Testing
include(CTest)
if(BUILD_TESTING)
	enable_testing()
	add_subdirectory( Test )
endif()

#####################################################################
#	Include Documentation
#####################################################################
option(BUILD_DOCUMENTATION "Build documentation." OFF)
if(BUILD_DOCUMENTATION)
	add_subdirectory(doc)
endif()

###################################################################
#	CPACK PACKAGING
###################################################################
set(INSTALL_CMAKE_DIR lib/cmake/Shark/)

# Add all targets to the build-tree export set
export(TARGETS shark SharkVersion FILE "${shark_BINARY_DIR}/SharkTargets.cmake")

# Create the SharkConfig.cmake and SharkConfigVersion files
set(SHARK_USE_FILE "${PROJECT_SOURCE_DIR}/UseShark.cmake")
set(SHARK_TARGETS_FILE "${PROJECT_BINARY_DIR}/SharkTargets.cmake")
set(SHARK_LIBRARIES ${LINK_LIBRARIES} shark)
set(SHARK_LIBRARY_DIRS "${PROJECT_BINARY_DIR}/lib")
set(SHARK_INCLUDE_DIRS "${PROJECT_SOURCE_DIR}/include" "${PROJECT_BINARY_DIR}/include" "${Boost_INCLUDE_DIR}" ${EXTRA_INCLUDE_DIRECTORIES})

# Configure the files to be exported
configure_file(SharkConfig.cmake.in "${PROJECT_BINARY_DIR}/SharkConfig.cmake")
configure_file(SharkConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/SharkConfigVersion.cmake")
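# A sketch of how a client project could consume the exported configuration
# (the project and source names are placeholders; SHARK_USE_FILE and
# SHARK_LIBRARIES are variables provided by the generated SharkConfig.cmake):
#
#   find_package(Shark REQUIRED)
#   include(${SHARK_USE_FILE})
#   add_executable(my_app main.cpp)
#   target_link_libraries(my_app ${SHARK_LIBRARIES})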
# Configure the SharkConfig.cmake for the install tree
set(SHARK_CONFIG_CODE "
# Compute the installation prefix from this SharkConfig.cmake file location.
get_filename_component(SHARK_INSTALL_PREFIX \"\${CMAKE_CURRENT_LIST_FILE}\" PATH)")
# Construct the proper number of get_filename_component(... PATH)
# calls to compute the installation prefix.
string(REGEX REPLACE "/" ";" _count "${INSTALL_CMAKE_DIR}")
foreach(p ${_count})
	set(SHARK_CONFIG_CODE "${SHARK_CONFIG_CODE}
get_filename_component(SHARK_INSTALL_PREFIX \"\${SHARK_INSTALL_PREFIX}\" DIRECTORY)")
endforeach()
set(SHARK_CONFIG_CODE "${SHARK_CONFIG_CODE}")
set(SHARK_USE_FILE "\${SHARK_INSTALL_PREFIX}/${INSTALL_CMAKE_DIR}UseShark.cmake")
set(SHARK_TARGETS_FILE "\${SHARK_INSTALL_PREFIX}/${INSTALL_CMAKE_DIR}SharkTargets.cmake")
set(SHARK_LIBRARIES ${LINK_LIBRARIES} shark)
set(SHARK_LIBRARY_DIRS "\${SHARK_INSTALL_PREFIX}/lib")
set(SHARK_INCLUDE_DIRS "\${SHARK_INSTALL_PREFIX}/${SHARK_INSTALL_INCLUDE_DIR}" "${Boost_INCLUDE_DIR}" ${EXTRA_INCLUDE_DIRECTORIES})
configure_file(SharkConfig.cmake.in "${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/SharkConfig.cmake")

# Install the SharkConfig.cmake and SharkConfigVersion.cmake
install(
	FILES
	"${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/SharkConfig.cmake"
	"${PROJECT_BINARY_DIR}/SharkConfigVersion.cmake"
	"${PROJECT_SOURCE_DIR}/UseShark.cmake"
	DESTINATION "${INSTALL_CMAKE_DIR}"
	COMPONENT dev
)

# Install the export set for use with the install-tree
install(EXPORT SharkTargets DESTINATION "${INSTALL_CMAKE_DIR}" COMPONENT dev)

#####################################################################
#	Configure the Shark.h file
#####################################################################
configure_file (
	"${CMAKE_SOURCE_DIR}/include/shark/Core/Shark.h.in"
	"${CMAKE_BINARY_DIR}/include/shark/Core/Shark.h"
)
install(
	FILES "${CMAKE_BINARY_DIR}/include/shark/Core/Shark.h"
	DESTINATION "${SHARK_INSTALL_INCLUDE_DIR}/shark/Core/"
	COMPONENT dev
)

###################################################################
#	UNINSTALL
###################################################################
# refer to
# http://www.cmake.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F
# for details
configure_file(
	"${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in"
	"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
	IMMEDIATE @ONLY)
add_custom_target(uninstall
	COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
)

Shark-3.1.4/COPYING
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . Shark-3.1.4/COPYING.LESSER000066400000000000000000000167431314655040000146270ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". 
The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. Shark-3.1.4/CTestConfig.cmake000066400000000000000000000003451314655040000157410ustar00rootroot00000000000000set(CTEST_PROJECT_NAME "Shark") set(CTEST_NIGHTLY_START_TIME "01:00:00 UTC") set(CTEST_DROP_METHOD "http") set(CTEST_DROP_SITE "my.cdash.org") set(CTEST_DROP_LOCATION "/submit.php?project=Shark") set(CTEST_DROP_SITE_CDASH TRUE) Shark-3.1.4/README.txt000066400000000000000000000211671314655040000142720ustar00rootroot00000000000000Shark is a fast, modular, general open-source C++ machine learning library. Shark is licensed under the GNU Lesser General Public License, please see the files COPYING and COPYING.LESSER, or visit http://www.gnu.org/licenses . 
Any application of the SHARK code toward military research and use is expressly against the wishes of the SHARK development team. INSTALLATION / DOCUMENTATION ---------------------------- The entry point to the Shark library documentation is located at doc/index.html. For installation instructions, please click on "Getting started" on that page. Short version of the installation guide: issue "ccmake ." in the main directory to select your build options, and afterwards issue "make" in the main directory -- you should be done (assuming Boost and CMake are installed). See the documentation for detailed instructions. BUILDING THE DOCUMENTATION: To build the documentation yourself (e.g., because you have no internet access and need to read it locally), see doc/README.txt. FILE STRUCTURE -------------- README.txt This file (residing in the root directory of the Shark library). CMakeLists.txt Definitions for the CMake build system. include/ This directory and its sub-directories hold all include files of the library. Note that some functionality is implemented in lower-level Impl/ folders and inline .inl files. lib/ The Shark library is placed in this directory. In the source code distribution this directory is initially empty, and the library is placed into the directory as the result of compilation. Binary distributions already contain the library, pre-built in release mode. doc/ All documentation files are found in this sub-directory. In packaged versions of Shark the html documentation is pre-built; the repository provides the corresponding sources. The documentation contains technical reference documents for all classes and functions as well as a collection of introductory and advanced tutorials. doc/index.html Entry point to the Shark documentation. examples/ The examples directory contains example use-cases of the most important algorithms implemented in Shark. Besides exemplifying powerful learning algorithms, these programs are intended as starting points for experimentation with the library. The executables corresponding to the C++ example programs are found in examples/bin/. Test/ Shark comes with a large collection of unit tests, all of which reside inside the Test directory. bin/ The binaries of the Shark unit tests are placed here. Once the CMake build system is set up (with the "ccmake" command or equivalent), the whole test suite can be executed with the command "make test", issued in the Shark root directory. src/ Source files of the Shark library. Note that from Shark version 3 onwards, large parts of the library are templated and therefore header-only. contrib/ The contrib directory contains (non-standard) tools by third parties. Typically, there is no need for users of Shark to deal with these tools directly. gpl-3.0.txt GNU General Public License, version 3. Note: Depending on the type of Shark distribution (binary or source package, or current repository snapshot), not all of these files and directories are present. PACKAGE STRUCTURE ----------------- >> Note for users of Shark 2: << The internal structure of the Shark library has changed in the transition to version 3. The old infrastructure packages Array, Rng, and FileUtil, as well as parts of LinAlg, have been replaced with more modern solutions provided by Boost. The machine learning related components EALib, MOO-EALib, Mixture, ReClaM, and TimeSeries have been unified and organized into completely new interfaces. 
Therefore, there is no one-to-one correspondence between files or even concepts in version 3 and in older versions of Shark. In fact, the lion's share of the library has been rewritten from scratch, and this is also reflected in a completely new structure. In particular, many of the rather independent sub-modules (such as Mixture and MOO-EALib) have been unified. They now share the same top-level interfaces and thus form a coherent learning architecture. The organization of the include/ directory reflects the structure of the Shark library. It consists of the following modules: GENERAL INFRASTRUCTURE: Rng Random number generator. The interface of this component has remained nearly unchanged since early versions of Shark, but under the hood Shark 3 has switched to Boost as the back-end. LinAlg Data structures and algorithms for typical linear algebra computations. For (dense and sparse) vector and matrix classes Shark relies on Boost uBLAS. Many higher-level algorithms (such as singular value decomposition) are still implemented by the library itself. Statistics This component is new in Shark 3. It wraps the capabilities of Boost accumulators, and it provides tools that appear regularly in machine learning, such as the Mann-Whitney U-test (also known as the Wilcoxon rank-sum test). LEARNING INFRASTRUCTURE: Core The core module is the central place for all top-level interfaces. In addition, it holds a few infrastructure classes, such as exceptions. Data The data module hosts data containers that have been specifically designed for the needs of machine learning code. Data can also be imported from and exported to various standard machine learning file formats. MACHINE LEARNING: Models Models are adaptive systems, the architectures on top of which (machine) learning happens. Shark features a rich set of models, from simple linear maps to (feed-forward and recurrent) neural networks, support vector machines, and different types of trees. Models can also be concatenated with data format converters and other models. ObjectiveFunctions This module collects different types of cost, fitness, or objective functions for learning. The range includes data-dependent error functions based on simple loss functions, cross-validation, area under the ROC curve, and various objectives used for model selection. Algorithms All actual learning algorithms reside in this module. There are two main groups of learning algorithms, namely iterative optimizers and more specialized model trainers. General optimizers are organized into direct search and gradient-based optimization. Specialized algorithms for linear programming (a part of GLPK, the GNU linear programming kit) and quadratic programming for training non-linear support vector machines are included. Shark also ships with algorithms for efficient nearest neighbor search. Fuzzy The fuzzy module provides classes for the representation of linguistic terms, variables, operators and rules, as well as fuzzy logic inference engines and controllers. Unsupervised This module contains the Shark implementation of restricted Boltzmann machines (RBMs), a recent experimental feature of Shark. 
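To give a flavor of how these modules interact, here is a minimal sketch (not part of the distribution; the header paths and the Sphere benchmark class are assumed from the include/ layout described above and from the unit tests in Test/, not confirmed against this release). It minimizes a benchmark objective from the ObjectiveFunctions module with the CMA evolution strategy from the Algorithms module:

    #include <shark/Algorithms/DirectSearch/CMA.h>          // Algorithms module (assumed path)
    #include <shark/ObjectiveFunctions/Benchmarks/Sphere.h> // ObjectiveFunctions module (assumed path)
    #include <iostream>

    int main() {
        shark::Sphere sphere(10);   // 10-dimensional benchmark objective
        sphere.init();
        shark::CMA cma;
        cma.init(sphere);           // lets the objective propose a starting point
        for (unsigned int i = 0; i != 2000 && cma.solution().value > 1e-10; ++i)
            cma.step(sphere);       // one generation of sampling and adaptation
        std::cout << "best value: " << cma.solution().value << std::endl;
    }

The same init/step/solution pattern is shared by all iterative optimizers in the Algorithms module, as the unit tests below illustrate. 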
Shark-3.1.4/SharkConfig.cmake.in000066400000000000000000000022101314655040000163650ustar00rootroot00000000000000# - Config file for the Shark package # It defines the following variables # SHARK_INCLUDE_DIRS - include directories for SHARK # SHARK_LIBRARIES - libraries to link against # SHARK_LIBRARY_DIRS - path to the libraries @SHARK_CONFIG_CODE@ set(SHARK_INCLUDE_DIRS "@SHARK_INCLUDE_DIRS@") set(SHARK_LIBRARY_DIRS "@SHARK_LIBRARY_DIRS@") # Our library dependencies (contains definitions for IMPORTED targets) include("@SHARK_TARGETS_FILE@") # The Shark version number set(SHARK_VERSION_MAJOR "@SHARK_VERSION_MAJOR@") set(SHARK_VERSION_MINOR "@SHARK_VERSION_MINOR@") set(SHARK_VERSION_PATCH "@SHARK_VERSION_PATCH@") # The C and C++ flags added by Shark to the cmake-configured flags. SET(SHARK_REQUIRED_C_FLAGS "@SHARK_REQUIRED_C_FLAGS@") SET(SHARK_REQUIRED_CXX_FLAGS "@SHARK_REQUIRED_CXX_FLAGS@") SET(SHARK_REQUIRED_EXE_LINKER_FLAGS "@SHARK_REQUIRED_EXE_LINKER_FLAGS@") SET(SHARK_REQUIRED_SHARED_LINKER_FLAGS "@SHARK_REQUIRED_SHARED_LINKER_FLAGS@") SET(SHARK_REQUIRED_MODULE_LINKER_FLAGS "@SHARK_REQUIRED_MODULE_LINKER_FLAGS@") # The location of the UseShark.cmake file. SET(SHARK_USE_FILE "@SHARK_USE_FILE@") set(SHARK_LIBRARIES "@SHARK_LIBRARIES@")Shark-3.1.4/SharkConfigVersion.cmake.in000066400000000000000000000005711314655040000177430ustar00rootroot00000000000000set(PACKAGE_VERSION "@SHARK_VERSION@") # Check whether the requested PACKAGE_FIND_VERSION is compatible if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") set(PACKAGE_VERSION_COMPATIBLE FALSE) else() set(PACKAGE_VERSION_COMPATIBLE TRUE) if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") set(PACKAGE_VERSION_EXACT TRUE) endif() endif()Shark-3.1.4/Test/000077500000000000000000000000001314655040000135045ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/000077500000000000000000000000001314655040000156155ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/DirectSearch/000077500000000000000000000000001314655040000201555ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/DirectSearch/CMA.cpp000066400000000000000000000050671314655040000212710ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_CMA #include #include #include #include #include #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_CMA) BOOST_AUTO_TEST_CASE( CMA_Cigar ) { Cigar function(3); CMA optimizer; ElitistCMA elitistCMA; std::cout<<"Testing: "< 1E5); } BOOST_AUTO_TEST_CASE( CMA_Sphere_Niko ) { Rng::seed(42); const unsigned N = 10; RealVector x0(10, 0.1); Sphere sphere(N); sphere.init(); CMA cma; cma.setInitialSigma(1.e-4); cma.init(sphere, x0); BOOST_REQUIRE(cma.sigma() == 1.e-4); bool sigmaHigh = false; bool condHigh = false; for(unsigned i=0; i<1500; i++) { cma.step( sphere ); if(cma.sigma() > 0.01) sigmaHigh = true; if(cma.condition() > 20) condHigh = true; } BOOST_CHECK(cma.solution().value < 1E-9); BOOST_CHECK(sigmaHigh); BOOST_CHECK(!condHigh); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/CMSA.cpp000066400000000000000000000031551314655040000214100ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_CMSA #include #include #include #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_CMSA) BOOST_AUTO_TEST_CASE( CMSA_Cigar ) { Cigar function(3); CMSA optimizer; std::cout<<"Testing: "< #include #include #include 
#include #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_CrossEntropyMethod) BOOST_AUTO_TEST_CASE( CrossEntropyMethod_Cigar ) { Cigar function(3); CrossEntropyMethod optimizer; std::cout<<"Testing: "< #include #include #include #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_ElitistCMA) BOOST_AUTO_TEST_CASE( ElitistCMA_Cigar ) { Cigar function(3); ElitistCMA optimizer; optimizer.activeUpdate() = true; std::cout<<"Testing: "< #include #include #include #include using namespace shark; //randomly creates populations of individuals, sorts them and checks that the ranks are okay. BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_FastNonDominatedSort) BOOST_AUTO_TEST_CASE( FastNonDominatedSort_Test ) { std::size_t numPoints = 50; std::size_t numTrials = 10; std::size_t numDims = 3; for(std::size_t t = 0; t != numTrials; ++t){ //create points std::vector > population(numPoints); for(std::size_t i = 0; i != numPoints; ++i){ population[i].penalizedFitness().resize(numDims); for(std::size_t j = 0; j != numDims; ++j){ population[i].penalizedFitness()[j]= Rng::uni(-1,2); } } FastNonDominatedSort sorter; sorter(population); ParetoDominanceComparator pdc; //check that ranks are okay for(std::size_t i = 0; i != numPoints; ++i){ for(std::size_t j = 0; j != numPoints; ++j){ int comp = pdc(population[i],population[j]); if(comp > 1){//i dominates j BOOST_CHECK(population[i].rank() < population[j].rank() ); } else if (comp < -1){//j dominates i BOOST_CHECK(population[i].rank() > population[j].rank() ); } if(population[i].rank() == population[j].rank()){ BOOST_CHECK(comp == 1 || comp == -1); } } } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/Indicators/000077500000000000000000000000001314655040000222545ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/DirectSearch/Indicators/HypervolumeIndicator.cpp000066400000000000000000000032651314655040000271420ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_HypervolumeIndicator #include #include #include #include #include #include #include using namespace shark; //checks for random sets that the least contributor actually has the least contribution BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_Indicators_HypervolumeIndicator) BOOST_AUTO_TEST_CASE( HypervolumeIndicator_Consistency ) { std::size_t numPoints = 10; std::size_t numTrials = 50; std::size_t numDims = 2; for(std::size_t t = 0; t != numTrials; ++t){ //create points std::vector > population(numPoints); for(std::size_t i = 0; i != numPoints; ++i){ population[i].penalizedFitness().resize(numDims); for(std::size_t j = 0; j != numDims; ++j){ population[i].penalizedFitness()[j]= Rng::uni(-1,10); } } RealVector ref(numDims,11); HypervolumeIndicator indicator; RealVector volumes(numPoints); double maxVolume = -std::numeric_limits::max(); for(std::size_t i = 0; i != numPoints; ++i){ std::vector > copy = population; copy.erase(copy.begin()+i); double volume = indicator(FitnessExtractor(),copy,ref); volumes[i] =volume; if(maxVolume < volume){ maxVolume = volume; } } std::size_t indicated = indicator.leastContributor(FitnessExtractor(),population,ref); BOOST_CHECK_CLOSE(maxVolume,volumes[indicated],1.e-10); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/MOCMA.cpp000066400000000000000000000054351314655040000215240ustar00rootroot00000000000000#define BOOST_TEST_MODULE 
DirectSearch_MOCMA #include #include #include #include #include #include #include using namespace shark; struct PointExtractor{ template RealVector const& operator()(T const& arg)const{ return arg.value; } }; void testObjectiveFunctionMOO( MultiObjectiveFunction& f, std::size_t mu, double targetVolume, std::size_t iterations, RealVector const& reference ){ MOCMA mocma; mocma.mu() = mu; f.init(); mocma.init(f); for(std::size_t i = 0; i != iterations; ++i){ mocma.step(f); //~ std::clog<<"\r"<> mocma2) ); Rng::seed( 1 ); FastRng::seed( 1 ); mocma.step( dtlz1 ); MOCMA::SolutionType set1 = mocma.solution(); Rng::seed( 1 ); FastRng::seed( 1 ); mocma2.step( dtlz1 ); MOCMA::SolutionType set2 = mocma2.solution(); for( unsigned int i = 0; i < set1.size(); i++ ) { BOOST_CHECK_SMALL( norm_2( set1.at( i ).value - set2.at( i ).value ), 1E-20 ); BOOST_CHECK_SMALL( norm_2( set1.at( i ).point- set2.at( i ).point), 1E-20 ); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/000077500000000000000000000000001314655040000221335ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/Mutation/000077500000000000000000000000001314655040000237335ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/Mutation/BitflipMutation.cpp000066400000000000000000000023731314655040000275560ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_Recombination #include #include #include #include #include BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_Operators_Mutation_BitflipMutation) BOOST_AUTO_TEST_CASE( BitflipMutation ) { std::size_t n = 10; std::size_t trials = 1000; shark::BitflipMutator flip; flip.m_mutationStrength = 0.3; std::vector flipCount(n,0); shark::Individual< std::vector< bool >,double > ind1, ind2; ind1.searchPoint() = std::vector< bool >( n, false ); ind2.searchPoint() = ind1.searchPoint(); for(std::size_t t = 0; t != trials; ++t){ flip(shark::Rng::globalRng, ind2); for(std::size_t i = 0; i != n; ++i){ flipCount[i] += (ind1.searchPoint()[i] == ind2.searchPoint()[i])? 0:1; } ind1 = ind2; } //check that not too many were flipped unsigned int minFlip = (unsigned int)(0.25*trials); unsigned int maxFlip = (unsigned int)(0.35*trials); for(std::size_t i = 0; i != n; ++i){ BOOST_CHECK(flipCount[i] >= minFlip); BOOST_CHECK(flipCount[i] <= maxFlip); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/PenalizingEvaluator.cpp000066400000000000000000000125261314655040000266300ustar00rootroot00000000000000#define BOOST_TEST_MODULE DIRECTSEARCH_PENALIZING_EVALUATOR #include #include #include #include #include using namespace shark; struct TestIndividualMOO{ RealVector m_point; RealVector m_penalizedFitness; RealVector m_unpenalizedFitness; RealVector& penalizedFitness(){ return m_penalizedFitness; } RealVector& unpenalizedFitness(){ return m_unpenalizedFitness; } RealVector const& searchPoint()const{ return m_point; } }; struct TestIndividualSOO{ RealVector m_point; double m_penalizedFitness; double m_unpenalizedFitness; double& penalizedFitness(){ return m_penalizedFitness; } double& unpenalizedFitness(){ return m_unpenalizedFitness; } RealVector const& searchPoint()const{ return m_point; } }; struct SOOTestFunction : public SingleObjectiveFunction { SOOTestFunction(std::size_t numVariables) : m_rosenbrock(numVariables), m_handler(numVariables,0,1) { announceConstraintHandler(&m_handler); } /// \brief From INameable: return the class name. 
std::string name() const { return "SOOTestFunction"; } std::size_t numberOfVariables()const{ return m_rosenbrock.numberOfVariables(); } ResultType eval( const SearchPointType & x ) const { return m_rosenbrock.eval(x); } private: Rosenbrock m_rosenbrock; BoxConstraintHandler m_handler; }; //check that feasible points are not penalized BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_Operators_PenalizingEvaluator) BOOST_AUTO_TEST_CASE( PenalizingEvaluator_SingleObjective_Feasible ) { PenalizingEvaluator evaluator; evaluator.m_penaltyFactor = 1000;//make errors obvious! SOOTestFunction objective(10); for(std::size_t i = 0; i != 1000; ++i){ TestIndividualSOO tester; tester.m_point=objective.proposeStartingPoint(); BOOST_REQUIRE(objective.isFeasible(tester.m_point)); double fitness = objective(tester.m_point); evaluator(objective,tester); BOOST_CHECK_EQUAL(fitness,tester.m_penalizedFitness); BOOST_CHECK_EQUAL(fitness,tester.m_unpenalizedFitness); } } //check that infeasible points in the single objective case are penalized correctly BOOST_AUTO_TEST_CASE( PenalizingEvaluator_SingleObjective_Infeasible ) { BoxConstraintHandler generator(10,-5,5); PenalizingEvaluator evaluator; SOOTestFunction objective(10); //over 1000 infeasible points (we skip feasible ones) std::size_t trials = 0; while(trials < 1000){ TestIndividualSOO tester; generator.generateRandomPoint(tester.m_point); RealVector corrected=tester.m_point; objective.closestFeasible(corrected); BOOST_REQUIRE(objective.isFeasible(corrected)); double fitness = objective(corrected); evaluator.m_penaltyFactor = Rng::uni(0.1,1);//make errors obvious! evaluator(objective,tester); //calculate the expected penalty double expectedPenalty = evaluator.m_penaltyFactor*norm_sqr(tester.m_point-corrected); BOOST_CHECK_CLOSE(fitness,tester.m_unpenalizedFitness,1.e-10); BOOST_CHECK_CLOSE(tester.m_penalizedFitness-fitness,expectedPenalty, 1.e-10); ++trials; } } //check that feasible points are not penalized BOOST_AUTO_TEST_CASE( PenalizingEvaluator_MultiObjective_Feasible ) { PenalizingEvaluator evaluator; evaluator.m_penaltyFactor = 1000;//make errors obvious! ZDT1 objective(10); for(std::size_t i = 0; i != 1000; ++i){ TestIndividualMOO tester; tester.m_point=objective.proposeStartingPoint(); BOOST_REQUIRE(objective.isFeasible(tester.m_point)); RealVector fitness = objective(tester.m_point); evaluator(objective,tester); BOOST_REQUIRE_EQUAL(tester.m_penalizedFitness.size(), fitness.size()); BOOST_REQUIRE_EQUAL(tester.m_unpenalizedFitness.size(), fitness.size()); for(std::size_t i = 0; i != fitness.size(); ++i){ BOOST_CHECK_EQUAL(fitness(i),tester.m_penalizedFitness(i)); BOOST_CHECK_EQUAL(fitness(i),tester.m_unpenalizedFitness(i)); } } } //check that infeasible points in the multi objective case are penalized correctly BOOST_AUTO_TEST_CASE( PenalizingEvaluator_MultiObjective_Infeasible ) { BoxConstraintHandler generator(10,-5,5); PenalizingEvaluator evaluator; ZDT1 objective(10); //over 1000 infeasible points (we skip feasible ones) std::size_t trials = 0; while(trials < 1000){ TestIndividualMOO tester; generator.generateRandomPoint(tester.m_point); if(objective.isFeasible(tester.m_point)) continue;//don't increase trial counters RealVector corrected=tester.m_point; objective.closestFeasible(corrected); BOOST_REQUIRE(objective.isFeasible(corrected)); RealVector fitness = objective(corrected); evaluator.m_penaltyFactor = Rng::uni(0.1,1);//make errors obvious! 
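// Note (explanatory comment added for clarity; it mirrors the expectedPenalty
// computation a few lines further down): the contract checked below is that the
// evaluator leaves the unpenalized fitness at f_i(x'), where x' is the closest
// feasible point to the infeasible x, and adds the same quadratic penalty to
// every objective, i.e. penalizedFitness_i = f_i(x') + m_penaltyFactor * norm_sqr(x - x').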
evaluator(objective,tester); BOOST_REQUIRE_EQUAL(tester.m_penalizedFitness.size(), fitness.size()); BOOST_REQUIRE_EQUAL(tester.m_unpenalizedFitness.size(), fitness.size()); //calculate the expected penalty on every dimension double expectedPenalty = evaluator.m_penaltyFactor*norm_sqr(tester.m_point-corrected); for(std::size_t i = 0; i != fitness.size(); ++i){ BOOST_CHECK_CLOSE(fitness(i),tester.m_unpenalizedFitness(i),1.e-10); BOOST_CHECK_CLOSE(tester.m_penalizedFitness(i)-fitness(i),expectedPenalty, 1.e-10); } ++trials; } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/Selection/000077500000000000000000000000001314655040000240605ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/Selection/IndicatorBasedSelection.cpp000066400000000000000000000053761314655040000313200ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_IndicatorBasedSelection #include #include #include #include #include #include using namespace shark; //generate random populations and check that the selection chooses the right ones. BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_Operators_Selection_IndicatorBasedSelection) BOOST_AUTO_TEST_CASE( IndicatorBasedSelection_Test ) { std::size_t numPoints = 50; std::size_t numTrials = 100; std::size_t numDims = 3; typedef IndicatorBasedSelection Selection; for(std::size_t t = 0; t != numTrials; ++t){ //create points std::vector > population(numPoints); for(std::size_t i = 0; i != numPoints; ++i){ population[i].penalizedFitness().resize(numDims); for(std::size_t j = 0; j != numDims; ++j){ population[i].penalizedFitness()[j]= Rng::uni(-1,2); } } std::size_t mu = Rng::discrete(1,numPoints-2); //store copy and compute ranks std::vector > popCopy = population; FastNonDominatedSort sorter; sorter(popCopy); //store ranks in container and sort std::vector ranks(numPoints); for(std::size_t i = 0; i != numPoints; ++i){ ranks[i] = popCopy[i].rank(); } std::sort(ranks.begin(),ranks.end()); //no selected individual can have higher rank. std::size_t maxRank = ranks[mu]; //do the selection Selection selection; selection(population,mu); //check1: ranks are the same! 
for(std::size_t i = 0; i != numPoints; ++i){ BOOST_CHECK_EQUAL(population[i].rank(), popCopy[i].rank()); } //check2: we selected the right amount //check3: no selected individual has higher rank than maxrank std::size_t numSelected = 0; for(std::size_t i = 0; i != numPoints; ++i){ if(population[i].selected()){ ++numSelected; BOOST_CHECK(population[i].rank() <= maxRank); } } BOOST_CHECK_EQUAL(numSelected,mu); //~ //check that ranks are okay //~ for(std::size_t i = 0; i != numPoints; ++i){ //~ for(std::size_t j = 0; j != numPoints; ++j){ //~ int comp = pdc(population[i],population[j]); //~ if(comp > 1){//i dominates j //~ BOOST_CHECK(population[i].rank() < population[j].rank() ); //~ } else if (comp < -1){//j dominates i //~ BOOST_CHECK(population[i].rank() > population[j].rank() ); //~ } //~ if(population[i].rank() == population[j].rank()){ //~ BOOST_CHECK(comp == 1 || comp == -1); //~ } //~ } //~ } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/Operators/Selection/Selection.cpp000066400000000000000000000164331314655040000265200ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_Selection #include #include #include #include #include #include #include struct FitnessComparator { template bool operator()( const Individual & a, const Individual & b ) { return a.unpenalizedFitness() < b.unpenalizedFitness(); } }; struct NumberComparator { template bool operator()( const Individual & a, const Individual & b ) { return a < b; } }; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_Operators_Selection_Selection) BOOST_AUTO_TEST_CASE( Tournament_Selection ) { double pop[] = { 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; shark::TournamentSelection ts; ts.tournamentSize = 10; std::vector counter(11,0); for( unsigned int i = 0; i < 10000; i++ ) { counter[ std::distance( pop, ts(shark::Rng::globalRng, pop, pop + 11 ) ) ]++; } std::copy( counter.begin(), counter.begin() + 11, std::ostream_iterator( std::cout, "," ) ); BOOST_CHECK( counter[ 10 ] > counter[ 0 ] ); } BOOST_AUTO_TEST_CASE( RouletteWheel_Selection ) { double pop[] = { 0,1,2,3,4,5,6,7,8,9,10 }; shark::RouletteWheelSelection ts; shark::RealVector prob(11,0); for(std::size_t i = 0; i != 11; ++i){ prob(i) = shark::Rng::uni(0.5,1); } prob/=sum(prob); shark::RealVector hist(11,0); for( unsigned int i = 0; i < 1000000; i++ ) hist[ *ts(shark::Rng::globalRng, pop, pop + 11,prob) ]+=1.0; hist /=1000000; for(std::size_t i = 0; i != 11; ++i){ BOOST_CHECK_CLOSE(hist[i],prob[i], 1.0); } } //~ BOOST_AUTO_TEST_CASE( LinearRanking ) { //~ typedef shark::TypedIndividual< std::string > Individual; //~ typedef std::vector< Individual > Population; //~ Population parents( 10 ); //~ for( Population::iterator it = parents.begin(); it != parents.end(); ++it ) { //~ it->unpenalizedFitness() = std::distance( parents.begin(), it ); //~ **it = ( boost::format( "Parent_%1%" ) % std::distance( parents.begin(), it ) ).str(); //~ } //~ Population offspring( 10 ); //~ for( Population::iterator it = offspring.begin(); it != offspring.end(); ++it ) { //~ it->unpenalizedFitness() = std::distance( offspring.begin(), it ); //~ **it = ( boost::format( "Offspring_%1%" ) % std::distance( offspring.begin(), it ) ).str(); //~ } //~ std::sort( parents.begin(), parents.end(), FitnessComparator() ); //~ std::sort( offspring.begin(), offspring.end(), FitnessComparator() ); //~ Population newParents( 10 ); //~ shark::LinearRankingSelection< shark::tag::UnpenalizedFitness > lrs; //~ lrs( //~ parents.begin(), //~ parents.end(), //~ offspring.begin(), //~ 
offspring.end(), //~ newParents.begin(), //~ newParents.end(), //~ 3. ); //~ std::cout << "########## LINEAR RANKING ##########" << std::endl; //~ for( Population::iterator it = newParents.begin(); it != newParents.end(); ++it ) //~ std::cout << "Individual: " << **it << std::endl; //~ lrs( //~ parents, //~ offspring, //~ newParents, //~ 3. ); //~ std::cout << "########## LINEAR RANKING (Ranges) ##########" << std::endl; //~ for( Population::iterator it = newParents.begin(); it != newParents.end(); ++it ) //~ std::cout << "Individual: " << **it << std::endl; //~ } //~ BOOST_AUTO_TEST_CASE( UniformRanking ) { //~ typedef shark::Individual< std::string,double > Individual; //~ typedef std::vector< Individual > Population; //~ Population parents( 10 ); //~ for( Population::iterator it = parents.begin(); it != parents.end(); ++it ) { //~ it->unpenalizedFitness() = std::distance( parents.begin(), it ); //~ **it = ( boost::format( "Parent_%1%" ) % std::distance( parents.begin(), it ) ).str(); //~ } //~ Population offspring( 10 ); //~ for( Population::iterator it = offspring.begin(); it != offspring.end(); ++it ) { //~ it->unpenalizedFitness() = std::distance( offspring.begin(), it ); //~ **it = ( boost::format( "Offspring_%1%" ) % std::distance( offspring.begin(), it ) ).str(); //~ } //~ std::sort( parents.begin(), parents.end(), FitnessComparator() ); //~ std::sort( offspring.begin(), offspring.end(), FitnessComparator() ); //~ Population newParents( 10 ); //~ shark::UniformRankingSelection< shark::tag::UnpenalizedFitness > urs; //~ urs( //~ parents.begin(), //~ parents.end(), //~ offspring.begin(), //~ offspring.end(), //~ newParents.begin(), //~ newParents.end() ); //~ std::cout << "########## UNIFORM RANKING ##########" << std::endl; //~ for( Population::iterator it = newParents.begin(); it != newParents.end(); ++it ) //~ std::cout << "Individual: " << **it << std::endl; //~ urs( //~ parents, //~ offspring, //~ newParents ); //~ std::cout << "########## UNIFORM RANKING (Ranges) ##########" << std::endl; //~ for( Population::iterator it = newParents.begin(); it != newParents.end(); ++it ) //~ std::cout << "Individual: " << **it << std::endl; //~ } /* BOOST_AUTO_TEST_CASE( EPTournament ) { typedef shark::TypedIndividual< std::string > Individual; typedef std::vector< Individual > Population; Population parents( 10 ); for( Population::iterator it = parents.begin(); it != parents.end(); ++it ) { it->unpenalizedFitness() = std::distance( parents.begin(), it ); **it = ( boost::format( "Parent_%1%" ) % std::distance( parents.begin(), it ) ).str(); } Population offspring( 10 ); for( Population::iterator it = offspring.begin(); it != offspring.end(); ++it ) { it->unpenalizedFitness() = std::distance( offspring.begin(), it ); **it = ( boost::format( "Offspring_%1%" ) % std::distance( offspring.begin(), it ) ).str(); } std::sort( parents.begin(), parents.end(), FitnessComparator() ); std::sort( offspring.begin(), offspring.end(), FitnessComparator() ); Population newParents( 10 ); shark::EPTournamentSelection< shark::tag::UnpenalizedFitness > ept; ept( parents.begin(), parents.end(), offspring.begin(), offspring.end(), newParents.begin(), newParents.end(), 3 ); std::cout << "########## EP Tournament Selection ##########" << std::endl; for( Population::iterator it = newParents.begin(); it != newParents.end(); ++it ) std::cout << "Individual: " << **it << std::endl; ept( parents, offspring, newParents, 3 ); std::cout << "########## EP Tournament Selection (Ranges) ##########" << std::endl; for( 
Population::iterator it = newParents.begin(); it != newParents.end(); ++it ) std::cout << "Individual: " << **it << std::endl; }*/ BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/ParetoDominanceComparator.cpp000066400000000000000000000071441314655040000257670ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_FastNonDominatedSort #include #include #include #include #include using namespace shark; //check that the relation is correct on a number of selected points BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_ParetoDominanceComparator) BOOST_AUTO_TEST_CASE( FastNonDominatedSort_Selected_Points_Test ) { RealVector p0(3); p0[0] = -1; p0[1] = 0; p0[2] = 1; RealVector p1(3);//strictly dominated by p0 p1[0] = 1; p1[1] = 2; p1[2] = 3; RealVector p2(3);//weakly dominated by p0 p2[0] = -1; p2[1] = 2; p2[2] = 3; RealVector p3(3);//trade off with p0 p3[0] = -2; p3[1] = 0; p3[2] = 2; typedef ParetoDominanceComparator Dominance; Dominance pdc; BOOST_CHECK_EQUAL(pdc(p0,p0), Dominance::A_EQUALS_B); BOOST_CHECK_EQUAL(pdc(p0,p3), Dominance::TRADE_OFF); BOOST_CHECK_EQUAL(pdc(p0,p1), Dominance::A_STRICTLY_DOMINATES_B); BOOST_CHECK_EQUAL(pdc(p1,p0), Dominance::B_STRICTLY_DOMINATES_A); BOOST_CHECK_EQUAL(pdc(p0,p2), Dominance::A_WEAKLY_DOMINATES_B); BOOST_CHECK_EQUAL(pdc(p2,p0), Dominance::B_WEAKLY_DOMINATES_A); } //randomly creates populations of individuals, sorts them and checks that the domination relation is //correct BOOST_AUTO_TEST_CASE( FastNonDominatedSort_Random_Test ) { std::size_t numPoints = 20; std::size_t numTrials = 10; std::size_t numDims = 3; for(std::size_t t = 0; t != numTrials; ++t){ //create points std::vector population(numPoints); for(std::size_t i = 0; i != numPoints; ++i){ population[i].resize(numDims); for(std::size_t j = 0; j != numDims; ++j){ population[i][j]= Rng::uni(-1,2); } } typedef ParetoDominanceComparator Dominance; Dominance pdc; //check that ranks are okay for(std::size_t i = 0; i != numPoints; ++i){ for(std::size_t j = 0; j != numPoints; ++j){ //test all 6 results int comp = pdc(population[i],population[j]); if( comp == Dominance::A_STRICTLY_DOMINATES_B){ for(std::size_t k = 0; k != numDims; ++k){ BOOST_CHECK(population[i][k] < population[j][k]); } } if( comp == Dominance::B_STRICTLY_DOMINATES_A){ for(std::size_t k = 0; k != numDims; ++k){ BOOST_CHECK(population[j][k] < population[i][k]); } } if( comp == Dominance::A_WEAKLY_DOMINATES_B){ bool equality = false; for(std::size_t k = 0; k != numDims; ++k){ BOOST_CHECK(population[i][k] <= population[j][k]); if(population[i][k]==population[j][k]) equality=true; BOOST_CHECK_EQUAL(equality,true); } } if( comp == Dominance::B_WEAKLY_DOMINATES_A){ bool equality = false; for(std::size_t k = 0; k != numDims; ++k){ BOOST_CHECK(population[j][k] >= population[i][k]); if(population[j][k]==population[i][k]) equality=true; BOOST_CHECK_EQUAL(equality,true); } } if( comp == Dominance::A_EQUALS_B){ for(std::size_t k = 0; k != numDims; ++k){ BOOST_CHECK(population[i][k] == population[j][k]); } } if( comp == Dominance::TRADE_OFF){ bool hasGreater = false; bool hasSmaller = false; for(std::size_t k = 0; k != numDims; ++k){ if(population[j][k] > population[i][k]) hasGreater = true; if(population[j][k] < population[i][k]) hasSmaller = true; } BOOST_CHECK_EQUAL(hasGreater, true); BOOST_CHECK_EQUAL(hasSmaller, true); } } } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/RealCodedNSGAII.cpp000066400000000000000000000050531314655040000234010ustar00rootroot00000000000000#define 
BOOST_TEST_MODULE DirectSearch_RealCodedNSGAII #include #include #include #include using namespace shark; struct PointExtractor{ template RealVector const& operator()(T const& arg)const{ return arg.value; } }; double testObjectiveFunctionMOOHelper( MultiObjectiveFunction& f, std::size_t mu, std::size_t iterations, RealVector const& reference ){ RealCodedNSGAII realCodedNSGAII; realCodedNSGAII.mu() = mu; f.init(); realCodedNSGAII.init(f); for(std::size_t i = 0; i != iterations; ++i){ realCodedNSGAII.step(f); //~ if(i%(iterations/100)==0) //~ std::clog<<"\r"< result(10); for (std::size_t i=0; i #include #include #include using namespace shark; struct PointExtractor{ template RealVector const& operator()(T const& arg)const{ return arg.value; } }; double testObjectiveFunctionMOOHelper( MultiObjectiveFunction& f, std::size_t mu, std::size_t iterations, RealVector const& reference ){ SMSEMOA smsemoa; smsemoa.mu() = mu; f.init(); smsemoa.init(f); for(std::size_t i = 0; i != iterations; ++i){ smsemoa.step(f); //~ if(i%(iterations/100)==0) //~ std::clog<<"\r"< result(10); for (std::size_t i=0; i #include #include #include #include #include #include "../testFunction.h" using namespace shark; using namespace shark::blas; class TestObjective : public SingleObjectiveFunction { public: TestObjective() { m_features |= HAS_VALUE; m_features |= CAN_PROPOSE_STARTING_POINT; } std::size_t numberOfVariables() const { return 2; } RealVector proposeStartingPoint() const { return RealVector(2, 0.0); } double eval(RealVector const& x) const { double value = 0.0; double best = 1e100; for (size_t i=0; i m_point; std::vector m_value; }; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_SimplexDownhill) BOOST_AUTO_TEST_CASE( SimplexDownhill_Sphere ) { Sphere function(3); SimplexDownhill optimizer; std::cout<<"\nTesting: "< #include #include #include #include #include #include using namespace shark; struct PointExtractor{ template RealVector const& operator()(T const& arg)const{ return arg.value; } }; void testObjectiveFunctionMOO( MultiObjectiveFunction& f, std::size_t mu, double targetVolume, std::size_t iterations, RealVector const& reference ){ SteadyStateMOCMA mocma; mocma.mu() = mu; f.init(); mocma.init(f); for(std::size_t i = 0; i != iterations; ++i){ mocma.step(f); //~ std::clog<<"\r"<> ssMocma2) ); Rng::seed( 0 ); ssMocma.step( dtlz1 ); SteadyStateMOCMA::SolutionType set1 = ssMocma.solution(); Rng::seed( 0 ); ssMocma2.step( dtlz1 ); SteadyStateMOCMA::SolutionType set2 = ssMocma2.solution(); for( unsigned int i = 0; i < set1.size(); i++ ) { BOOST_CHECK_SMALL( norm_2( set1.at( i ).value - set2.at( i ).value ), 1E-20 ); BOOST_CHECK_SMALL(norm_2( set1.at( i ).point - set2.at( i ).point ), 1E-20 ); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/DirectSearch/VDCMA.cpp000066400000000000000000000021601314655040000215120ustar00rootroot00000000000000#define BOOST_TEST_MODULE DirectSearch_VDCMA #include #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_DirectSearch_VDCMA) BOOST_AUTO_TEST_CASE( CMA_Ellipsoid ) { Ellipsoid function(20); VDCMA optimizer; optimizer.setInitialSigma(2); std::cout << "\nTesting: " << optimizer.name() << " with " << function.name() << std::endl; test_function( optimizer, function, _trials = 10, _iterations = 5000/optimizer.suggestLambda(20), _epsilon = 1E-10 ); } BOOST_AUTO_TEST_CASE( CMA_Rosenbrock ) { Rosenbrock function( 20,4 ); VDCMA optimizer; optimizer.setInitialSigma(2); std::size_t lambda= 
optimizer.suggestLambda(20); std::cout << "\nTesting: " << optimizer.name() << " with " << function.name() << std::endl; test_function( optimizer, function, _trials = 101, _iterations = 22000/lambda, _epsilon = 1E-10 ); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/GradientDescent/000077500000000000000000000000001314655040000206605ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/GradientDescent/BFGS.cpp000066400000000000000000000031271314655040000221100ustar00rootroot00000000000000#define BOOST_TEST_MODULE GradDesc_BFGS #include #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_BFGS) BOOST_AUTO_TEST_CASE( BFGS_dlinmin ) { Ellipsoid function(5); BFGS optimizer; optimizer.lineSearch().lineSearchType()=LineSearch::Dlinmin; std::cout<<"Testing: "< #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_CG) BOOST_AUTO_TEST_CASE( CG_dlinmin ) { Ellipsoid function(5); CG optimizer; optimizer.lineSearch().lineSearchType()=LineSearch::Dlinmin; std::cout<<"Testing: "< #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_LBFGS) BOOST_AUTO_TEST_CASE( LBFGS_dlinmin ) { Ellipsoid function(5); LBFGS optimizer; optimizer.setHistCount(3); optimizer.lineSearch().lineSearchType()=LineSearch::Dlinmin; std::cout<<"Testing: "< #define BOOST_TEST_MODULE LinAlg_dlmin #include #include using namespace shark; // The testfunction: struct Testfunction { double eval(const RealVector& x) { return x(0) * x(0) + 2; } // The derivation of the testfunction: double evalDerivative(const RealVector& x, RealVector& y) { y(0) = 2 * x(0); return eval(x); } }; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_LineSearch) BOOST_AUTO_TEST_CASE( LineSearch_DLinmin ) { double fret(0.);// function value at the point that // is found by the function RealVector p(1); // initial search starting point RealVector xi(1); // direction of search // Initialize search starting point and direction: p(0) = -3.; xi(0) = 3.; Testfunction function; // Minimize function: detail::dlinmin(p, xi, fret, function,0.0,1.0); // lines below are for self-testing this example, please ignore BOOST_CHECK_SMALL(fret-2,1.e-14); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/GradientDescent/NoisyRprop.cpp000066400000000000000000000046651314655040000235230ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_NoisyRProp #include #include #include #include using namespace shark; struct TestFunction : public SingleObjectiveFunction { typedef SingleObjectiveFunction Base; RealMatrix A; bool m_noisy; TestFunction(bool noisy):A(3,3),m_noisy(noisy) { A.clear(); A(0,0)=20; A(1,1)=10; A(2,2)=5; m_features|=Base::HAS_FIRST_DERIVATIVE; } std::string name() const { return "TestFunction"; } std::size_t numberOfVariables()const{ return 3; } virtual double eval(RealVector const& pattern)const { return inner_prod(prod(A,pattern),pattern); } //noise is only applied on the gradient virtual double evalDerivative(RealVector const& pattern, FirstOrderDerivative& derivative)const { derivative = 2*prod(A,pattern); if(m_noisy){ derivative(0)+=Rng::gauss(0,0.001); derivative(1)+=Rng::gauss(0,0.001); derivative(2)+=Rng::gauss(0,0.001); } return eval(pattern); } }; //First Test: the algorithm should work in a deterministic setting BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_NoisyRprop) 
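// Note (explanatory comment added for clarity): the quadratic test function
// above, f(x) = x^T A x with A = diag(20, 10, 5), has its unique minimum f = 0
// at the origin. When constructed with noisy == true, each component of the
// analytic gradient 2*A*x is additionally perturbed by Rng::gauss(0, 0.001),
// so only the gradient -- not the function value -- is noisy, which is exactly
// the setting NoisyRprop is designed for.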
BOOST_AUTO_TEST_CASE( NoisyRprop_deterministic ) { TestFunction function(false); RealVector start(3);//startingPoint start(0)=2; start(1)=2; start(2)=2; NoisyRprop optimizer; optimizer.init(function,start); std::cout<<"Testing: "< #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_Rprop) BOOST_AUTO_TEST_CASE( RPropPlus_Simple ) { Ellipsoid function(5); RpropPlus optimizer; optimizer.init(function); std::cout<<"Testing: "< #include #include using namespace shark; struct TestFunction : public SingleObjectiveFunction { typedef SingleObjectiveFunction Base; RealMatrix A; TestFunction():A(3,3,0.0) { A(0,0)=10; A(1,1)=5; A(2,2)=1; A(1,0)=1; A(0,1)=1; A(2,0)=1; A(0,2)=1; m_features|=Base::HAS_FIRST_DERIVATIVE; } std::string name() const { return "TestFunction"; } std::size_t numberOfVariables()const{ return 3; } virtual double eval(RealVector const& pattern)const { return inner_prod(prod(A,pattern),pattern); } virtual double evalDerivative(RealVector const& pattern, FirstOrderDerivative& derivative)const { derivative = 2*prod(A,pattern); return eval(pattern); } }; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_SteepestDescent) BOOST_AUTO_TEST_CASE( SteepestDescent_Test ) { TestFunction function; RealVector start(3);//startingPoint start(0)=1; start(1)=1; start(2)=1; SteepestDescent optimizer; optimizer.setLearningRate(0.1*(1-0.3)); optimizer.setMomentum(0.3); optimizer.init(function,start); // train the model std::cout<<"Testing: "< #include #include #include #include #include "../testFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_GradientDescent_TrustRegionNewton) BOOST_AUTO_TEST_CASE( TrustRegionNewton_Ellipsoid ) { Ellipsoid function(5); TrustRegionNewton optimizer; std::cout<<"Testing: "< #include #include using namespace shark; struct TestFunction : public SingleObjectiveFunction { typedef SingleObjectiveFunction Base; RealMatrix A; TestFunction():A(2,2) { A.clear(); A(0,0)=1; A(1,1)=1; m_features|=Base::HAS_FIRST_DERIVATIVE; m_features|=Base::CAN_PROPOSE_STARTING_POINT; } std::string name() const { return "TestFunction"; } std::size_t numberOfVariables()const{ return 2; } virtual double eval(RealVector const& pattern)const { return inner_prod(prod(A,pattern),pattern); } virtual SearchPointType proposeStartingPoint()const { return RealVector(2,0.0); } }; BOOST_AUTO_TEST_SUITE (Algorithms_GridSearch) BOOST_AUTO_TEST_CASE( NestedGridSearch_initialized ) { std::vector searchMin; searchMin.push_back(-1); searchMin.push_back(-1); std::vector searchMax; searchMax.push_back(1); searchMax.push_back(1); TestFunction function; NestedGridSearch optimizer; optimizer.configure(searchMin,searchMax); optimizer.init(function); // train the model double error=0; for(size_t iteration=0;iteration<30;++iteration) { optimizer.step(function); error=optimizer.solution().value; } std::cout<<"NestedGridSearch done. Error:"< searchMin; searchMin.push_back(-1); searchMin.push_back(-1); std::vector searchMax; searchMax.push_back(1); searchMax.push_back(1); TestFunction function; NestedGridSearch optimizer; optimizer.init(function); // train the model double error=0; for(size_t iteration=0;iteration<30;++iteration) { optimizer.step(function); error=optimizer.solution().value; } std::cout<<"NestedGridSearch_uninitialized done. 
Error:"< searchMin; searchMin.push_back(-1); searchMin.push_back(-1); std::vector searchMax; searchMax.push_back(1); searchMax.push_back(1); std::vector sections; sections.push_back(5); sections.push_back(5); TestFunction function; GridSearch optimizer; optimizer.configure(searchMin,searchMax,sections); optimizer.init(function); // train the model optimizer.step(function); double error=optimizer.solution().value; std::cout<<"GridSearch_init_individual done. Error:"< > searchValues(2); for(size_t i=0;i!=2;++i) { for(size_t j=0;j<5;++j) { searchValues[i].push_back(-1+j*0.5); } } TestFunction function; GridSearch optimizer; optimizer.configure(searchValues); optimizer.init(function); // train the model optimizer.step(function); double error=optimizer.solution().value; std::cout<<"GridSearch_init_individual_param done. Error:"< points; points.resize(11,RealVector(2)); for(size_t i=0;i!=10;++i) { for(size_t j=0;j<2;++j) { points[i](j)=Rng::gauss(0,1); } } points[10](0)=0.0; points[10](1)=0.0; TestFunction function; PointSearch optimizer; optimizer.configure(points); optimizer.init(function); // train the model optimizer.step(function); double error=optimizer.solution().value; std::cout<<"PointSearch done. Error:"< #include #include #include #include #include #include #include #include using namespace shark; double inclusionExclusionHelper(std::vector< RealVector > const& points, RealVector const& reference, RealVector point, std::size_t setsize = 0, std::size_t index = 0) { if (index == points.size()) { if (setsize > 0) { double hv = 1.0; for (std::size_t i=0; i const& points, RealVector const& reference) { RealVector point(reference.size(), -1e100); return inclusionExclusionHelper(points, reference, point, 0, 0); } struct Fixture { static const double HV_TEST_SET_2D; static const double HV_TEST_SET_3D; Fixture() { BOOST_TEST_MESSAGE( "Setting up test sets and reference points " ); m_refPoint3D = boost::assign::list_of( 1.1 )( 1.1 )( 1.1 ); m_testSet3D.push_back( boost::assign::list_of( 6.56039859404455e-2 ) (0.4474014917277) (0.891923776019316) ); m_testSet3D.push_back( boost::assign::list_of( 3.74945443950542e-2)(3.1364039802686e-2)(0.998804513479922 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.271275894554688)(0.962356894778677)(1.66911984440026e-2 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.237023460537611)(0.468951509833942)(0.850825693417425 ) ); m_testSet3D.push_back( boost::assign::list_of( 8.35813910785332e-2)(0.199763306732937)(0.97627289850149 ) ); m_testSet3D.push_back( boost::assign::list_of( 1.99072649788403e-2)(0.433909411793732)(0.900736544810901 ) ); m_testSet3D.push_back( boost::assign::list_of( 9.60698311356187e-2)(0.977187045721721)(0.18940978121319 ) ); m_testSet3D.push_back( boost::assign::list_of( 2.68052822856208e-2)(2.30651870780559e-2)(0.999374541394087 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.356223209018184)(0.309633114503212)(0.881607826507812 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.127964409429531)(0.73123479272024)(0.670015513129912 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.695473366395562)(0.588939459338073)(0.411663831140169 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.930735605917613)(0.11813654121718)(0.346085234453039 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.774030940645471)(2.83363630460836e-2)(0.632513362272141 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.882561783965009)(4.80931050853475e-3)(0.470171849451808 ) ); m_testSet3D.push_back( boost::assign::list_of( 
4.92340623346446e-3)(0.493836329534438)(0.869540936185878 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.305054163869799)(0.219367569077876)(0.926725324323535 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.575227233936948)(0.395585597387712)(0.715978815661927 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.914091673974525)(0.168988399705031)(0.368618138912863 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.225088318852838)(0.796785147906617)(0.560775067911755 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.306941172015014)(0.203530333828304)(0.929710987422322 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.185344081015371)(0.590388202293731)(0.785550343533082 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.177181921358634)(0.67105558509432)(0.719924279669315 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.668494587335475)(0.22012845825454)(0.710393164782469 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.768639363955671)(0.256541291890516)(0.585986427942633 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.403457020846225)(0.744309886218013)(0.532189088208334 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.659545359568811)(0.641205442223306)(0.39224418355721 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.156141960251846)(8.36191498217669e-2)(0.984188765446851 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.246039496399399)(0.954377757574506)(0.16919711007753 ) ); m_testSet3D.push_back( boost::assign::list_of( 3.02243260456876e-2)(0.43842801306405)(0.898257962656493 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.243139979715573)(0.104253945099703)(0.96437236853565 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.343877707314699)(0.539556201272222)(0.768522757034998 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.715293885551218)(0.689330705208567)(0.114794756629825 ) ); m_testSet3D.push_back( boost::assign::list_of( 1.27610149409238e-2)(9.47996983636579e-2)(0.995414573777096 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.30565381275615)(0.792827267212719)(0.527257689476066 ) ); m_testSet3D.push_back( boost::assign::list_of( 0.43864576057661)(3.10389339442242e-2)(0.8981238674636 ) ); m_refPoint2D = boost::assign::list_of( 11 )( 11 ); //~ m_refPoint2D.push_back(11); //~ m_refPoint2D.push_back(11); m_testSet2D.push_back( boost::assign::list_of( 0.0000000000 )(1.0000000000 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.1863801385)(0.9824777066 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.2787464911)(0.9603647191 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.3549325314)(0.9348919179 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.4220279525)(0.9065828188 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.4828402120)(0.8757084730 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.5388359009)(0.8424107501 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.5908933491)(0.8067496824 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.6395815841)(0.7687232254 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.6852861036)(0.7282739568 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.7282728148)(0.6852873173 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.7687221637)(0.6395828601 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.8067485614)(0.5908948796 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.8424097574)(0.5388374529 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.8757076053)(0.4828417856 ) ); m_testSet2D.push_back( 
boost::assign::list_of( 0.9065821569)(0.4220293744 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.9348914021)(0.3549338901 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.9603643728)(0.2787476843 ) ); m_testSet2D.push_back( boost::assign::list_of( 0.9824775804)(0.1863808035 ) ); m_testSet2D.push_back( boost::assign::list_of( 1.0000000000)(0.0000000000 ) ); } RealVector m_refPoint2D; std::vector< RealVector > m_testSet2D; RealVector m_refPoint3D; std::vector< RealVector > m_testSet3D; }; const double Fixture::HV_TEST_SET_2D = 120.196858; const double Fixture::HV_TEST_SET_3D = 0.60496383631719475; BOOST_FIXTURE_TEST_SUITE (Algorithms_Hypervolume, Fixture) BOOST_AUTO_TEST_CASE( Algorithms_ExactHypervolume ) { HypervolumeCalculator hc; HypervolumeApproximator< FastRng > ha; IdentityFitnessExtractor ife; // check exact algorithms against hard-coded reference values for 2 and 3 objectives BOOST_CHECK_CLOSE( hc( ife, m_testSet2D, m_refPoint2D ), Fixture::HV_TEST_SET_2D, 1E-5 ); BOOST_CHECK_CLOSE( hc( ife, m_testSet3D, m_refPoint3D ), Fixture::HV_TEST_SET_3D, 1E-5 ); // check that the approximate algorithm confirms (the plausibility of) the result std::vector results; for( unsigned int trial = 0; trial < 10; trial++ ) results.push_back(ha( m_testSet3D.begin(), m_testSet3D.end(), ife, m_refPoint3D, 1E-2, 1E-2 ) ); BOOST_CHECK_SMALL( *median_element(results) - Fixture::HV_TEST_SET_3D, 1E-2 ); // scalable test with random linear front for (unsigned int nObj=2; nObj<=10; nObj++) { std::vector points; for (unsigned int i=0; i<15; i++) { RealVector point(nObj); for (unsigned int j=0; j points; points.push_back(RealVector(nObj, 0.5)); for (unsigned int i=0; i points; points.push_back(RealVector(nObj, 0.5)); for (unsigned int i=0; i lca; //~ double vol = hc( ife, m_testSet3D, m_refPoint3D ); //~ std::vector< double > contributions( m_testSet3D.size(), 0. ); //~ for( unsigned int i = 0; i < contributions.size(); i++ ) { //~ std::vector< RealVector > front( m_testSet3D ); //~ front.erase( front.begin() + i ); //~ std::cout<::const_iterator it = m_testSet3D.begin(); //~ std::size_t approx = lca.leastContributor( ife, m_testSet3D, m_refPoint3D ); //~ std::size_t exact = (std::size_t)std::distance( contributions.begin(), std::min_element( contributions.begin(), contributions.end() ) ); //~ BOOST_CHECK_EQUAL( approx, exact ); //~ } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/JaakkolaHeuristic.cpp000066400000000000000000000055031314655040000217210ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Unit test for Jaakkola's bandwidth selection heuristic. * * * * * \author T. Glasmachers * \date 2012 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #define BOOST_TEST_MODULE Algorithms_JaakkolaHeuristic #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_JaakkolaHeuristic) BOOST_AUTO_TEST_CASE( Algorithms_JaakkolaHeuristic ) { // Create a simple data set. // The distribution of distances of pairs of // different classes is: 1 2 2 3 3 3 4 4 5. // We have: minimum=1, median=3, maximum=5. std::vector inputs(6, RealVector(1)); inputs[0](0) = 0.0; inputs[1](0) = 1.0; inputs[2](0) = 2.0; inputs[3](0) = 3.0; inputs[4](0) = 4.0; inputs[5](0) = 5.0; std::vector targets(6); targets[0] = 0; targets[1] = 0; targets[2] = 0; targets[3] = 1; targets[4] = 1; targets[5] = 1; ClassificationDataset dataset = createLabeledDataFromRange(inputs, targets); // todo: make test for default version // // obtain values of sigma for different quantiles, including the default JaakkolaHeuristic jh(dataset, false); double sigma = jh.sigma(); double sigma_minimum = jh.sigma(0.0); double sigma_median = jh.sigma(0.5); double sigma_maximum = jh.sigma(1.0); double gamma_median = jh.gamma(0.5); // check values BOOST_CHECK_SMALL(std::abs(sigma_minimum - 1.0), 1e-14); BOOST_CHECK_SMALL(std::abs(sigma_median - 3.0), 1e-14); BOOST_CHECK_SMALL(std::abs(sigma_maximum - 5.0), 1e-14); // check consistency BOOST_CHECK_SMALL(std::abs(sigma_median - sigma), 1e-14); BOOST_CHECK_SMALL(std::abs(gamma_median - 0.5 / (sigma_median * sigma_median)), 1e-14); // check default variant JaakkolaHeuristic jh2(dataset, true); sigma = jh2.sigma(); BOOST_CHECK_SMALL(std::abs(sigma - 2.0), 1e-14); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/KMeans.cpp000066400000000000000000000174631314655040000175120ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test case for k-means clustering. * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE Algorithms_KMeans #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_KMeans) BOOST_AUTO_TEST_CASE(KMeans_simple) { RealVector v(1); // prepare data set std::vector data(300); for (std::size_t i=0; i<100; i++) { v(0) = Rng::uni(); data[i] = v; v(0) = Rng::uni() + 10.0; data[100+i] = v; v(0) = Rng::uni() + 20.0; data[200+i] = v; } Data dataset = createDataFromRange(data); // prepare initial centroids std::vector start(3); v(0) = 2.0; start[0] = v; v(0) = 7.0; start[1] = v; v(0) = 25.0; start[2] = v; Centroids centroids( createDataFromRange(start)); // invoke k-means std::size_t iterations = kMeans(dataset, 3, centroids); std::cout< const& c = centroids.centroids(); std::cout< 0.0); BOOST_CHECK(c.element(0)(0) < 1.0); BOOST_CHECK(c.element(1)(0) > 10.0); BOOST_CHECK(c.element(1)(0) < 11.0); BOOST_CHECK(c.element(2)(0) > 20.0); BOOST_CHECK(c.element(2)(0) < 21.0); BOOST_CHECK_LE(iterations, 3u); } // tests whether the algorithm leads to clusters which are constant (i.e. the algorithm converged to // a stationary solution). 
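// The stationarity check below works by re-assigning every point to its
// nearest centroid with a HardClusteringModel and recomputing the per-cluster
// means: at a k-means fixed point these means must coincide (up to small
// numerical error) with the centroids the algorithm returned.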
BOOST_AUTO_TEST_CASE(KMeans_multiple_gauss) { const unsigned int numTrials = 100; const unsigned int numPoints = 300; const unsigned int numMeans = 3; const unsigned int numDimensions = 5; for(unsigned int trial = 0; trial != numTrials; ++trial){ //prepare means std::vector means(numMeans,RealVector(numDimensions)); for (unsigned int i=0; i data(numPoints); for (std::size_t i=0; i dataset = createDataFromRange(data); // invoke k-means with random centroids and no upper limit for iterations Centroids centroids; kMeans(dataset, numMeans, centroids); // check result BOOST_REQUIRE_EQUAL( centroids.centroids().numberOfElements(), numMeans); BOOST_REQUIRE_EQUAL( dataDimension(centroids.centroids()), numDimensions); HardClusteringModel model(¢roids); //assign centers Data clusters = model(dataset); //create cluster means (they should be the same as the previous ones) std::vector clusterMeans(numMeans,RealVector(numDimensions,0)); std::vector members = classSizes(clusters); for (std::size_t i=0; i means(numMeans,RealVector(numDimensions)); for (unsigned int i=0; i data(numPoints); for (std::size_t i=0; i dataset = createDataFromRange(data); // invoke k-means with random centroids and no upper limit for iterations Centroids centroids; LinearKernel<> kernel; KernelExpansion clusteringModel = kMeans(dataset, numMeans, kernel); //std::cout< > model; model.decisionFunction() = clusteringModel; //assign centers Data clusters = model(dataset); //create new cluster means (they should be the same as the previous ones) std::vector newClusterMeans(numMeans,RealVector(numDimensions,0)); std::vector members = classSizes(clusters); for (std::size_t i=0; i #define BOOST_TEST_MODULE ML_LinearProgram #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_LP_LinearProgram) BOOST_AUTO_TEST_CASE( ML_LinearProgram ) { // define the example linear program // found in the GLPK documentation LP lp; lp.setMaximize(); lp.addRows(3); lp.addColumns(3); lp.setObjectiveCoefficient(1, 10.0); lp.setObjectiveCoefficient(2, 6.0); lp.setObjectiveCoefficient(3, 4.0); lp.setRowUpperBounded(1, 100.0); lp.setRowUpperBounded(2, 600.0); lp.setRowUpperBounded(3, 300.0); lp.setColumnLowerBounded(1, 0.0); lp.setColumnLowerBounded(2, 0.0); lp.setColumnLowerBounded(3, 0.0); std::vector row(10); std::vector col(10); std::vector value(10); row[1] = 1; col[1] = 1; value[1] = 1.0; row[2] = 1; col[2] = 2; value[2] = 1.0; row[3] = 1; col[3] = 3; value[3] = 1.0; row[4] = 2; col[4] = 1; value[4] = 10.0; row[5] = 3; col[5] = 1; value[5] = 2.0; row[6] = 2; col[6] = 2; value[6] = 3.0; row[7] = 3; col[7] = 2; value[7] = 2.0; row[8] = 2; col[8] = 3; value[8] = 5.0; row[9] = 3; col[9] = 3; value[9] = 6.0; lp.setConstraintMatrix(row, col, value); // solve the linear program with the simplex algorithm lp.solve(); // read out the solution double p[3]; p[0] = lp.solution(1); p[1] = lp.solution(2); p[2] = lp.solution(3); // compare the solution to the true optimum with an accuracy // of 6 significant digits, which is more than enough to // identify the correct corner of the simplex double best[3] = {42.8571, 57.1429, 0.0}; double diff = fabs(best[0] - p[0]) + fabs(best[1] - p[1]) + fabs(best[2] - p[2]); BOOST_CHECK_SMALL(diff, 1e-4); } BOOST_AUTO_TEST_SUITE_END() 
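// For reference, the sparse triplets above assemble the linear program
//
//     maximize   10*x1 + 6*x2 + 4*x3
//     subject to   x1 +   x2 +   x3 <= 100
//               10*x1 + 3*x2 + 5*x3 <= 600
//                2*x1 + 2*x2 + 6*x3 <= 300,   x1, x2, x3 >= 0.
//
// At the optimum x3 = 0 and the first two constraints are active; solving
// x1 + x2 = 100 and 10*x1 + 3*x2 = 600 gives x1 = 300/7 ~ 42.8571 and
// x2 = 400/7 ~ 57.1429, which is the corner hard-coded in best[] above.
//
// A minimal standalone cross-check of that corner (plain C++, independent of
// Shark and GLPK; a sketch meant to be compiled as its own program):
//
//~ #include <cassert>
//~ #include <cmath>
//~ int main() {
//~     double x1 = 300.0/7.0, x2 = 400.0/7.0, x3 = 0.0;
//~     assert(x1 + x2 + x3 <= 100.0 + 1e-9);                        // row 1 (active)
//~     assert(10*x1 + 3*x2 + 5*x3 <= 600.0 + 1e-9);                 // row 2 (active)
//~     assert(2*x1 + 2*x2 + 6*x3 <= 300.0 + 1e-9);                  // row 3 (slack 100)
//~     assert(std::fabs(10*x1 + 6*x2 + 4*x3 - 5400.0/7.0) < 1e-9);  // objective value
//~     return 0;
//~ }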
Shark-3.1.4/Test/Algorithms/Trainers/000077500000000000000000000000001314655040000174045ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/Trainers/Budgeted/000077500000000000000000000000001314655040000211275ustar00rootroot00000000000000Shark-3.1.4/Test/Algorithms/Trainers/Budgeted/AbstractBudgetMaintenanceStrategy_Test.cpp000066400000000000000000000114021314655040000314140ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief AbstractBudgetMaintenanceStrategy Test * * * * \author Aydin Demircioglu * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ABSTRACTBUDGETMAINTENANCESTRATEGY #include #include #include #include #include #include #include using namespace shark; /* // REMOVE ME static void dumpModel(ModelType const& model) { for (size_t i = 0; i < model.basis().numberOfElements(); i++) { for (size_t j = 0; j < model.alpha().size2(); j++) { std::cout << (model.alpha(i, j)) << ", " ; } std::cout << " - "; for (size_t j = 0; j < model.basis().element(j).size(); j++) { std::cout << model.basis().element(i)(j) << ", " ; } std::cout << "\n"; } } */ BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_Budgeted_AbstractBudgetMaintenanceStrategy_Test) BOOST_AUTO_TEST_CASE( AbstractBudgetMaintenanceStrategy_findSmallestVector) { typedef RealVector InputType; typedef KernelExpansion ModelType; // create a kernel double gamma = 1.0f; GaussianRbfKernel<> kernel(gamma); // create a fake 2d dataset with 64 entries and no noise size_t datasetSize = 64; Chessboard problem (2,0); LabeledData dataset = problem.generateDataset(datasetSize); std::size_t classes = numberOfClasses (dataset); // create a budget with some fake entries size_t m_budgetSize = 16; LabeledData preinitializedBudgetVectors (m_budgetSize, dataset.element (0)); // initialize kernel expansion KernelClassifier classifier; ModelType &budgetModel = classifier.decisionFunction(); // before we start: what happens if there is no vector in the budget? size_t index = 0; double minAlpha = -0.0; try { AbstractBudgetMaintenanceStrategy::findSmallestVector(budgetModel, index, minAlpha); // in release we must have infinity back BOOST_REQUIRE_EQUAL(minAlpha, std::numeric_limits::infinity()); } catch (...) { // in debug we got an exception, that is OK BOOST_REQUIRE_EQUAL(0, 0); } budgetModel.setStructure (&kernel, preinitializedBudgetVectors.inputs(), false, classes); // create the alphas, we need by just enumerating them for (size_t i = 0; i < m_budgetSize; i++) for (size_t j = 0; j < classes; j++) budgetModel.alpha(i, j) = double(i*classes + j); // find the smallest vector AbstractBudgetMaintenanceStrategy::findSmallestVector(budgetModel, index, minAlpha); BOOST_REQUIRE_EQUAL(index, 0); BOOST_REQUIRE_EQUAL(minAlpha, 1); // what happens if we have all alphas the same? for (size_t i = 0; i < m_budgetSize; i++) for (size_t j = 0; j < classes; j++) budgetModel.alpha(i, j) = classes; AbstractBudgetMaintenanceStrategy::findSmallestVector(budgetModel, index, minAlpha); BOOST_REQUIRE_EQUAL(minAlpha, sqrt(2.0*classes*classes)); // what happens if all things are zero in the budget? 
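// (Then every row of the alpha matrix has 2-norm zero, so the search below
// has to report a minimum of exactly 0; which index is returned is
// irrelevant, since all rows tie.)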
for (size_t i = 0; i < m_budgetSize; i++) for (size_t j = 0; j < classes; j++) budgetModel.alpha(i, j) = 0; AbstractBudgetMaintenanceStrategy::findSmallestVector(budgetModel, index, minAlpha); BOOST_REQUIRE_EQUAL(minAlpha, 0); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/Budgeted/KernelBudgetedSGDTrainer_Test.cpp000066400000000000000000000112761314655040000274100ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief KernelBudgetedSGDTrainer Test * * * * \author Aydin Demircioglu * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE KERNELBUDGETEDSGDTRAINER #include #include #include #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_Budgeted_KernelBudgetedSGDTrainer_Test) BOOST_AUTO_TEST_CASE( KernelBudgetedSGDTrainer_train) { // Create a Gaussian RBF Kernel double gamma = 1.0f; GaussianRbfKernel<> *kernel = new GaussianRbfKernel<> (gamma); // We will use the usual hinge Loss HingeLoss *hingeLoss = new HingeLoss(); // As the budget maintenance strategy we choose the merge strategy // We need explicitly to state that we are merging RealVectors. // For the time being this is the only type of objects we can merge. MergeBudgetMaintenanceStrategy *strategy = new MergeBudgetMaintenanceStrategy(); // Parameters for the trainer: // Our budget shall have at most 64 vectors size_t budgetSize = 32; // We want to run 3 epochs size_t epochs = 3; // Initialize the KernelBudgetedSGDTrainer and set number of epochs. std::cout << "Creating KernelBudgetedSGDTrainer." << std::endl; double cost = 1.0f; KernelBudgetedSGDTrainer *kernelBudgetedSGDtrainer = new KernelBudgetedSGDTrainer (kernel, hingeLoss, cost, false, false, budgetSize, strategy ); kernelBudgetedSGDtrainer -> setEpochs (epochs); // We want to train a normal Chessboard problem. size_t datasetSize = 1000; std::cout << "Creating Chessboard dataset problem with " << datasetSize << " points." << std::endl; Chessboard problem (4); LabeledData trainingData = problem.generateDataset(datasetSize); // Create classifier that will hold the final model KernelClassifier kernelClassifier; // Train std::cout << "Training the KernelBudgetedSGDTrainer on the problem with a budget of " << budgetSize << " and " << epochs << " Epochs." << std::endl; kernelBudgetedSGDtrainer ->train (kernelClassifier, trainingData); // Check the number of support vectors first, it should be equal to the budgetSize (but can be less) Data supportVectors = kernelClassifier.decisionFunction().basis(); size_t nSupportVectors = supportVectors.numberOfElements(); std::cout << "We have " << nSupportVectors << " support vectors in our model.\n"; if (nSupportVectors > budgetSize) SHARKEXCEPTION ("Something has gone wrong. There are more support vectors in the budget than specified!"); // Create another test problem with 500 points std::cout << "Creating test data set with 500 points.\n"; size_t testDatasetSize = 500; Chessboard testProblem (4); LabeledData testData = testProblem.generateDataset(testDatasetSize); // Check the performance on the test set std::cout << "Computing the performance on the test dataset using 0-1 loss.\n"; ZeroOneLoss loss; Data prediction = kernelClassifier (testData.inputs()); double error_rate = loss (testData.labels(), prediction); // Report performance. 
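// Note that no bound on the error rate is asserted: kernelized SGD with a
// merging budget is stochastic, so the only hard requirement checked above is
// that the model never exceeds its budget of support vectors.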
std::cout << "Test error rate: " << error_rate << std::endl; } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/Budgeted/MergeBudgetMaintenanceStrategy_Test.cpp000066400000000000000000000063311314655040000307150ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief MergeBudgetMaintenanceStrategy Test * * * * \author Aydin Demircioglu * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE MERGEBUDGETMAINTENANCESTRATEGY #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_Budgeted_MergeBudgetMaintenanceStrategy_Test) BOOST_AUTO_TEST_CASE( MergeBudgetMaintenanceStrategy_MergingProblemFunction) { // setup MergeBudgetMaintenanceStrategy ms; RealVector h(1); // initial search starting point RealVector xi(1); // direction of search h(0) = 0.0; xi(0) = 0.5; // test for the minimum of the easy function 1 + 1=2=const between 0 and 1 double fret(0.5); double k = 1.0; double a = 1.0; double b = 1.0; { MergeBudgetMaintenanceStrategy::MergingProblemFunction mergingProblemFunction(a, b, k); detail::dlinmin(h, xi, fret, mergingProblemFunction, 0.0, 1.0); BOOST_REQUIRE_EQUAL(h(0), 0); } // test for the minimum of the easy function 0.5^{h*h} between 0 and 1 fret = 0.5; k = 0.5; a = 0.0; b = 1.0; h(0) = 0.0; xi(0) = 0.00001; { MergeBudgetMaintenanceStrategy::MergingProblemFunction mergingProblemFunction(a, b, k); detail::dlinmin(h, xi, fret, mergingProblemFunction, 0.0, 1.0); BOOST_REQUIRE_SMALL(h(0), 0.000001); } // minimize ( -0.2*(0.2)^{x*x} - 0.1* (0.2)^{ (1-x) * (1-x)}) over [0,1] fret = 0.0; k = 0.2; a = 0.1; b = 0.2; h(0) = 0.0; xi(0) = 0.00001; { MergeBudgetMaintenanceStrategy::MergingProblemFunction mergingProblemFunction(a, b, k); detail::dlinmin(h, xi, fret, mergingProblemFunction, 0.0, 1.0); BOOST_REQUIRE_SMALL(h(0) -0.133040685 , 0.000001); } } BOOST_AUTO_TEST_CASE( MergeBudgetMaintenanceStrategy_reduceBudget) { } BOOST_AUTO_TEST_CASE( MergeBudgetMaintenanceStrategy_addToModel) { } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/Budgeted/RemoveBudgetMaintenanceStrategy_Test.cpp000066400000000000000000000033711314655040000311140ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief RemoveBudgetMaintenanceStrategy Test * * * * \author Aydin Demircioglu * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE REMOVEBUDGETMAINTENANCESTRATEGY #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_Budgeted_RemoveBudgetMaintenanceStrategy_Test) BOOST_AUTO_TEST_CASE( RemoveBudgetMaintenanceStrategy_reduceBudget) { } BOOST_AUTO_TEST_CASE( RemoveBudgetMaintenanceStrategy_addToModel) { } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/CSvmTrainer.cpp000066400000000000000000000223431314655040000223110ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief test case for the CSvmTrainer * * * * * \author T. Glasmachers * \date - * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ALGORITHMS_TRAINERS_CSVMTRAINER #include #include #include #include #include #include using namespace shark; // This test case consists of training SVMs with // analytically computable solution. This known // solution is used to validate the trainer. BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_CSvmTrainer) BOOST_AUTO_TEST_CASE( CSVM_TRAINER_SIMPLE_TEST ) { // simple 5-point dataset std::vector input(5); std::vector target(5); size_t i; for (i=0; i<5; i++) input[i].resize(2); input[0](0) = 0.0; input[0](1) = 0.0; target[0] = 0; input[1](0) = 2.0; input[1](1) = 2.0; target[1] = 1; input[2](0) = -1.0; input[2](1) = -8.0; target[2] = 0; input[3](0) = -1.0; input[3](1) = -1.0; target[3] = 0; input[4](0) = 3.0; input[4](1) = 3.0; target[4] = 1; ClassificationDataset dataset = createLabeledDataFromRange(input, target); // hard-margin training with linear kernel { std::cout << "C-SVM hard margin" << std::endl; LinearKernel<> kernel; KernelClassifier svm; CSvmTrainer trainer(&kernel, 1e100,true); trainer.sparsify() = false; trainer.shrinking() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); RealVector param = svm.parameterVector(); BOOST_REQUIRE_EQUAL(param.size(), 6u); std::cout << "alpha: " << param(0) << " " << param(1) << " " << param(2) << " " << param(3) << " " << param(4) << std::endl; std::cout << " b: " << param(5) << std::endl; std::cout << "kernel computations: " << trainer.accessCount() << std::endl; // test against analytically known solution BOOST_CHECK_SMALL(param(0) + 0.25, 1e-6); BOOST_CHECK_SMALL(param(1) - 0.25, 1e-6); BOOST_CHECK_SMALL(param(2), 1e-6); BOOST_CHECK_SMALL(param(3), 1e-6); BOOST_CHECK_SMALL(param(4), 1e-6); BOOST_CHECK_SMALL(param(5) + 1.0, 1e-6); } // hard-margin training with linear kernel { std::cout << "C-SVM hard margin with shrinking" << std::endl; LinearKernel<> kernel; KernelClassifier svm; CSvmTrainer trainer(&kernel, 1e100,true); trainer.sparsify() = false; trainer.shrinking() = true; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); RealVector param = svm.parameterVector(); BOOST_REQUIRE_EQUAL(param.size(), 6u); std::cout << "alpha: " << param(0) << " " << param(1) << " " << param(2) << " " << param(3) << " " << param(4) << std::endl; std::cout << " b: " << param(5) << std::endl; std::cout << "kernel computations: " << trainer.accessCount() << std::endl; // test against analytically known solution BOOST_CHECK_SMALL(param(0) + 0.25, 1e-6); BOOST_CHECK_SMALL(param(1) - 0.25, 1e-6); BOOST_CHECK_SMALL(param(2), 1e-6); BOOST_CHECK_SMALL(param(3), 1e-6); BOOST_CHECK_SMALL(param(4), 1e-6); BOOST_CHECK_SMALL(param(5) + 1.0, 1e-6); } // soft-margin training with linear kernel { std::cout << "C-SVM soft margin" << std::endl; LinearKernel<> kernel; KernelClassifier svm; 
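// The soft-margin solution with C = 0.1 can be verified by hand: the alphas
// checked below give w = 0.1*(2,2) + 0.0125*(3,3) - 0.1*(0,0) - 0.0125*(-1,-1)
// = (0.25, 0.25) and b = -0.5. Hence (-1,-1) and (3,3) sit exactly on their
// margins (free alphas), (0,0) and (2,2) violate theirs (alphas at the bound
// C = 0.1), and (-1,-8) is inactive with f(-1,-8) = -2.75 < -1.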
CSvmTrainer trainer(&kernel, 0.1, true); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); RealVector param = svm.parameterVector(); BOOST_REQUIRE_EQUAL(param.size(), 6u); std::cout << "alpha: " << param(0) << " " << param(1) << " " << param(2) << " " << param(3) << " " << param(4) << std::endl; std::cout << " b: " << param(5) << std::endl; std::cout << "kernel computations: " << trainer.accessCount() << std::endl; // test against analytically known solution BOOST_CHECK_SMALL(param(0) + 0.1, 1e-6); BOOST_CHECK_SMALL(param(1) - 0.1, 1e-6); BOOST_CHECK_SMALL(param(2), 1e-6); BOOST_CHECK_SMALL(param(3) + 0.0125, 1e-6); BOOST_CHECK_SMALL(param(4) - 0.0125, 1e-6); BOOST_CHECK_SMALL(param(5) + 0.5, 1e-6); } // hard-margin training with squared hinge loss { std::cout << "Squared Hinge Loss C-SVM hard margin with shrinking" << std::endl; LinearKernel<> kernel; KernelClassifier svm; SquaredHingeCSvmTrainer trainer(&kernel, 1e100, true); trainer.sparsify() = false; trainer.shrinking() = true; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); RealVector param = svm.parameterVector(); BOOST_REQUIRE_EQUAL(param.size(), 6u); std::cout << "alpha: " << param(0) << " " << param(1) << " " << param(2) << " " << param(3) << " " << param(4) << std::endl; std::cout << " b: " << param(5) << std::endl; std::cout << "kernel computations: " << trainer.accessCount() << std::endl; // test against analytically known solution BOOST_CHECK_SMALL(param(0) + 0.25, 1e-6); BOOST_CHECK_SMALL(param(1) - 0.25, 1e-6); BOOST_CHECK_SMALL(param(2), 1e-6); BOOST_CHECK_SMALL(param(3), 1e-6); BOOST_CHECK_SMALL(param(4), 1e-6); BOOST_CHECK_SMALL(param(5) + 1.0, 1e-6); } // soft-margin training with squared hinge loss { std::cout << "Squared Hinge Loss C-SVM soft margin with shrinking" << std::endl; LinearKernel<> kernel; KernelClassifier svm; SquaredHingeCSvmTrainer trainer(&kernel, 0.1, true); trainer.sparsify() = false; trainer.shrinking() = true; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); RealVector param = svm.parameterVector(); BOOST_REQUIRE_EQUAL(param.size(), 6u); std::cout << "alpha: " << param(0) << " " << param(1) << " " << param(2) << " " << param(3) << " " << param(4) << std::endl; std::cout << " b: " << param(5) << std::endl; // test against analytically known solution BOOST_CHECK_SMALL(param(0) + 0.104, 1e-6); BOOST_CHECK_SMALL(param(1) - 0.104, 1e-6); BOOST_CHECK_SMALL(param(2), 1e-6); BOOST_CHECK_SMALL(param(3) + 0.008, 1e-6); BOOST_CHECK_SMALL(param(4) - 0.008, 1e-6); BOOST_CHECK_SMALL(param(5) + 0.48, 1e-6); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; } } template void checkSVMSolutionsEqual( Model1 const& model1, Model2 const& model2, Dataset const& dataset, double epsilon ){ Data decision1 = model1.decisionFunction()(dataset.inputs()); Data decision2 = model2.decisionFunction()(dataset.inputs()); for(std::size_t i = 0; i != dataset.numberOfElements(); ++i){ BOOST_CHECK_CLOSE( decision1.element(i)(0), decision2.element(i)(0), epsilon ); } } BOOST_AUTO_TEST_CASE( CSVM_WEIGHTED_TEST ) { // simple dataset Chessboard problem; ClassificationDataset dataset = problem.generateDataset(30); GaussianRbfKernel<> kernel(1.0); KernelClassifier svmUnweighted; KernelClassifier svmWeighted; CSvmTrainer trainer(&kernel, 1.0, true); trainer.stoppingCondition().minAccuracy = 1e-8; //first check that all weights being equal is working trainer.train(svmUnweighted, dataset); 
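// In the standard weighted C-SVM formulation, example weights act as
// per-example scalings of the penalty C (an upper bound of weight_i * C on
// each dual variable), so unit weights must reproduce the unweighted machine
// exactly, and integer weights must match explicit duplication of the
// corresponding examples - both of which this test verifies.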
trainer.train(svmWeighted, WeightedLabeledData(dataset,1.0)); checkSVMSolutionsEqual(svmUnweighted, svmWeighted, dataset,0.0001); //resample the dataset by creating duplications. This must be the same as the normal //dataset initialized with the correct multiplicities std::size_t const Trials = 10; std::size_t const DatasetSize = 100; for(std::size_t trial = 0; trial != Trials; ++trial){ //generate weighted and unweighted dataset WeightedLabeledData weightedDataset(dataset,0.0); ClassificationDataset unweightedDataset(1); unweightedDataset.batch(0).input.resize(DatasetSize,inputDimension(dataset)); unweightedDataset.batch(0).label.resize(DatasetSize); for(std::size_t i = 0; i != DatasetSize; ++i){ std::size_t index = Rng::discrete(0,29); weightedDataset.element(index).weight +=1.0; unweightedDataset.element(i) = dataset.element(index); } trainer.train(svmUnweighted, unweightedDataset); trainer.train(svmWeighted, weightedDataset); checkSVMSolutionsEqual(svmUnweighted, svmWeighted, dataset,0.0001); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/EpsilonSvmTrainer.cpp000066400000000000000000000045741314655040000235460ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief test case for epsilon-SVM * * * * \author T. Glasmachers * \date 2013 * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ALGORITHMS_TRAINERS_EPSILON_SVM #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_EpsilonSvmTrainer) BOOST_AUTO_TEST_CASE( EPSILON_SVM_TEST ) { const std::size_t ell = 200; const double C = 10.0; const double epsilon = 0.03; const double accuracy = 1e-8; Wave prob; RegressionDataset training = prob.generateDataset(ell); GaussianRbfKernel<> kernel(0.1); KernelExpansion svm; EpsilonSvmTrainer trainer(&kernel, C, epsilon); trainer.stoppingCondition().minAccuracy = accuracy; trainer.sparsify() = false; trainer.train(svm, training); Data output; output = svm(training.inputs()); RealVector alpha = svm.parameterVector(); for (std::size_t i=0; i= -accuracy); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/FisherLDA.cpp000066400000000000000000000034601314655040000216540ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_FISHER_LDA #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_FisherLDA) BOOST_AUTO_TEST_CASE( FISHER_LDA_TEST ){ const size_t trainExamples = 200000; FisherLDA trainer; trainer.setWhitening(false); LinearModel<> model(3, 3, true); // create datatsets - three normal distributions // [TG] why not use DataDistribution for this? RealMatrix covariance(3,3); covariance.clear(); covariance(0,0)=1; covariance(1,1)=1; covariance(2,2)=1; RealVector mean[] = {RealVector(3), RealVector(3), RealVector(3)}; mean[0](0) = 20; mean[0](1) = 0; mean[0](2) = 0; mean[1](0) = -10; mean[1](1) = 0; mean[1](2) = 20; mean[2](0) = -10; mean[2](1) = 0; mean[2](2) = -20; MultiVariateNormalDistribution dist(covariance); RealVector result[] = {RealVector(3), RealVector(3)}; result[0].clear(); result[0](2) = 1; result[1].clear(); result[1](0) = 1; std::vector input(trainExamples, RealVector(3)); std::vector target(trainExamples); for(size_t i=0;i!=trainExamples;++i) { //create sample target[i] = i % 3; input[i] = dist(Rng::globalRng).first + mean[target[i]]; } //statisticalBayesRisk/=trainExamples; ClassificationDataset dataset = createLabeledDataFromRange(input,target); trainer.train(model, dataset); // test the direction for(size_t i = 0; i != 2; ++i){ RealVector curRow = row(model.matrix(), i); std::cout << curRow << std::endl; double error = std::min(norm_sqr(curRow - result[i]), norm_sqr(curRow + result[i])); BOOST_CHECK_SMALL(error, 1e-4); // [TG] 10e-4 ??? 
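// The minimum over curRow - result[i] and curRow + result[i] is needed
// because the sign of a discriminant direction is arbitrary: negating an
// eigenvector spans the same one-dimensional subspace.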
} } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/KernelMeanClassifier.cpp000066400000000000000000000023221314655040000241350ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_PERCEPTRON #include #include #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_KernelMeanClassifier) BOOST_AUTO_TEST_CASE( KERNEL_MEAN_CLASSIFIER ) { DenseLinearKernel kernel; KernelMeanClassifier trainer(&kernel); KernelClassifier model; std::vector input(6,RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; std::vector target(6); target[0]=0; target[1]=1; target[2]=0; target[3]=1; target[4]=0; target[5]=1; ClassificationDataset dataset = createLabeledDataFromRange(input,target); trainer.train(model, dataset); for(size_t i = 0; i != 6; ++i){ RealVector result = model.decisionFunction()(input[i]); BOOST_CHECK_EQUAL(result.size(),1u); unsigned int label = result(0)>0; BOOST_CHECK_EQUAL(target[i],label); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/KernelNormalization.cpp000066400000000000000000000141651314655040000241060ustar00rootroot00000000000000#define BOOST_TEST_MODULE Trainers_Kernel_Normalization #include #include #include #include //#include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_KernelNormalization) BOOST_AUTO_TEST_CASE( Normalize_Kernel_Unit_Variance_InFeatureSpace_Simple ) { std::size_t num_points = 3; std::vector input(num_points); RealVector v(1); v(0) = 0.0; input[0] = v; v(0) = 1.0; input[1] = v; v(0) = 2.0; input[2] = v; UnlabeledData data = createDataFromRange(input); DenseLinearKernel lin; DenseScaledKernel scale( &lin ); NormalizeKernelUnitVariance<> normalizer; normalizer.train( scale, data ); std::cout << " done training. factor is " << scale.factor() << std::endl; std::cout << " mean = " << normalizer.mean() << std::endl; std::cout << " trace = " << normalizer.trace() << std::endl; BOOST_CHECK_SMALL( scale.factor() - 1.5, 1e-12); { //check in feature space double control = 0.0; for ( std::size_t i=0; i input(num_points); RealVector v(num_dims); for ( std::size_t i=0; i data = createDataFromRange(input); DenseLinearKernel lin; DenseScaledKernel scale( &lin ); NormalizeKernelUnitVariance<> normalizer; normalizer.train( scale, data ); std::cout << " done training. factor is " << scale.factor() << std::endl; std::cout << " mean = " << normalizer.mean() << std::endl; std::cout << " trace = " << normalizer.trace() << std::endl; { //check in feature space double control = 0.0; for ( std::size_t i=0; i input(num_points); RealVector v(num_dims); for ( std::size_t i=0; i data = createDataFromRange(input); DenseRbfKernel kernel(0.01); DenseScaledKernel scale( &kernel ); NormalizeKernelUnitVariance<> normalizer; normalizer.train( scale, data ); std::cout << " done training. 
factor is " << scale.factor() << std::endl; std::cout << " mean = " << normalizer.mean() << std::endl; std::cout << " trace = " << normalizer.trace() << std::endl; //check in feature space double control = 0.0; for ( std::size_t i=0; i input(num_points); // RealVector v(num_dims); // for ( std::size_t i=0; i data(input); // // DenseRbfMklKernel basekernel1(0.1); // DenseLinearMklKernel basekernel2; // DensePolynomialMklKernel basekernel3(2, 1.0); // // std::vector< DenseMklKernelFunction * > kernels; // kernels.push_back(&basekernel1); // kernels.push_back(&basekernel2); // kernels.push_back(&basekernel3); // // std::vector< std::pair< std::size_t, std::size_t > > frs; // frs.push_back( std::make_pair( 0,3 ) ); // frs.push_back( std::make_pair( 3,6 ) ); // frs.push_back( std::make_pair( 6,9 ) ); // // DenseMklKernel kernel( kernels, frs ); // DenseScaledKernel scale( &kernel ); // // NormalizeKernelUnitVariance<> normalizer; // normalizer.train( scale, data ); // std::cout << " done training. factor is " << scale.factor() << std::endl; // std::cout << " mean = " << normalizer.mean() << std::endl; // std::cout << " trace = " << normalizer.trace() << std::endl; // //check in feature space // double control = 0.0; // for ( std::size_t i=0; i #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_LDA) BOOST_AUTO_TEST_CASE( LDA_TEST_TWOCLASS ){ const size_t TrainExamples = 12000; LDA trainer; LinearClassifier<> model; //create datatsets - two overlapping normal distributions RealMatrix covariance(2,2); covariance(0,0)=16; covariance(0,1)=8; covariance(1,0)=8; covariance(1,1)=16; RealMatrix inverse(2,2,0.0); inverse(0,0) = inverse(1,1) = 1.0; blas::solveSymmPosDefSystemInPlace(covariance,inverse); RealVector mean[]={RealVector(2),RealVector(2)}; mean[0](0)=0; mean[0](1)=0; mean[1](0)=6; mean[1](1)=6; double prior[2]={std::log(1.0/3.0),std::log(2.0/3.0)}; MultiVariateNormalDistribution dist(covariance); std::vector input(TrainExamples,RealVector(2)); std::vector target(TrainExamples); double statisticalBayesRisk=0; for(size_t i=0;i!=TrainExamples;++i){ //create samples. 
class 1 has double as many elements as class 0 target[i]=(i%3 == 0); input[i]=dist(Rng::globalRng).first+mean[target[i]]; //calculate bayes Target - the best fit to the distributions RealVector diff=input[i]-mean[0]; double dist0=inner_prod(diff,prod(inverse,diff))+prior[0]; diff=input[i]-mean[1]; double dist1=inner_prod(diff,prod(inverse,diff))+prior[1]; unsigned int bayesTarget = dist0>dist1; statisticalBayesRisk+= bayesTarget != target[i]; } statisticalBayesRisk/=TrainExamples; ClassificationDataset dataset = createLabeledDataFromRange(input,target); trainer.train(model, dataset); Data results = model(dataset.inputs()); ZeroOneLoss<> loss; double classificatorRisk = loss.eval(dataset.labels(),results); std::cout<<"statistical bayes Risk: "< model; //create datatsets - two overlapping normal distributions //same as in the previous test aside from that we add a third //variable which is allways 0 RealMatrix covariance(2,2); covariance(0,0)=16; covariance(0,1)=8; covariance(1,0)=8; covariance(1,1)=16; RealMatrix inverse(2,2,0.0); inverse(0,0) = inverse(1,1) = 1.0; blas::solveSymmPosDefSystemInPlace(covariance,inverse); RealVector mean[]={RealVector(2),RealVector(2)}; mean[0](0)=0; mean[0](1)=0; mean[1](0)=6; mean[1](1)=6; MultiVariateNormalDistribution dist(covariance); double prior[2]={std::log(2.0/3.0),std::log(1.0/3.0)}; std::vector input(TrainExamples,RealVector(3)); std::vector target(TrainExamples); double statisticalBayesRisk=0; for(size_t i=0;i!=TrainExamples;++i){ //create sample target[i]= (i%3 != 0); RealVector vec = dist(Rng::globalRng).first+mean[target[i]]; //calculate bayes Target - the best fit to the distributions RealVector diff=vec-mean[0]; double dist0=inner_prod(diff,prod(inverse,diff)) + prior[0]; diff=vec-mean[1]; double dist1=inner_prod(diff,prod(inverse,diff)) + prior[1]; unsigned int bayesTarget = dist0>dist1; statisticalBayesRisk+= bayesTarget != target[i]; init(input[i])< results = model(dataset.inputs()); ZeroOneLoss<> loss; double classificatorRisk = loss.eval(dataset.labels(),results); std::cout<<"statistical bayes Risk: "< model; Rng::seed(44); //create datatsets - overlapping normal distributions RealMatrix covariance(2,2); covariance(0,0)=16; covariance(0,1)=8; covariance(1,0)=8; covariance(1,1)=16; RealMatrix inverse(2,2,0.0); inverse(0,0) = inverse(1,1) = 1.0; blas::solveSymmPosDefSystemInPlace(covariance,inverse); std::vector mean(classes,RealVector(2)); for(unsigned int c = 0; c != classes; ++c){ for(std::size_t j = 0; j != 2; ++j){ mean[c](j) = Rng::gauss(0,30); } } MultiVariateNormalDistribution dist(covariance); std::vector input(TrainExamples,RealVector(2)); std::vector target(TrainExamples); double statisticalBayesRisk=0; for(size_t i=0;i!=TrainExamples;++i){ //create sample target[i]=i%classes; input[i]=dist(Rng::globalRng).first+mean[target[i]]; //calculate bayes Target - the best fit to the distributions unsigned int bayesTarget = 0; double minDist = 1.e30; for(unsigned int c = 0; c != classes; ++c){ RealVector diff=input[i]-mean[c]; double dist=inner_prod(diff,prod(inverse,diff)); if(dist results = model(dataset.inputs()); ZeroOneLoss<> loss; double classificatorRisk = loss.eval(dataset.labels(),results); std::cout<<"statistical bayes Risk: "<(covariance,inverse); std::vector mean(classes,RealVector(2)); for(unsigned int c = 0; c != classes; ++c){ mean[c](0) = Rng::gauss(0,30); mean[c](1) = Rng::gauss(0,30); } MultiVariateNormalDistribution dist(covariance); std::vector input(TrainExamples,RealVector(2)); std::vector target(TrainExamples); 
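// As in the weighted C-SVM test, all-equal weights must reproduce the
// unweighted model up to deterministic factors: unweighted LDA normalizes the
// pooled covariance by (#inputs - #classes) while the weighted variant
// divides by the total weight, and the offsets differ by the log class
// priors. The checks below divide these factors out before comparing.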
for(size_t i=0;i!=TrainExamples;++i){ //create sample target[i]=i%classes; input[i]=dist(Rng::globalRng).first+mean[target[i]]; } ClassificationDataset dataset = createLabeledDataFromRange(input,target); //first check that the weighted and unweighted version give the same results when all weights are equal LDA trainer; LinearClassifier<> modelUnweighted; LinearClassifier<> modelWeighted; trainer.train(modelUnweighted, dataset); trainer.train(modelWeighted, WeightedLabeledData(dataset,2.0));//two as 1 might hide errors as sqrt(1)=1 //we need to correct for the fact that the unweighted versions normalizes with inputs-classes //and the unnormalized only with sumOfWeights=inputs. BOOST_CHECK_SMALL( norm_frobenius(modelUnweighted.decisionFunction().matrix()/0.9-modelWeighted.decisionFunction().matrix()), 0.0001 ); RealVector normalizedOffset = (modelUnweighted.decisionFunction().offset()-blas::repeat(std::log(0.1),10))/0.9; BOOST_CHECK_SMALL( norm_2(normalizedOffset-modelWeighted.decisionFunction().offset()+blas::repeat(std::log(0.1),10)), 0.0001 ); //resample the dataset by creating duplications. This must be the same as the normal //dataset initialized with the correct multiplicities for(std::size_t trial = 0; trial != Trials; ++trial){ //generate weighted and unweighted dataset WeightedLabeledData weightedDataset(dataset,0.0); ClassificationDataset unweightedDataset(1); unweightedDataset.batch(0).input.resize(DatasetSize,inputDimension(dataset)); unweightedDataset.batch(0).label.resize(DatasetSize); RealVector classWeight(classes,0); for(std::size_t i = 0; i != DatasetSize; ++i){ std::size_t index = Rng::discrete(0,TrainExamples-1); weightedDataset.element(index).weight +=1.0; unweightedDataset.element(i) = dataset.element(index); classWeight(weightedDataset.element(index).data.label) += 1.0/DatasetSize; } trainer.train(modelUnweighted, unweightedDataset); trainer.train(modelWeighted, weightedDataset); double covarianceCorrection = double(DatasetSize-classes)/DatasetSize; BOOST_CHECK_SMALL( norm_frobenius(modelUnweighted.decisionFunction().matrix()/covarianceCorrection-modelWeighted.decisionFunction().matrix()), 0.0001 ); RealVector normalizedOffset = (modelUnweighted.decisionFunction().offset()-log(classWeight))/covarianceCorrection; BOOST_CHECK_SMALL( norm_2(normalizedOffset-modelWeighted.decisionFunction().offset()+log(classWeight)), 0.0001 ); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/LinearRegression.cpp000066400000000000000000000037371314655040000233750ustar00rootroot00000000000000#define BOOST_TEST_MODULE TRAINERS_LINEARREGRESSION #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_LinearRegression) BOOST_AUTO_TEST_CASE( LinearRegression_TEST ){ const size_t trainExamples = 60000; LinearRegression trainer; LinearModel<> model; RealMatrix matrix(2, 2); RealVector offset(2); matrix(0,0) = 3; matrix(1,1) = -5; matrix(0,1) = -2; matrix(1,0) = 7; offset(0) = 3; offset(1) = -6; model.setStructure(matrix, offset); // create datatset - the model output + Gaussian noise RealMatrix covariance(2, 2); covariance(0,0) = 1; covariance(0,1) = 0; covariance(1,0) = 0; covariance(1,1) = 1; MultiVariateNormalDistribution noise(covariance); Uniform<> uniform(Rng::globalRng,-3.0, 3.0); // create samples std::vector input(trainExamples,RealVector(2)); std::vector trainTarget(trainExamples,RealVector(2)); std::vector testTarget(trainExamples,RealVector(2)); for (size_t i=0;i!=trainExamples;++i) { 
input[i](0) = uniform(); input[i](1) = uniform(); testTarget[i] = model(input[i]); trainTarget[i] = noise(Rng::globalRng).first + testTarget[i]; } // let the model forget... matrix.clear(); offset.clear(); model.setStructure(matrix,offset); // train the model, overwriting its parameters RegressionDataset trainset = createLabeledDataFromRange(input, trainTarget); trainer.train(model, trainset); // evaluate using the ErrorFunction RegressionDataset testset = createLabeledDataFromRange(input, testTarget); SquaredLoss<> loss; double error=loss(testset.labels(),model(testset.inputs())); BOOST_CHECK_SMALL(error, 1e-4); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/LinearSvmTrainer.cpp000066400000000000000000000127721314655040000233460ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test case for various linear SVM trainers. * * \par * This unit test trains a number of multi-class SVMs with two * different trainers, namely with a specialized trainer for * linear SVMs and a general purpose SVM trainer with linear * kernel function. It compares the weight vectors obtained * with both approaches. (Approximate) equality of the weight * vectors indicates correctness of both types of trainers. * * * * \author T. Glasmachers * \date - * * * \par Copyright 1995-2015 Shark Development Team * *
* <BR><HR>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ALGORITHMS_TRAINERS_MCSVMTRAINER #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace shark; using namespace std; #define RELATIVE_ACCURACY 0.01 #define MAX_KKT_VIOLATION 1e-5 // subtract the mean from each row void ZeroSum(RealMatrix& mat) { RealVector sum(mat.size2(), 0.0); for (size_t j=0; j kernel; AbstractLinearSvmTrainer* linearTrainer[8]; AbstractSvmTrainer* nonlinearTrainer[8]; #define TRAINER(index, kind) \ linearTrainer[index] = new LinearMcSvm##kind##Trainer(C); \ nonlinearTrainer[index] = new McSvm##kind##Trainer(&kernel, C,false); TRAINER(0, MMR); TRAINER(1, OVA); TRAINER(2, WW); TRAINER(3, CS); TRAINER(4, LLW); TRAINER(5, ADM); TRAINER(6, ATS); TRAINER(7, ATM); for (unsigned int run=0; run<10; run++) { // generate random training set Rng::seed(run); cout << endl << "generating test problem " << (run+1) << " out of 10" << endl; vector input(ell, CompressedRealVector(dim)); vector target(ell); for (size_t i=0; i dataset = createLabeledDataFromRange(input, target); for (size_t i=0; i<8; i++) { cout << " testing " << linearTrainer[i]->name() << " vs. " << nonlinearTrainer[i]->name() << endl; // train machine with two trainers LinearClassifier linear; linearTrainer[i]->stoppingCondition().minAccuracy = MAX_KKT_VIOLATION; linearTrainer[i]->train(linear, dataset); KernelClassifier nonlinear; nonlinearTrainer[i]->stoppingCondition().minAccuracy = MAX_KKT_VIOLATION; nonlinearTrainer[i]->train(nonlinear, dataset); // extract weight matrices RealMatrix linear_w = linear.decisionFunction().matrix(); RealMatrix nonlinear_w(classes, dim); for (size_t j=0; j
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ALGORITHMS_TRAINERS_MCSVMTRAINER #include #include #define SHARK_COUNT_KERNEL_LOOKUPS //in this example, we want to count the kernel lookups #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace shark; /* // verbose version: #define CHECK_ALPHAS(input, is, should ) { \ std::cout << " # iterations: " << trainer.solutionProperties().iterations << std::endl; \ for (unsigned int i=0; i void CHECK_ALPHAS(std::vector const& input, T const& is, U const& should ) { std::cout< input(5); std::vector target(5); for (std::size_t i=0; i<5; i++) input[i].resize(2); input[0](0) = -1.0; input[0](1) = -1.0; target[0] = 0; input[1](0) = 1.0; input[1](1) = -1.0; target[1] = 1; input[2](0) = 0.0; input[2](1) = 1.0; target[2] = 2; input[3](0) = 0.0; input[3](1) = 2.0; target[3] = 2; input[4](0) = 0.0; input[4](1) = 99.0; target[4] = 2; ClassificationDataset dataset = createLabeledDataFromRange(input, target); LinearKernel<> kernel; // OVA-SVM { double alpha[15] = {0.0, -0.5, -0.5, -0.5, 0.0, -0.5, -0.5, -0.5, 0.0, -0.25, -0.25, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmOVATrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // MMR-SVM { double alpha[15] = {0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmMMRTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // WW-SVM { double alpha[15] = {0.4375, -0.25, -0.1875, -0.25, 0.4375, -0.1875, -0.25, -0.25, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmWWTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.shrinking() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // CS-SVM { double alpha[15] = {0.25, -0.25, 0.0, -0.25, 0.25, 0.0, -0.000163, -0.25, 0.250163, -0.1666, -0.04166, 0.2083, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmCSTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; 
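// Solving each dual to a KKT violation of 1e-8 keeps the solver error
// negligible relative to the tolerance used by CHECK_ALPHAS, so any mismatch
// with the hand-computed alpha tables points at the machine formulation
// rather than at optimization accuracy.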
trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // LLW-SVM { double alpha[15] = {0.0, -0.5, -0.5, -0.5, 0.0, -0.5, -0.5, -0.5, 0.0, -0.25, -0.25, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmLLWTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // ADM-SVM { double alpha[15] = {0.0, -0.4375, -0.0625, -0.4375, 0.0, -0.0625, 0.0, -0.5, 0.0, -0.375, -0.125, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmADMTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // ATS-SVM { double alpha[15] = {0.0, -0.5, 0.0, -0.5, 0.0, 0.0, -0.5, -0.5, 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmATSTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // ATM-SVM { double alpha[15] = {0.0, -0.4375, -0.0625, -0.4375, 0.0, -0.0625, 0.0, -0.5, 0.0, -0.375, -0.125, 0.0, 0.0, 0.0, 0.0}; KernelClassifier svm; McSvmATMTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } // Reinforced-SVM { double alpha[15] = {0.5, -0.5, 0.0, -0.5, 0.5, 0.0, -0.5, -0.5, 0.5, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0}; KernelClassifier svm; McReinforcedSvmTrainer trainer(&kernel, 0.5, false); std::cout << "testing " << trainer.name() << std::endl; trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); std::cout << "kernel computations: " << trainer.accessCount() << std::endl; std::cout << svm.parameterVector() << std::endl; CHECK_ALPHAS(input,svm.parameterVector(), alpha); } } //~ BOOST_AUTO_TEST_CASE( CSVM_TRAINER_ITERATIVE_BIAS_TEST ) //~ { //~ //Chessboard problem; //~ CircleInSquare problem(2,0.0,true); //~ ClassificationDataset dataset = problem.generateDataset(1000); //~ //GaussianRbfKernel<> kernel(0.5); //~ LinearKernel<> kernel; //~ KernelClassifier svmTruth(false); //~ KernelClassifier svmTruth2(true); //~ KernelClassifier svmTruth3(true); //~ KernelClassifier svmTest1(false,2); //~ KernelClassifier svmTest2(true,2); //~ double C = 3; //~ {//train as a binary svm problem //~ CSvmTrainer trainerTruth(&kernel, C, false); //~ trainerTruth.sparsify() = false; //~ trainerTruth.shrinking() = false; //~ trainerTruth.stoppingCondition().minAccuracy = 1e-4; //~ trainerTruth.train(svmTruth, dataset); //~ std::cout<<"a"< trainer(&kernel, 2*C); //~ trainer.sparsify() = false; //~ trainer.shrinking() = false; //~ trainer.stoppingCondition().minAccuracy = 1e-4; //~ trainer.train(svmTest1, dataset); 
//~ std::cout<<"c"< loss; //~ std::cout<<"CSVM: "< #include #include #include #include // In this test case, we want to count the kernel lookups #define SHARK_COUNT_KERNEL_LOOKUPS #include #include #include #include #include #include #include #include "../../Utils.h" namespace shark { /// Test fixture class SvmWafFixture { public: SvmWafFixture() { //~ string2data(m_labeledData, m_dataInString, LAST_COLUMN, true, false, NULL); //~ string2data(m_labeledData2, m_dataInString2, LAST_COLUMN, true, false, NULL); csvStringToData(m_labeledData,m_dataInString,LAST_COLUMN); csvStringToData(m_labeledData2,m_dataInString2,LAST_COLUMN); } static const std::string m_dataInString; static const std::string m_dataInString2; LabeledData m_labeledData; LabeledData m_labeledData2; }; const std::string SvmWafFixture::m_dataInString = "\ ?,180,12,0\n\ 5.92,190,11,0\n\ 5.58,170,12,0\n\ 5.92,165,10,0\n\ 5,100,6,1\n\ 5.5,150,8,1\n\ 5.42,130,7,1\n\ 5.75,150,9,1\r"; /// The difference is adding additional one column of '?' to @a m_dataInString const std::string SvmWafFixture::m_dataInString2 = "\ ?,180,12,?,0\n\ 5.92,190,11,?,0\n\ 5.58,170,12,?,0\n\ 5.92,165,10,?,0\n\ 5,100,6,?,1\n\ 5.5,150,8,?,1\n\ 5.42,130,7,?,1\n\ 5.75,150,9,?,1\r"; BOOST_FIXTURE_TEST_SUITE (Algorithms_Trainers_MissingFeatureSvmTrainerTests, SvmWafFixture) BOOST_AUTO_TEST_CASE(NoMissingFeatures) { // Test that out trainer works fine with normal feature set without Missing features // In this case, we should get the same results as class CSvmTrainer // simple 5-point dataset std::vector input(5); std::vector target(5); for (std::size_t i=0; i<5; i++) input[i].resize(2); input[0](0) = 0.0; input[0](1) = 0.0; target[0] = 0; input[1](0) = 2.0; input[1](1) = 2.0; target[1] = 1; input[2](0) = -1.0; input[2](1) = -8.0; target[2] = 0; input[3](0) = -1.0; input[3](1) = -1.0; target[3] = 0; input[4](0) = 3.0; input[4](1) = 3.0; target[4] = 1; ClassificationDataset dataset = createLabeledDataFromRange(input, target); // Soft-margin training with linear kernel RealVector seenParam1; { LinearKernel<> kernel; MissingFeaturesKernelExpansion svm; MissingFeatureSvmTrainer trainer(&kernel, 0.1, true); trainer.setMaxIterations(1); trainer.stoppingCondition().minAccuracy = 1e-8; trainer.train(svm, dataset); seenParam1 = svm.parameterVector(); // Test against analytically known solution // param 0-4 is alpha, param 5 is bias(offset) RealVector expected(6); expected(0) = -0.1; expected(1) = 0.1; expected(2) = 0.0; expected(3) = -0.0125; expected(4) = 0.0125; expected(5) = -0.5; BOOST_CHECK(test::verifyVectors(seenParam1, expected)); } // The result should also be the same as results trained by CSvmTrainer { LinearKernel<> kernel; KernelClassifier svm; CSvmTrainer trainer(&kernel, 0.1, true); trainer.stoppingCondition().minAccuracy = 1e-8; trainer.sparsify() = false; trainer.train(svm, dataset); RealVector seenParam2 = svm.parameterVector(); // seenParam1 and seenParam2 should be the same BOOST_CHECK(test::verifyVectors(seenParam1, seenParam2)); } } BOOST_AUTO_TEST_CASE(MissingFeatures) { RealVector pattern1(3); pattern1(0) = 5.85; pattern1(1) = std::numeric_limits::quiet_NaN(); pattern1(2) = 8.5; RealVector pattern2(4); pattern2(0) = 5.85; pattern2(1) = std::numeric_limits::quiet_NaN(); pattern2(2) = 8.5; pattern2(3) = 170.5; RealVector param1; RealVector output1; { LinearKernel<> kernel; MissingFeaturesKernelExpansion svm1; MissingFeatureSvmTrainer trainer(&kernel, 0.1, true); trainer.setMaxIterations(4); trainer.stoppingCondition().minAccuracy = 1e-8; // Train with 
normal data trainer.train(svm1, m_labeledData); param1 = svm1.parameterVector(); output1 = svm1(pattern1); } RealVector param2; RealVector output2; { // Train with data with missing features LinearKernel<> kernel; MissingFeaturesKernelExpansion svm2; MissingFeatureSvmTrainer trainer(&kernel, 0.1, true); trainer.train(svm2, m_labeledData2); trainer.setMaxIterations(4); trainer.stoppingCondition().minAccuracy = 1e-8; param2 = svm2.parameterVector(); // Try to make a prediction output2 = svm2(pattern2); } // Should be the same BOOST_CHECK(test::verifyVectors(param1, param2)); BOOST_CHECK(test::verifyVectors(output1, output2)); } BOOST_AUTO_TEST_SUITE_END() } // namespace shark { Shark-3.1.4/Test/Algorithms/Trainers/NBClassifierTrainerTests.cpp000066400000000000000000000142271314655040000247720ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test cases for naive Bayes classifier trainer * * * * * \author B. Li * \date 2012 * * * \par Copyright 1995-2015 Shark Development Team * *
* <http://image.diku.dk/shark/>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE NaiveBayesClassifierTrainerTestModule #include "shark/Algorithms/Trainers/Distribution/DistTrainerContainer.h" #include "shark/Algorithms/Trainers/Distribution/NormalTrainer.h" #include "shark/Algorithms/Trainers/NBClassifierTrainer.h" #include "shark/Data/Csv.h" #include "shark/LinAlg/Base.h" #include "shark/Models/NBClassifier.h" #include "shark/Rng/Normal.h" #include "shark/Rng/Rng.h" #include "shark/Rng/Uniform.h" #include #include #include #include namespace shark { /// A help class to help test cases access internal states of NBClassifier class DummyNBClassifier : public NBClassifier<> { public: typedef NBClassifier<> Base; explicit DummyNBClassifier(const NBClassifier<>::FeatureDistributionsType& featureDists) : NBClassifier<>(featureDists), m_featureDistributions(Base::m_featureDistributions), m_classPriors(Base::m_classPriors) {} DummyNBClassifier(std::size_t classSize, std::size_t featureSize) : NBClassifier<>(classSize, featureSize), m_featureDistributions(Base::m_featureDistributions), m_classPriors(Base::m_classPriors) {} typedef NBClassifier<>::FeatureDistributionsType FeatureDistributionsType; typedef NBClassifier<>::ClassPriorsType ClassPriorsType; FeatureDistributionsType& m_featureDistributions; ClassPriorsType& m_classPriors; }; /// Fixture for testing naive Bayes classifier trainer class NBClassifierTrainerFixture { public: typedef NBClassifier<>::AbstractDistPtr AbstractDistPtr; NBClassifierTrainerFixture() : tolerancePercentage(0.01) { csvStringToData(m_labeledData,m_dataInString,LAST_COLUMN); } void verifyNormalDistribution(const DummyNBClassifier& myNbClassifier) { // Verify class distribution BOOST_CHECK_EQUAL(myNbClassifier.m_classPriors.size(), 2u); BOOST_CHECK_CLOSE(myNbClassifier.m_classPriors[0], 0.5, tolerancePercentage); BOOST_CHECK_CLOSE(myNbClassifier.m_classPriors[1], 0.5, tolerancePercentage); // Verify feature distribution. 
format(mean, variance) BOOST_REQUIRE(myNbClassifier.m_featureDistributions.size() == 2u); // male { std::vector > expectedMaleDist = boost::assign::map_list_of(5.855, 3.5033e-02)(176.25,1.2292e+02)(11.25,9.1667e-01); unsigned int i = 0; BOOST_REQUIRE_EQUAL(myNbClassifier.m_featureDistributions[0].size(), 3u); BOOST_FOREACH(const NBClassifier<>::AbstractDistPtr& dist, myNbClassifier.m_featureDistributions[0]) { const Normal<>& featureDistribution = dynamic_cast&>(*dist); BOOST_CHECK_CLOSE(featureDistribution.mean(), expectedMaleDist[i].first, tolerancePercentage); BOOST_CHECK_CLOSE(featureDistribution.variance(), expectedMaleDist[i].second, tolerancePercentage); ++i; } } // female { std::vector > expectedFemaleDist = boost::assign::map_list_of(5.4175,9.7225e-02)(132.5,5.5833e+02)(7.5,1.6667e+00); unsigned int i = 0; BOOST_REQUIRE_EQUAL(myNbClassifier.m_featureDistributions[1].size(), 3u); BOOST_FOREACH(const NBClassifier<>::AbstractDistPtr& dist, myNbClassifier.m_featureDistributions[1]) { const Normal<>& featureDistribution = dynamic_cast&>(*dist); BOOST_CHECK_CLOSE(featureDistribution.mean(), expectedFemaleDist[i].first, tolerancePercentage); BOOST_CHECK_CLOSE(featureDistribution.variance(), expectedFemaleDist[i].second, tolerancePercentage); ++i; } } } /// Create a normal distribution from given @a mean and @a variance AbstractDistPtr createNormalDist() const { return AbstractDistPtr(new Normal<>(Rng::globalRng)); } static const std::string m_dataInString; LabeledData m_labeledData; const double tolerancePercentage; /// The class under test NBClassifierTrainer<> m_NbTrainer; }; // Last column is label, 0 is for male, 1 is for female const std::string NBClassifierTrainerFixture::m_dataInString = "\ 6,180,12,0\n\ 5.92,190,11,0\n\ 5.58,170,12,0\n\ 5.92,165,10,0\n\ 5,100,6,1\n\ 5.5,150,8,1\n\ 5.42,130,7,1\n\ 5.75,150,9,1\r"; BOOST_FIXTURE_TEST_SUITE (Algorithms_Trainers_NBClassifierTrainerTests, NBClassifierTrainerFixture) BOOST_AUTO_TEST_CASE(TestNormal) { // Test that the trainer is able to provide correct distributions to the classifier DummyNBClassifier myNbClassifier(2u, 3u); // all distributions are normal m_NbTrainer.train(myNbClassifier, m_labeledData); verifyNormalDistribution(myNbClassifier); } BOOST_AUTO_TEST_CASE(AccessInvidualDistTrainer) { // Test that we are able to access individual distribution trainer // get DistTrainerContainer& trainerContainer = m_NbTrainer.getDistTrainerContainer(); BOOST_CHECK_NO_THROW(trainerContainer.getNormalTrainer()); // set BOOST_CHECK_NO_THROW(trainerContainer.setNormalTrainer(NormalTrainer())); } BOOST_AUTO_TEST_SUITE_END() } // namespace shark { Shark-3.1.4/Test/Algorithms/Trainers/Normalization.cpp000066400000000000000000000114211314655040000227350ustar00rootroot00000000000000#define BOOST_TEST_MODULE Trainers_Normalization #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_Normalization) BOOST_AUTO_TEST_CASE( NORMALIZE_TO_UNIT_VARIANCE ) { std::vector input(3); RealVector v(1); v(0) = 0.0; input[0] = v; v(0) = 1.0; input[1] = v; v(0) = 2.0; input[2] = v; UnlabeledData set = createDataFromRange(input); NormalizeComponentsUnitVariance<> normalizer(true); Normalizer<> map; normalizer.train(map, set); Data transformedSet = map(set); double error = std::abs(-std::sqrt(1.5) - transformedSet.element(0)(0)) + std::abs(transformedSet.element(1)(0)) + std::abs(sqrt(1.5) - transformedSet.element(2)(0)); BOOST_CHECK_SMALL(error, 1e-10); } BOOST_AUTO_TEST_CASE( 
NORMALIZE_TO_UNIT_INTERVAL ) { std::vector input(3); RealVector v(1); v(0) = 0.0; input[0] = v; v(0) = 1.0; input[1] = v; v(0) = 2.0; input[2] = v; UnlabeledData set = createDataFromRange(input); NormalizeComponentsUnitInterval<> normalizer; Normalizer<> map; normalizer.train(map, set); Data transformedSet = map(set); BOOST_CHECK_SMALL(transformedSet.element(0)(0),1.e-10); BOOST_CHECK_SMALL(0.5 - transformedSet.element(1)(0),1.e-10); BOOST_CHECK_SMALL(1.0 - transformedSet.element(2)(0),1.e-10); } BOOST_AUTO_TEST_CASE( NORMALIZE_WHITENING) { RealMatrix mat(3,3); mat(0,0)=2; mat(0,1)=0.1; mat(0,2)=0.3; mat(1,0)=0.1; mat(1,1)=5; mat(1,2)=0.05; mat(2,0)=0.3; mat(2,1)=0.05;mat(2,2)=8; RealVector mean(3); mean(0)=1; mean(1)=-1; mean(2)=3; MultiVariateNormalDistribution dist(mat); std::vector input(1000,RealVector(3)); for(std::size_t i = 0; i != 1000;++i) input[i]=dist(Rng::globalRng).first+mean; UnlabeledData set = createDataFromRange(input); NormalizeComponentsWhitening normalizer(1.5); LinearModel<> map(3, 3); normalizer.train(map, set); Data transformedSet = map(set); RealMatrix covariance; meanvar(transformedSet, mean, covariance); std::cout< input(1000,RealVector(3)); for(std::size_t i = 0; i != 1000;++i) input[i]=dist(Rng::globalRng).first+mean; UnlabeledData set = createDataFromRange(input); NormalizeComponentsWhitening normalizer(1.5); LinearModel<> map(3, 3); normalizer.train(map, set); Data transformedSet = map(set); RealMatrix covariance; meanvar(transformedSet, mean, covariance); std::cout< input(1000,RealVector(3)); for(std::size_t i = 0; i != 1000;++i) input[i]=dist(Rng::globalRng).first+mean; UnlabeledData set = createDataFromRange(input); NormalizeComponentsZCA normalizer(1.5); LinearModel<> map(3, 3); normalizer.train(map, set); Data transformedSet = map(set); RealMatrix covariance; meanvar(transformedSet, mean, covariance); std::cout<
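// Illustrative sketch (an assumption, not code from the original file):
// after whitening, the empirical covariance of the transformed data should
// be close to targetVariance times the identity matrix, assuming the
// constructor argument of NormalizeComponentsWhitening and
// NormalizeComponentsZCA (1.5 above) is the target variance. A check over
// the covariance returned by meanvar could look like this:
void checkWhitenedCovariance(RealMatrix const& covariance, double targetVariance)
{
	for (std::size_t i = 0; i != covariance.size1(); ++i)
	{
		for (std::size_t j = 0; j != covariance.size2(); ++j)
		{
			double expected = (i == j) ? targetVariance : 0.0;
			// 1000 samples leave visible sampling noise, so the bound is loose
			BOOST_CHECK_SMALL(covariance(i, j) - expected, 0.1);
		}
	}
}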
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ALGORITHMS_TRAINERS_ONE_CLASS_SVM #include #include #include #include #include #include using namespace shark; class Gaussians : public DataDistribution { public: void draw(RealVector& point) const { point.resize(2); size_t cluster = Rng::discrete(0, 4); double alpha = 0.4 * M_PI * cluster; point(0) = 3.0 * cos(alpha) + 0.75 * Rng::gauss(); point(1) = 3.0 * sin(alpha) + 0.75 * Rng::gauss(); } }; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_OneClassSvmTrainer) BOOST_AUTO_TEST_CASE( ONE_CLASS_SVM_TEST ) { const std::size_t ell = 2500; const double nu = 0.7; const double gamma = 0.5; const double threshold = 0.02; // 1 / sqrt(ell) GaussianRbfKernel<> kernel(gamma); KernelExpansion ke; Gaussians problem; UnlabeledData data = problem.generateDataset(ell); OneClassSvmTrainer trainer(&kernel, nu); trainer.sparsify() = false; trainer.train(ke, data); Data output = ke(data); // check deviation of fraction of negatives from nu std::size_t pos = 0; std::size_t neg = 0; for (std::size_t i=0; i 0.0) pos++; else if (f < 0.0) neg++; } double p = (double)pos / (double)ell; double n = (double)neg / (double)ell; BOOST_CHECK_SMALL(p - 1.0 + nu, threshold); BOOST_CHECK_SMALL(n - nu, threshold); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Algorithms/Trainers/PCA.cpp000066400000000000000000000176261314655040000205270ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test case for optimization of the hyperparameters of a * Gaussian Process/Regularization Network using evidence/marginal * likelihood maximization. * * * * \author Christian Igel, Oswin Krause * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* <http://image.diku.dk/shark/>
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ #include #include #include #include #define BOOST_TEST_MODULE ALGORITHM_PCA #include #include using namespace std; using namespace shark; ///Principal components of our multivariate data distribution, we will ///use them later for checking double principalComponents[3][3] = { { 5, 0, 0}, { 0, 2, 2}, { 0,-1, 1} }; ///The 3D test distribution is just a multivariate Gaussian. UnlabeledData createData3D() { const unsigned numberOfExamples = 30000; RealVector mean(3); mean(0) = 1; mean(1) = -1; mean(2) = 3; // to create the covariance matrix we first put the // copy the principal components in the matrix // and than use an outer product RealMatrix covariance(3,3); for(int i = 0; i != 3; ++i) { for(int j = 0; j != 3; ++j) { covariance(i,j) = principalComponents[i][j]; } } covariance = prod(trans(covariance),covariance); //now we can create the distribution MultiVariateNormalDistribution distribution(covariance); //and we sample from it std::vector data(numberOfExamples); BOOST_FOREACH(RealVector& sample, data) { //first element is the sample, second is the underlying uniform gaussian sample = mean + distribution(Rng::globalRng).first; } return createDataFromRange(data); } ///The 2D test distribution is an even simpler Gaussian. UnlabeledData createData2D() { const unsigned numberOfExamples = 10000; RealMatrix C(2, 2); RealVector mu(2); C.clear(); C(0, 0) = 16.; C(1, 1) = 1.; mu(0) = mu(1) = 1.; MultiVariateNormalDistribution distribution(C); std::vector v; for(unsigned i=0; i createDataNotFullRank() { const unsigned dimensions = 10; const unsigned numberOfExamples = 5; RealVector mean(dimensions,0); mean(0) = 1; mean(1) = -1; mean(2) = 3; // to create the covariance matrix we first put the // copy the principal components in the matrix // and than use an outer product RealMatrix covariance(dimensions,dimensions,0.0); diag(covariance) = blas::repeat(0.001,dimensions); for(int i = 0; i != 3; ++i) { for(int j = 0; j != 3; ++j) { covariance(i,j) = principalComponents[i][j]; } } covariance = prod(trans(covariance),covariance); //now we can create the distribution MultiVariateNormalDistribution distribution(covariance); //and we sample from it std::vector data(numberOfExamples); BOOST_FOREACH(RealVector& sample, data) { //first element is the sample, second is the underlying uniform gaussian sample = mean + distribution(Rng::globalRng).first; } return createDataFromRange(data,2);//small batch size to get batching errors } BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_PCA) BOOST_AUTO_TEST_CASE( PCA_TEST_MORE_DATA_THAN_DIMENSIONS ){ // // 1. 
2D test with whitening and without dimensionality // reduction // // create 2D sample data UnlabeledData data = createData2D(); Data encodedData, decodedData; // compute statistics RealVector mean, var; meanvar(data, mean, var); cout << "data mean\t" << mean << " \tvariance\t" << var << endl; // do PCA with whitening bool whitening = true; PCA pca(data, whitening); // encode data and compute statistics LinearModel<> enc; pca.encoder(enc); encodedData = enc(data); RealVector emean, evar; meanvar(encodedData, emean, evar); cout << "encoded mean\t" << emean << " \tvariance\t" << evar << endl; // decode data again and compute statistics LinearModel<> dec; pca.decoder(dec); RealVector dmean, dvar; decodedData = dec(encodedData); meanvar(decodedData, dmean, dvar); cout << "decoded mean\t" << dmean << " \tvariance\t" << dvar << endl; /// do checks for(unsigned i=0; i<2; i++) { // have mean and variance correctly been reconstructed BOOST_CHECK_SMALL(mean(i) - dmean(i), 1.e-6); BOOST_CHECK_SMALL(var(i) - dvar(i), 1.e-6); // is the variance one after whitening BOOST_CHECK_SMALL(evar(i) - 1., 1.e-10); // is the mean zero after PCA BOOST_CHECK_SMALL(emean(i), 1.e-10); } // // 3D test case with dimensionalty reduction, without // whitening // data = createData3D(); // With the definition of the model, we declare, how many // principal components we want. If we want all, we set // inputs=outputs = 3 but since want to do a reduction, we use // only 2 in the second argument. The third argument is the // bias. The PCA class needs a bias to work. LinearModel<> pcaModel(3,2,true); pca.setWhitening(false); pca.train(pcaModel,data); RealVector pc1 = row(pcaModel.matrix(),0) * sqrt(pca.eigenvalues()(0)); RealVector pc2 = row(pcaModel.matrix(),1) * sqrt(pca.eigenvalues()(1)); cout << "principal component 1: " << pc1 << endl; cout << "principal component 2: " << pc2 << endl; // Check whether results are close to be expected form the // covariance matrix of the underlying distribution. Because // of samplig (and also numerical errors), the tolerance is // pretty high. 
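// Note: eigenvectors are only determined up to sign, which is why the checks
// below compare absolute values of the components rather than raw entries.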
for(unsigned i=0; i<3; i++) { BOOST_CHECK_SMALL(fabs(principalComponents[0][i]) - fabs(pc1(i)), 0.05); BOOST_CHECK_SMALL(fabs(principalComponents[1][i]) - fabs(pc2(i)), 0.05); } } BOOST_AUTO_TEST_CASE( PCA_TEST_LESS_DATA_THAN_DIMENSIONS ){ UnlabeledData data = createDataNotFullRank(); Data encodedData, decodedData; // compute statistics RealVector mean, var; meanvar(data, mean, var); // do PCA with whitening bool whitening = true; PCA pca(data, whitening); // encode data and compute statistics LinearModel<> enc; pca.encoder(enc); encodedData = enc(data); RealVector emean; RealMatrix ecovar; meanvar(encodedData, emean, ecovar); // decode data again and compute statistics LinearModel<> dec; pca.decoder(dec); RealVector dmean, dvar; decodedData = dec(encodedData); meanvar(decodedData, dmean, dvar); /// do checks for(unsigned i=0; i #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_Perceptron) BOOST_AUTO_TEST_CASE( PERCEPTRON ){ DenseLinearKernel kernel; KernelClassifier model; Perceptron trainer(&kernel); std::vector input(6,RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; std::vector target(6); target[0]=0; target[1]=1; target[2]=0; target[3]=1; target[4]=0; target[5]=1; ClassificationDataset dataset = createLabeledDataFromRange(input,target); trainer.train(model, dataset); for(size_t i = 0; i != 6; ++i){ std::cout<
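// Illustrative sketch (an assumption, not the original file's code), reusing
// model, input and target from the perceptron test above: the six points are
// linearly separable by the sign of their first coordinate, so the trained
// kernel perceptron should reproduce every training label exactly.
for (size_t i = 0; i != 6; ++i)
{
	BOOST_CHECK_EQUAL(model(input[i]), target[i]);
}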
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE ALGORITHMS_TRAINERS_REGULARIZATION_NETWORK #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_RegularizationNetworkTrainer) BOOST_AUTO_TEST_CASE( REGULARIZATION_NETWORK_TEST ) { const std::size_t ell = 200; const double lambda = 1e-6; const double threshold = 1e-8; Wave prob(0.0, 5.0); RegressionDataset training = prob.generateDataset(ell); GaussianRbfKernel<> kernel(0.1); KernelExpansion svm; RegularizationNetworkTrainer trainer(&kernel, lambda); trainer.train(svm, training); Data output; output = svm(training.inputs()); RealVector alpha = svm.parameterVector(); for (std::size_t i=0; i #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Algorithms_Trainers_SigmoidFit) BOOST_AUTO_TEST_CASE( SIGMOID_FIT_TEST_RPROP_NO_ENCODING_DETERMINISTIC ){ bool TRANSFORM_INPUTS = false; //no encoding, i.e., the non-unconstrained variant unsigned int NUM_DRAWS = 90; // create model, and vars for the flow RealVector gen_parameters( 2 ); //for generating data RealVector obscure_parameters( 2 ); //starting points before optimization/training // generate random parameter set gen_parameters(0) = 1.4; gen_parameters(1) = -0.3; // for probabilities of 0.1, ... , 0.9 , sample from the model std::vector dataSamplePoints(NUM_DRAWS,RealVector(1)); for ( size_t j=0; j<9; j++ ) { double target = 0.1*(j+1); //these are the probabilities i want. double source = 1.0/gen_parameters(0) * (gen_parameters(1) - log(-1+1/target)); //and i have to look here on the x-axis to get them. for ( unsigned int k=0; k<10; k++ ) { dataSamplePoints[j*10+k](0)=source; } } // set the targets according to their probabilities std::vector dataSampleLabels(NUM_DRAWS,0); for ( size_t i=0; i<9; i++ ) { for ( std::size_t k=0; k dataSamplePoints(NUM_DRAWS,RealVector(1)); for ( size_t j=0; j<9; j++ ) { double target = 0.1*(j+1); //these are the probabilities i want. double source = 1.0/exp(gen_parameters(0)) * (gen_parameters(1) - log(-1+1/target)); //and i have to look here on the x-axis to get them. for ( unsigned int k=0; k<10; k++ ) { dataSamplePoints[j*10+k](0)=source; } } // set the targets according to their probabilities std::vector dataSampleLabels(NUM_DRAWS,0); for ( size_t i=0; i<9; i++ ) { for ( std::size_t k=0; k dataSamplePoints(NUM_DRAWS,RealVector(1)); for ( size_t j=0; j<9; j++ ) { double target = 0.1*(j+1); //these are the probabilities i want. double source = 1.0/exp(gen_parameters(0)) * (gen_parameters(1) - log(-1+1/target)); //and i have to look here on the x-axis to get them. 
for ( unsigned int k=0; k<10; k++ ) { dataSamplePoints[j*10+k](0)=source; } } // set the targets according to their probabilities std::vector dataSampleLabels(NUM_DRAWS,0); for ( size_t i=0; i<9; i++ ) { for ( std::size_t k=0; k dataSamplePoints(NUM_DRAWS,RealVector(1)); for ( size_t j=0; j<9; j++ ) { double target = 0.1*(j+1); //these are the probabilities i want. double source = 1.0/exp(gen_parameters(0)) * (gen_parameters(1) - log(-1+1/target)); //and i have to look here on the x-axis to get them. for ( unsigned int k=0; k<10; k++ ) { dataSamplePoints[j*10+k](0)=source; } } // set the targets according to their probabilities std::vector dataSampleLabels(NUM_DRAWS,0); for ( size_t i=0; i<9; i++ ) { for ( std::size_t k=0; k input(examples); std::vector target(examples); size_t i; for (i=0; i
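// Note on the sample construction used throughout these sigmoid-fit tests:
// for the two-parameter sigmoid p(x) = 1 / (1 + exp(-(s*x - b))), solving
// for x yields
//     x = (b - log(1/p - 1)) / s,
// which is exactly the 'source' formula above, with slope s = exp(a) in the
// encoded variants and s = a in the unencoded one. Ten copies of each x then
// set the empirical label frequency to the target probability 0.1, ..., 0.9.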
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE Algorithms_NearestNeighbors #include #include #include #include #include #include #include #include #include #include #include using namespace shark; // helper class for std::sort class Comparator { public: Comparator(std::vector& data, RealVector& test) : m_data(data) , m_test(test) { } bool operator () (std::size_t index1, std::size_t index2) const { double d1 = distanceSqr(m_data[index1], m_test); double d2 = distanceSqr(m_data[index2], m_test); return (d1 < d2); } protected: std::vector& m_data; RealVector& m_test; }; const unsigned int TRAINING = 100000; const unsigned int NEIGHBORS = 1000; template void testTree( Tree const& tree, char const* name, std::vector const& data, RealVector const& test, std::vector index, double time_reference ){ // space for algorithm results std::vector dist(NEIGHBORS); std::vector neighbor(NEIGHBORS); // query NEIGHBORS first neighbors of the test point double start = Timer::now(); IterativeNNQuery > query(&tree,data, test); for (std::size_t i=0; i != NEIGHBORS; i++){ std::pair ret = query.next(); dist[i] = ret.first; neighbor[i] = &data[ret.second]; } double time = Timer::now() - start; // check consistency with brute force algorithm for (std::size_t i=0; i= 5) { BOOST_CHECK_EQUAL(&data[index[i]], neighbor[i]); } BOOST_CHECK_SMALL(distance(*neighbor[i], test) - dist[i], 1e-12); } // check consistency of ranking for (std::size_t i=1; i void testTreeStructure( Tree const& tree, std::vector const& data ){ for (std::size_t i=0; i != data.size(); i++){ BinaryTree const* node = &tree; while(! node->isLeaf()){ if(node->isRight(data[i])) node = node->right(); else node = node->left(); } std::size_t index = node->index(0); if(i < 5) BOOST_CHECK_LE(index,5u); else BOOST_CHECK_EQUAL(index,i); } } // Generate 100.000 i.i.d. standard normally // distributed samples in 3D, then query the // 1.000 nearest neighbors of the origin. 
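// (The brute-force Comparator above sorts by squared distances; since
// x -> x*x is monotone on the nonnegative reals, this produces the same
// ranking as sorting by the distances themselves, at lower cost.)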
BOOST_AUTO_TEST_SUITE (Algorithms_nearestneighbors) BOOST_AUTO_TEST_CASE(IterativeNearestNeighborQueries) { double start; Rng::seed(42); // generate data set and test set std::vector data(TRAINING); std::vector test(10); for (std::size_t i=0; i<5; i++) { // multiple instances of the same point data[i].resize(3); data[i][0] = 0.0; data[i][1] = 0.0; data[i][2] = 0.0; } for (std::size_t i=0; i<10; i++) { // multiple instances of the same point test[i].resize(3); test[i][0] = Rng::gauss(); test[i][1] = Rng::gauss(); test[i][2] = Rng::gauss(); } test[0].clear();//(0,0,0) for (std::size_t i=0; i dataset = createDataFromRange(data); //test trees KDTree kdtree(dataset); testTreeStructure(kdtree,data); LCTree lctree(dataset); LinearKernel kernel; KHCTree > khctree(data, &kernel); for(std::size_t k = 0; k != 10; ++k){ // brute force sorting (for comparison) std::vector index(TRAINING); start = Timer::now(); for (std::size_t i=0; i #include #include #include namespace shark { BOOST_PARAMETER_NAME(optimizer) // Note: no semicolon BOOST_PARAMETER_NAME(function) BOOST_PARAMETER_NAME(trials) BOOST_PARAMETER_NAME(iterations) BOOST_PARAMETER_NAME(epsilon) BOOST_PARAMETER_NAME(fstop) BOOST_PARAMETER_FUNCTION( (void), test_function, tag, (required ( in_out(optimizer), * ) ( in_out(function), * ) ) (optional (trials, *, 10) (iterations, *, 1000) (epsilon, *, 1E-15) (fstop, *, 1E-10) )) { boost::progress_display pd( trials * iterations ); std::vector stats; for( size_t trial =0;trial != static_cast(trials);++trial ){ function.init(); optimizer.init(function); double error=0; for( size_t iteration = 0; iteration < static_cast(iterations); ++iteration ) { optimizer.step( function ); error=optimizer.solution().value; ++pd; if( fstop > error ) break; } stats.push_back(error); } std::cout< void testFunction(Optimizer& optimizer,Function& function,unsigned int trials,unsigned int iterations, double epsilon = 1.e-15){ boost::progress_display pd( trials * iterations ); std::vector stats; for( size_t trial =0;trial != static_cast(trials);++trial ){ optimizer.init(function); double error=0; for( size_t iteration = 0; iteration < static_cast(iterations); ++iteration ) { //~ std::cout< error ){ pd+=iterations-iteration; break; } ++pd; } stats.push_back(error); } std::cout< #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Core_Iterators) BOOST_AUTO_TEST_CASE(MULTI_SEQUENCE_ITERATOR_TEST) { std::vector > vecs(10); std::vector values(1000); //~ for(std::size_t i = 0; i != 1000; ++i){ //~ std::size_t bin = Rng::discrete(0,9); //~ vecs[bin].push_back(i); //~ } for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 100; ++j){ vecs[i].push_back(i*100+j); } } { std::vector::iterator valPos = values.begin(); for(std::size_t i = 0; i != 10; ++i){ std::copy(vecs[i].begin(),vecs[i].end(),valPos); valPos +=vecs[i].size(); } } {//check op++ MultiSequenceIterator > > iter( vecs.begin(),vecs.end(), vecs.begin(),vecs.begin()->begin(),0 ); MultiSequenceIterator > > end( vecs.begin(),vecs.end(), vecs.end(),std::vector::iterator(),1000 ); for(std::size_t pos = 0; pos != 1000; ++pos,++iter){ std::size_t value = values[pos]; BOOST_CHECK_EQUAL(*iter,value); BOOST_CHECK_EQUAL(iter.index(),pos); BOOST_CHECK_EQUAL(end-iter,1000-pos); } BOOST_CHECK(iter == end); } {//check op+= MultiSequenceIterator > > iter( vecs.begin(),vecs.end(), vecs.begin(),vecs.begin()->begin(),0 ); MultiSequenceIterator > > end( vecs.begin(),vecs.end(), vecs.end(),std::vector::iterator(),1000 ); std::ptrdiff_t pos = 0; for(std::size_t 
trial = 0; trial != 10000; ++trial){ std::size_t value = values[pos]; BOOST_CHECK_EQUAL(*iter,value); BOOST_CHECK_EQUAL(iter.index(),pos); BOOST_CHECK_EQUAL(end-iter,1000-pos); std::ptrdiff_t newPos = Rng::discrete(0,999); std::ptrdiff_t diff = newPos - pos; std::cout< #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Core_Math) BOOST_AUTO_TEST_CASE(Math_Trigamma_SpecialValues) { double pi = boost::math::constants::pi(); double inputs[]={0.25,0.5,1,1.5,2.0}; double K = 0.915965594177219015054; double values[]={sqr(pi)+8*K,sqr(pi)/2.0,sqr(pi)/6,sqr(pi)/2-4,sqr(pi)/6-1}; double maxError = 1.e-12; for(std::size_t i = 0; i != 4; ++i){ double y=trigamma(inputs[i]); std::cout< #include namespace shark { /// Fixture for testing ScopedHandle class ScopedHandleFixture { public: /// A deleter for testing void deleteMe(int handle) { m_deletedHandles.push_back(handle); } /// Save deleted handles for verification std::vector m_deletedHandles; }; BOOST_FIXTURE_TEST_SUITE (Core_ScopedHandleTests, ScopedHandleFixture) BOOST_AUTO_TEST_CASE(BasicTest) { // Test that ScopedHandle is able to hold a valid handle, access it and delete it upon destruction const int validHandle = 10; const int invalidHandle = -1; BOOST_REQUIRE_EQUAL(m_deletedHandles.size(), 0u); { ScopedHandle handle(validHandle, boost::bind(&ScopedHandleFixture::deleteMe, this, _1)); BOOST_CHECK_EQUAL(*handle, validHandle); } BOOST_CHECK_EQUAL(m_deletedHandles.size(), 1u); BOOST_CHECK_EQUAL(m_deletedHandles[0], validHandle); m_deletedHandles.clear(); // Test that ScopedHandle will throw exception when passed handle is invalid // and destruction should do no harm { BOOST_CHECK_THROW(ScopedHandle handle(invalidHandle, boost::bind(&ScopedHandleFixture::deleteMe, this, _1)), Exception); } BOOST_CHECK_EQUAL(m_deletedHandles.size(), 0u); } BOOST_AUTO_TEST_SUITE_END() } // namespace shark { Shark-3.1.4/Test/Data/000077500000000000000000000000001314655040000143555ustar00rootroot00000000000000Shark-3.1.4/Test/Data/Bootstrap.cpp000066400000000000000000000046711314655040000170460ustar00rootroot00000000000000#define BOOST_TEST_MODULE DATA_BOOTSTRAP #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Data_Bootstrap) BOOST_AUTO_TEST_CASE( Bootstrap_LabeledData ){ //create a toy dataset std::vector inputs; std::vector labels; for(unsigned int i=0;i != 20;++i){ inputs.push_back(i); labels.push_back(20+i); } LabeledData set=createLabeledDataFromRange(inputs,labels,8); //create Bootstrap subsets and create a running sum of the weights to check // that every point is chosen equally. 
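// Reasoning behind the check: each bootstrap replicate draws 20 points
// uniformly with replacement from the 20-element set, so the weights of one
// replicate always sum to 20 and every point has expected weight
// 20 * (1/20) = 1.0 per replicate. Averaged over 10000 replicates, the
// per-point mean weight must therefore be close to 1.0.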
RealVector weightSums(20,0.0); std::size_t iterations = 10000; for(std::size_t iteration = 0; iteration != iterations; ++iteration){ WeightedLabeledData bootstrapSet = bootstrap(set); BOOST_REQUIRE_EQUAL(bootstrapSet.numberOfElements(),20); double setWeightSum = 0.0; for(std::size_t i = 0; i != 20; ++i){ BOOST_CHECK_EQUAL(bootstrapSet.element(i).data.input,set.element(i).input); BOOST_CHECK_EQUAL(bootstrapSet.element(i).data.label,set.element(i).label); weightSums[i] += bootstrapSet.element(i).weight; setWeightSum += bootstrapSet.element(i).weight; } BOOST_CHECK_CLOSE(setWeightSum,20,1.e-7); } weightSums/=iterations; for(std::size_t i = 0; i != 20; ++i){ BOOST_CHECK_CLOSE(weightSums[i],1.0,5); } } BOOST_AUTO_TEST_CASE( Bootstrap_UnlabeledData ){ //create a toy dataset std::vector inputs; for(unsigned int i=0;i != 20;++i){ inputs.push_back(i); } UnlabeledData set=createDataFromRange(inputs,8); //create Bootstrap subsets and create a running sum of the weights to check // that every point is chosen equally. RealVector weightSums(20,0.0); std::size_t iterations = 10000; for(std::size_t iteration = 0; iteration != iterations; ++iteration){ WeightedUnlabeledData bootstrapSet = bootstrap(set); BOOST_REQUIRE_EQUAL(bootstrapSet.numberOfElements(),20); double setWeightSum = 0.0; for(std::size_t i = 0; i != 20; ++i){ BOOST_CHECK_EQUAL(bootstrapSet.element(i).data,set.element(i)); weightSums[i] += bootstrapSet.element(i).weight; setWeightSum += bootstrapSet.element(i).weight; } BOOST_CHECK_CLOSE(setWeightSum,20,1.e-7); } weightSums/=iterations; for(std::size_t i = 0; i != 20; ++i){ BOOST_CHECK_CLOSE(weightSums[i],1.0,5); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/CVDatasetTools.cpp000066400000000000000000000134011314655040000177170ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_CVDatasetTools #include #include #include #include #include using namespace shark; const size_t numPartitions=4; template void testEqualCollections(const T& set,const U& vec){ BOOST_REQUIRE_EQUAL(set.numberOfElements(),vec.size()); for(size_t i=0;i!=set.numberOfElements();++i){ bool found = false; for(std::size_t j = 0; j != set.numberOfElements(); ++j) if(set.element(j)==vec[i]) found = true; BOOST_CHECK_EQUAL( found,true ); } } BOOST_AUTO_TEST_SUITE (Data_CVDatasetTools) BOOST_AUTO_TEST_CASE( CVDatasetTools_CreateIndexed ){ //input for createIndexed std::vector inputs; std::vector labels; std::vector indizes; //the testresults std::vector testInputPartitions[numPartitions]; //fill the vectors. inputs are the number [0,19], labels go from [20...39] //the indizes are assigned cyclically 0,1,2,3,0,1,2,3 to the numbers. for(size_t i=0;i!=20;++i){ inputs.push_back(double(i) ); labels.push_back(20.0+i); indizes.push_back(i%numPartitions); for(size_t j=0;j!=numPartitions;++j){ if(j!=i%numPartitions){ testInputPartitions[j].push_back(double(i)); } } } LabeledData set=createLabeledDataFromRange(inputs,labels,8); CVFolds > folds= createCVIndexed(set, numPartitions, indizes); BOOST_REQUIRE_EQUAL(folds.size(), numPartitions); //now test the partitions for(size_t i=0;i!=numPartitions;++i){ LabeledData partition = folds.training(i); testEqualCollections( partition.inputs(), testInputPartitions[i] ); } } //you can't really test this, because its based on randomness... 
//there are a few things like the partitionsSize which should be roughly equal //the rest is tested through CVDatasetTools_CreateIndexed //~ BOOST_AUTO_TEST_CASE( CVDatasetTools_CreateIID ) //~ { //~ const size_t numExamples=50000; //~ const double numValidationSize=numExamples/numPartitions; //~ const double numTrainingSize=numExamples-numValidationSize; //~ //input for createIID //~ std::vector inputs; //~ std::vector labels; //~ for(size_t i=0;i!=numExamples;++i){ //~ inputs.push_back(i); //~ labels.push_back(numExamples+i); //~ } //~ LabeledData set(inputs,labels,8); //~ CVFolds > folds= createCVIID(set,numPartitions); //~ BOOST_REQUIRE_EQUAL(folds.size(), numPartitions); //~ //now test the partitions //~ for(size_t i=0;i!=numPartitions;++i){ //~ LabeledData partition = folds.training(i); //~ double partitionSizeError=1-partition.numberOfElements()/(double)numTrainingSize; //~ BOOST_CHECK_SMALL(partitionSizeError,0.01); //~ } //~ } BOOST_AUTO_TEST_CASE( CVDatasetTools_CreateSameSize ) { const size_t numExamples=102; size_t trainSize[]={76,76,77,77}; //input for createSameSize std::vector inputs; std::vector labels; for(size_t i=0;i!=numExamples;++i){ inputs.push_back(double(i)); labels.push_back(double(numExamples+i)); } LabeledData set = createLabeledDataFromRange(inputs,labels,8); CVFolds > folds = createCVSameSize(set,numPartitions); BOOST_REQUIRE_EQUAL(folds.size(), numPartitions); //now comes the complex part: to ensure that everything is ok //we check that all validation vectors together form the input and label array. //the test works as following: //1. all vectors are appended //2. the resulting vector is sorted //3.after that it holds vec[i]==i for inputs and vec[i]==i+numExamples for labels std::vector validationInputs; std::vector validationLabels; for(size_t i=0;i!=numPartitions;++i){ LabeledData partition = folds.training(i); LabeledData validation = folds.validation(i); BOOST_REQUIRE_EQUAL(partition.numberOfElements(),trainSize[i]); BOOST_REQUIRE_EQUAL(validation.numberOfElements(),numExamples-trainSize[i]); for(size_t j=0;j!=validation.numberOfElements();++j){ validationInputs.push_back(validation.element(j).input); validationLabels.push_back(validation.element(j).label); } } std::sort(validationInputs.begin(),validationInputs.end()); std::sort(validationLabels.begin(),validationLabels.end()); for(size_t i=0;i!=numExamples;++i){ BOOST_CHECK_EQUAL(validationInputs[i],i); BOOST_CHECK_EQUAL(validationLabels[i],i+numExamples); } } BOOST_AUTO_TEST_CASE( CVDatasetTools_CreateSameSizeBalancedUnsigned ) { const size_t numExamples=102; size_t trainSize[]={51,51}; //number of.numberOfElements with label 0 per partition size_t zeroSize[]={26,25}; //input for createSameSize std::vector inputs; std::vector labels; for(size_t i=0;i!=numExamples;++i){ RealVector vec(1); vec(0)= double(i); inputs.push_back(vec); labels.push_back(i%2); } ClassificationDataset set = createLabeledDataFromRange(inputs,labels,8); CVFolds folds = createCVSameSizeBalanced(set,2); BOOST_REQUIRE_EQUAL(folds.size(), 2); //first check equals previous test std::vector validationInputs; for(size_t i=0;i!=2;++i){ ClassificationDataset partition = folds.training(i); ClassificationDataset validation = folds.validation(i); BOOST_REQUIRE_EQUAL(partition.numberOfElements(),trainSize[i]); size_t zeroCount=0; for(size_t j=0;j!=validation.numberOfElements();++j){ validationInputs.push_back(validation.element(j).input(0)); zeroCount+= !validation.element(j).label; } BOOST_CHECK_EQUAL(zeroCount,zeroSize[i]); } 
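// Every element occurs in exactly one validation fold, so concatenating all
// validation folds and sorting must reproduce the inputs 0, ..., 101 in order.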
std::sort(validationInputs.begin(),validationInputs.end()); for(size_t i=0;i!=numExamples;++i){ BOOST_CHECK_EQUAL(validationInputs[i],i); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/Csv.cpp000066400000000000000000000256301314655040000156220ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_Csv #include #include #include #include #include using namespace shark; const char test_separator[] = "\ # i am a test comment at the start\n\ 1.000,148.0,72,35,0,33.6,,?,-1\n\ # i am a test comment\n\ 1,66,29,0,26.6,,,?,-1\r\ 1,183,64,0,0, 23.3,0.672,32,1\r\n\ 2.000,116,\t74\t,\t0 \t\t,0,25.6,0.201 , 30,-1\n\ 1.,78,50,32,88 ,31.0, 0.248,26,1\n\ 2,196,90,0,0,39.8,0.451,41,1\n\ # i am a test comment 1.345\n\ 1.0,119,80,35,0,29.0,0.263,29,1\n\ 2.,143,94,33,146,36.6,0.254,51,1\n\ 1,125,70,26,115,31.1,0.205,41,1\n\ 1,147,76,0,0,39.4,0.257,43,1\n\ 1,97,66,15,140,23.2,0.487,22,-1\n\ 0,145,82,19,110,22.2,0.245,57,-1\n\ 0,117,92,0,0,34.1,0.337,38,-1\n\ 1,109,75,26,0,36.0,0.546,60,-1\n\ 1,158,76,36,245,31.6,0.851,28,1\n\ 0,88,58,11,54,24.8,0.267,22,-1\r\ # i am a test comment at the end without newline"; const char test_no_separator[] = "\ # i am a test comment at the start\n\ 1.000 148.0 \t72 35\t0 33.6 ? ? -1\n\ 1 66 29 0 26.6 ? ? ? -1\r\ 1 183 64 0 0 23.3 0.672 32 1\r\n\ # i am a test comment 1.345\n \ 2.000 116 74 0\t0 25.6 0.201 30 -1\n\ 1. 78 50 32 88 31.0 0.248 26 1\n\ 2 196 90 0 0 39.8\t\t\t0.451 41 1\n\ 1.0 119 80 35 0 29.0 0.263 29 1\n\ 2. 143 94 33 146 36.6 0.254 51 1\n\ 1 125 70 26 115 31.1 0.205 41 1\n\ 1 147 76 0 0 39.4 0.257 43 1\n\ # 1 147 76 0 0 39.4 0.257 43 1\n\ 1 97 66 15 140 23.2 0.487 22 -1\n\ 0 145 82 19 110 22.2 0.245 57 -1\n\ 0 117 92 0 0 34.1 0.337 38 -1\n\ 1 109 75 26 0 36.0 0.546 60 -1\n\ 1 158 76 36 245 31.6 0.851 28 1\n\ 0 88 58 11 54 24.8 0.267 22 -1\r\ # i am a test comment at the end with newline\n"; const char test_single_integer[] = "\ # i am a test comment at the start\n\ 1\n\ 0\r\ 1\r\n\ # i am a test comment 1.345\n \ 0\n\ 1\n\ 0\n\ # 1\n\ 1\r\ # i am a test comment at the end"; const double qnan = std::numeric_limits::quiet_NaN(); std::size_t const numDimensions = 8; std::size_t const numInputs = 16; double test_values_1[numInputs * numDimensions] ={ 148.0,72,35,0,33.6,qnan,qnan,-1, 66,29,0,26.6,qnan,qnan,qnan,-1, 183,64,0,0,23.3,0.672,32,1, 116,74,0,0,25.6,0.201,30,-1, 78,50,32,88,31.0,0.248,26,1, 196,90,0,0,39.8,0.451,41,1, 119,80,35,0,29.0,0.263,29,1, 143,94,33,146,36.6,0.254,51,1, 125,70,26,115,31.1,0.205,41,1, 147,76,0,0,39.4,0.257,43,1, 97,66,15,140,23.2,0.487,22,-1, 145,82,19,110,22.2,0.245,57,-1, 117,92,0,0,34.1,0.337,38,-1, 109,75,26,0,36.0,0.546,60,-1, 158,76,36,245,31.6,0.851,28,1, 88,58,11,54,24.8,0.267,22,-1 }; double test_values_2[numInputs*numDimensions] ={ 1.000,148.0,72,35,0,33.6,qnan,qnan, 1,66,29,0,26.6,qnan,qnan,qnan, 1,183,64,0,0,23.3,0.672,32, 2.000,116,74,0,0,25.6,0.201,30, 1.,78,50,32,88,31.0,0.248,26, 2,196,90,0,0,39.8,0.451,41, 1.0,119,80,35,0,29.0,0.263,29, 2.,143,94,33,146,36.6,0.254,51, 1,125,70,26,115,31.1,0.205,41, 1,147,76,0,0,39.4,0.257,43, 1,97,66,15,140,23.2,0.487,22, 0,145,82,19,110,22.2,0.245,57, 0,117,92,0,0,34.1,0.337,38, 1,109,75,26,0,36.0,0.546,60, 1,158,76,36,245,31.6,0.851,28, 0,88,58,11,54,24.8,0.267,22 }; double test_values[9*numInputs] ={ 1.000,148.0,72,35,0,33.6,qnan,qnan,-1, 1,66,29,0,26.6,qnan,qnan,qnan,-1, 1,183,64,0,0,23.3,0.672,32,1, 2.000,116,74,0,0,25.6,0.201,30,-1, 1.,78,50,32,88,31.0,0.248,26,1, 2,196,90,0,0,39.8,0.451,41,1, 1.0,119,80,35,0,29.0,0.263,29,1, 2.,143,94,33,146,36.6,0.254,51,1, 
1,125,70,26,115,31.1,0.205,41,1, 1,147,76,0,0,39.4,0.257,43,1, 1,97,66,15,140,23.2,0.487,22,-1, 0,145,82,19,110,22.2,0.245,57,-1, 0,117,92,0,0,34.1,0.337,38,-1, 1,109,75,26,0,36.0,0.546,60,-1, 1,158,76,36,245,31.6,0.851,28,1, 0,88,58,11,54,24.8,0.267,22,-1 }; unsigned int test_values_single_integer[]={1,0,1,0,1,0,1}; unsigned int labels_1[numInputs] = {1,1,1,2,1,2,1,2,1,1,1,0,0,1,1,0}; unsigned int labels_2[numInputs] = {0,0,1,0,1,1,1,1,1,1,0,0,0,0,1,0}; template void checkDataEquality(T* values, unsigned int* labels, LabeledData const& loaded){ BOOST_REQUIRE_EQUAL(loaded.numberOfElements(),numInputs); BOOST_REQUIRE_EQUAL(inputDimension(loaded),numDimensions); for (size_t i=0; i != numInputs; ++i){ for (size_t j=0; j != numDimensions; ++j) { if( boost::math::isnan(values[i*numDimensions+j])){ BOOST_CHECK(boost::math::isnan(loaded.element(i).input(j))); } else { BOOST_CHECK_EQUAL(loaded.element(i).input(j), values[i*numDimensions+j]); } } BOOST_CHECK_EQUAL(loaded.element(i).label, labels[i]); } } void checkDataEquality(double* values, Data const& loaded){ BOOST_REQUIRE_EQUAL(loaded.numberOfElements(),numInputs); BOOST_REQUIRE_EQUAL(dataDimension(loaded),numDimensions+1); std::size_t dims = numDimensions+1; for (size_t i=0; i != numInputs; ++i){ for (size_t j=0; j != dims; ++j) { if( boost::math::isnan(values[i*dims+j])){ BOOST_CHECK(boost::math::isnan(loaded.element(i)(j))); } else { BOOST_CHECK_EQUAL(loaded.element(i)(j), values[i*dims+j]); } } } } void checkDataRegression(double* values, LabeledData const& loaded, std::size_t labelStart, std::size_t labelEnd){ BOOST_REQUIRE_EQUAL(loaded.numberOfElements(),numInputs); std::size_t inputStart =0; std::size_t inputEnd = labelStart; if(labelStart == 0){ inputStart = labelEnd; inputEnd = numDimensions+1; } for (size_t i=0; i != numInputs; ++i){ for (size_t j=0; j != numDimensions+1; ++j) { double element = 0; if(j >= labelStart &&j < labelEnd){ element = loaded.element(i).label(j-labelStart); } if(j >= inputStart && j < inputEnd){ element = loaded.element(i).input(j-inputStart); } if( boost::math::isnan(values[i*(numDimensions+1)+j])){ BOOST_CHECK(boost::math::isnan(element)); } else { BOOST_CHECK_EQUAL(element, values[i*(numDimensions+1)+j]); } } } } BOOST_AUTO_TEST_SUITE (Data_Csv) BOOST_AUTO_TEST_CASE( Data_Csv_Data_Import) { { Data test; csvStringToData(test, test_separator, ',','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); std::cout << test< test; csvStringToData(test, test_no_separator, ' ','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); std::cout << test< test; csvStringToData(test, test_single_integer, ',','#',3); BOOST_REQUIRE_EQUAL(test.numberOfElements(), 7u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 3); std::cout << test< test; csvStringToData(test, test_separator, FIRST_COLUMN, ',','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(numberOfClasses(test), 3); std::cout << test< test; csvStringToData(test, test_separator, FIRST_COLUMN, 3, ',','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(inputDimension(test), 6); BOOST_CHECK_EQUAL(labelDimension(test), 3); std::cout << test< test; csvStringToData(test, test_no_separator, FIRST_COLUMN, ' ','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(numberOfClasses(test), 
3); std::cout << test< test; csvStringToData(test, test_no_separator, FIRST_COLUMN, 3, ' ','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(inputDimension(test), 6); BOOST_CHECK_EQUAL(labelDimension(test), 3); std::cout << test< test; csvStringToData(test, test_no_separator, LAST_COLUMN, ' ','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(numberOfClasses(test), 2); std::cout << test< test; csvStringToData(test, test_no_separator, LAST_COLUMN, 3, ' ','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(inputDimension(test), 6); BOOST_CHECK_EQUAL(labelDimension(test), 3); std::cout << test< test; csvStringToData(test, test_separator, LAST_COLUMN, ',','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(numberOfClasses(test), 2); std::cout << test< test; csvStringToData(test, test_separator, LAST_COLUMN, 3, ',','#',3); BOOST_CHECK_EQUAL(test.numberOfElements(), 16u); BOOST_CHECK_EQUAL(test.numberOfBatches(), 6); BOOST_CHECK_EQUAL(inputDimension(test), 6); BOOST_CHECK_EQUAL(labelDimension(test), 3); std::cout << test< test; csvStringToData(test, test_separator, FIRST_COLUMN, ',','#',3); exportCSV(test, "./test_output/check_first.csv", FIRST_COLUMN); LabeledData loaded; importCSV(loaded, "./test_output/check_first.csv", FIRST_COLUMN); checkDataEquality(test_values_1,labels_1,loaded); } { LabeledData test; csvStringToData(test, test_separator, FIRST_COLUMN, ',','#',3); exportCSV(test, "./test_output/check_last.csv", LAST_COLUMN); LabeledData loaded; importCSV(loaded, "./test_output/check_last.csv", LAST_COLUMN); checkDataEquality(test_values_1,labels_1,loaded); } { Data test; csvStringToData(test, test_separator, ',','#',3); exportCSV(test, "test_output/check_regression.csv"); Data loaded; importCSV(loaded, "test_output/check_regression.csv"); checkDataEquality(test_values,loaded); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/DataView.cpp000066400000000000000000000275711314655040000166010ustar00rootroot00000000000000#define BOOST_TEST_MODULE Data_DataView #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Data_DataView) BOOST_AUTO_TEST_CASE( DataView_Data_Test ) { //define initial dataset std::vector inputs; for (int i=0;i!=100;++i) { inputs.push_back(100+i); } UnlabeledData set = createDataFromRange(inputs,10); //define subset std::vector subsetIndices1; for (size_t i=0;i!=30;++i) { subsetIndices1.push_back(i*3+1); } //define indices for a sub-subset std::vector subsetIndices2; std::vector realSubsetIndices2; for (size_t i=0;i!=10;++i) { subsetIndices2.push_back(2*i+3); realSubsetIndices2.push_back(subsetIndices1[2*i+3]); } //Test1 create just an identity set DataView > view(set); BOOST_CHECK_EQUAL(view.size(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(view[i],set.element(i)); BOOST_CHECK_EQUAL(view.index(i), i); } //Test2 create a subset of the identity set DataView > subview1=subset(view,subsetIndices1); BOOST_CHECK_EQUAL(subview1.size(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(subview1[i],set.element(subsetIndices1[i])); BOOST_CHECK_EQUAL(subview1.index(i), subsetIndices1[i]); } //Test3 create a subset of the subset DataView > subview2=subset(subview1,subsetIndices2); BOOST_CHECK_EQUAL(subview2.size(),10u); for(std::size_t i = 0; i != 10; ++i){ 
BOOST_CHECK_EQUAL(subview2[i],set.element(realSubsetIndices2[i])); BOOST_CHECK_EQUAL(subview2.index(i), realSubsetIndices2[i]); } //Test4 create a Dataset of the sets Data setCopy= toDataset(view,20); BOOST_CHECK_EQUAL(setCopy.numberOfBatches(),5u); BOOST_CHECK_EQUAL(setCopy.numberOfElements(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(setCopy.element(i),set.element(i)); } for(std::size_t i = 0; i != 5; ++i){ BOOST_CHECK_EQUAL(setCopy.batch(i).size(),20u); } Data copy1 = toDataset(subview1,5); BOOST_CHECK_EQUAL(copy1.numberOfBatches(),6u); BOOST_CHECK_EQUAL(copy1.numberOfElements(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(copy1.element(i),set.element(subsetIndices1[i])); } for(std::size_t i = 0; i != 6; ++i){ BOOST_CHECK_EQUAL(copy1.batch(i).size(),5u); } Data copy2 = toDataset(subview2,2); BOOST_CHECK_EQUAL(copy2.numberOfBatches(),5u); BOOST_CHECK_EQUAL(copy2.numberOfElements(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(copy2.element(i),set.element(realSubsetIndices2[i])); } for(std::size_t i = 0; i != 5;++i){ BOOST_CHECK_EQUAL(copy2.batch(i).size(),2u); } } //Same again, just throwing in some consts... BOOST_AUTO_TEST_CASE( DataView_Data_Const_Test ) { //define initial dataset std::vector inputs; for (int i=0;i!=100;++i) { inputs.push_back(100+i); } UnlabeledData set0 = createDataFromRange(inputs,10); UnlabeledData const& set = set0; //define subset std::vector subsetIndices1; for (size_t i=0;i!=30;++i) { subsetIndices1.push_back(i*3+1); } //define indices for a sub-subset std::vector subsetIndices2; std::vector realSubsetIndices2; for (size_t i=0;i!=10;++i) { subsetIndices2.push_back(2*i+3); realSubsetIndices2.push_back(subsetIndices1[2*i+3]); } //Test1 create just an identity set DataView const > view(set); BOOST_CHECK_EQUAL(view.size(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(view[i],set.element(i)); BOOST_CHECK_EQUAL(view.index(i), i); } //Test2 create a subset of the identity set DataView const> subview1=subset(view,subsetIndices1); BOOST_CHECK_EQUAL(subview1.size(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(subview1[i],set.element(subsetIndices1[i])); BOOST_CHECK_EQUAL(subview1.index(i), subsetIndices1[i]); } //Test3 create a subset of the subset DataView const> subview2=subset(subview1,subsetIndices2); BOOST_CHECK_EQUAL(subview2.size(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(subview2[i],set.element(realSubsetIndices2[i])); BOOST_CHECK_EQUAL(subview2.index(i), realSubsetIndices2[i]); } //Test4 create a Dataset of the sets Data setCopy= toDataset(view,20); BOOST_CHECK_EQUAL(setCopy.numberOfBatches(),5u); BOOST_CHECK_EQUAL(setCopy.numberOfElements(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(setCopy.element(i),set.element(i)); } for(std::size_t i = 0; i != 5; ++i){ BOOST_CHECK_EQUAL(setCopy.batch(i).size(),20u); } Data copy1 = toDataset(subview1,5); BOOST_CHECK_EQUAL(copy1.numberOfBatches(),6u); BOOST_CHECK_EQUAL(copy1.numberOfElements(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(copy1.element(i),set.element(subsetIndices1[i])); } for(std::size_t i = 0; i != 6; ++i){ BOOST_CHECK_EQUAL(copy1.batch(i).size(),5u); } Data copy2 = toDataset(subview2,2); BOOST_CHECK_EQUAL(copy2.numberOfBatches(),5u); BOOST_CHECK_EQUAL(copy2.numberOfElements(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(copy2.element(i),set.element(realSubsetIndices2[i])); } for(std::size_t i = 0; i != 5;++i){ 
BOOST_CHECK_EQUAL(copy2.batch(i).size(),2u); } } //now the same test for datasets BOOST_AUTO_TEST_CASE( DataView_Dataset_Test ) { //define initial dataset std::vector inputs; for (int i=0;i!=100;++i) { inputs.push_back(100+i); } std::vector labels; for (unsigned int i=0;i!=100;++i) { labels.push_back(200+i); } LabeledData set = createLabeledDataFromRange(inputs,labels,10); //define subset std::vector subsetIndices1; for (size_t i=0;i!=30;++i) { subsetIndices1.push_back(i*3+1); } //define indices for a sub-subset std::vector subsetIndices2; std::vector realSubsetIndices2; for (size_t i=0;i!=10;++i) { subsetIndices2.push_back(2*i+3); realSubsetIndices2.push_back(subsetIndices1[2*i+3]); } //Test1 create just an identity set DataView > view(set); BOOST_CHECK_EQUAL(view.size(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(view[i].input,set.element(i).input); BOOST_CHECK_EQUAL(view[i].label,set.element(i).label); BOOST_CHECK_EQUAL(view.index(i), i); } //Test2 create a subset of the identity set DataView > subview1=subset(view,subsetIndices1); BOOST_CHECK_EQUAL(subview1.size(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(subview1[i].input,set.element(subsetIndices1[i]).input); BOOST_CHECK_EQUAL(subview1[i].label,set.element(subsetIndices1[i]).label); BOOST_CHECK_EQUAL(subview1.index(i), subsetIndices1[i]); } //Test3 create a subset of the subset DataView > subview2=subset(subview1,subsetIndices2); BOOST_CHECK_EQUAL(subview2.size(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(subview2[i].input,set.element(realSubsetIndices2[i]).input); BOOST_CHECK_EQUAL(subview2[i].label,set.element(realSubsetIndices2[i]).label); BOOST_CHECK_EQUAL(subview2.index(i), realSubsetIndices2[i]); } //Test4 create a Dataset of the sets LabeledData setCopy= toDataset(view,20); BOOST_CHECK_EQUAL(setCopy.numberOfBatches(),5u); BOOST_CHECK_EQUAL(setCopy.numberOfElements(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(setCopy.element(i).input,set.element(i).input); BOOST_CHECK_EQUAL(setCopy.element(i).label,set.element(i).label); } for(std::size_t i = 0; i != 5; ++i){ BOOST_CHECK_EQUAL(setCopy.batch(i).size(),20u); } LabeledData copy1 = toDataset(subview1,5); BOOST_CHECK_EQUAL(copy1.numberOfBatches(),6u); BOOST_CHECK_EQUAL(copy1.numberOfElements(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(copy1.element(i).input,set.element(subsetIndices1[i]).input); BOOST_CHECK_EQUAL(copy1.element(i).label,set.element(subsetIndices1[i]).label); } for(std::size_t i = 0; i != 6; ++i){ BOOST_CHECK_EQUAL(copy1.batch(i).size(),5u); } LabeledData copy2 = toDataset(subview2,2); BOOST_CHECK_EQUAL(copy2.numberOfBatches(),5u); BOOST_CHECK_EQUAL(copy2.numberOfElements(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(copy2.element(i).input,set.element(realSubsetIndices2[i]).input); BOOST_CHECK_EQUAL(copy2.element(i).label,set.element(realSubsetIndices2[i]).label); } for(std::size_t i = 0; i != 5;++i){ BOOST_CHECK_EQUAL(copy2.batch(i).size(),2u); } } //now the same test for const datasets BOOST_AUTO_TEST_CASE( DataView_Dataset_Const_Test ) { //define initial dataset std::vector inputs; for (int i=0;i!=100;++i) { inputs.push_back(100+i); } std::vector labels; for (unsigned int i=0;i!=100;++i) { labels.push_back(200+i); } LabeledData set0 = createLabeledDataFromRange(inputs,labels,10); LabeledData const& set = set0; //define subset std::vector subsetIndices1; for (size_t i=0;i!=30;++i) { subsetIndices1.push_back(i*3+1); } //define indices for a sub-subset 
std::vector subsetIndices2; std::vector realSubsetIndices2; for (size_t i=0;i!=10;++i) { subsetIndices2.push_back(2*i+3); realSubsetIndices2.push_back(subsetIndices1[2*i+3]); } //Test1 create just an identity set DataView const > view(set); BOOST_CHECK_EQUAL(view.size(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(view[i].input,set.element(i).input); BOOST_CHECK_EQUAL(view[i].label,set.element(i).label); BOOST_CHECK_EQUAL(view.index(i), i); } //Test2 create a subset of the identity set DataView const > subview1=subset(view,subsetIndices1); BOOST_CHECK_EQUAL(subview1.size(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(subview1[i].input,set.element(subsetIndices1[i]).input); BOOST_CHECK_EQUAL(subview1[i].label,set.element(subsetIndices1[i]).label); BOOST_CHECK_EQUAL(subview1.index(i), subsetIndices1[i]); } //Test3 create a subset of the subset DataView const > subview2=subset(subview1,subsetIndices2); BOOST_CHECK_EQUAL(subview2.size(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(subview2[i].input,set.element(realSubsetIndices2[i]).input); BOOST_CHECK_EQUAL(subview2[i].label,set.element(realSubsetIndices2[i]).label); BOOST_CHECK_EQUAL(subview2.index(i), realSubsetIndices2[i]); } //Test4 create a Dataset of the sets LabeledData setCopy= toDataset(view,20); BOOST_CHECK_EQUAL(setCopy.numberOfBatches(),5u); BOOST_CHECK_EQUAL(setCopy.numberOfElements(),100u); for(std::size_t i = 0; i != 100; ++i){ BOOST_CHECK_EQUAL(setCopy.element(i).input,set.element(i).input); BOOST_CHECK_EQUAL(setCopy.element(i).label,set.element(i).label); } for(std::size_t i = 0; i != 5; ++i){ BOOST_CHECK_EQUAL(setCopy.batch(i).size(),20u); } LabeledData copy1 = toDataset(subview1,5); BOOST_CHECK_EQUAL(copy1.numberOfBatches(),6u); BOOST_CHECK_EQUAL(copy1.numberOfElements(),30u); for(std::size_t i = 0; i != 30; ++i){ BOOST_CHECK_EQUAL(copy1.element(i).input,set.element(subsetIndices1[i]).input); BOOST_CHECK_EQUAL(copy1.element(i).label,set.element(subsetIndices1[i]).label); } for(std::size_t i = 0; i != 6; ++i){ BOOST_CHECK_EQUAL(copy1.batch(i).size(),5u); } LabeledData copy2 = toDataset(subview2,2); BOOST_CHECK_EQUAL(copy2.numberOfBatches(),5u); BOOST_CHECK_EQUAL(copy2.numberOfElements(),10u); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(copy2.element(i).input,set.element(realSubsetIndices2[i]).input); BOOST_CHECK_EQUAL(copy2.element(i).label,set.element(realSubsetIndices2[i]).label); } for(std::size_t i = 0; i != 5;++i){ BOOST_CHECK_EQUAL(copy2.batch(i).size(),2u); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/Dataset.cpp000066400000000000000000000614771314655040000164650ustar00rootroot00000000000000#define BOOST_TEST_MODULE Data_Dataset #include #include #include #include using namespace shark; void testSetEquality(Data const& set1, Data const& set2){ BOOST_REQUIRE_EQUAL(set1.numberOfBatches(),set2.numberOfBatches()); BOOST_REQUIRE_EQUAL(set1.numberOfElements(),set2.numberOfElements()); for(size_t i=0;i!=set1.numberOfBatches();++i) { IntVector vec1=set1.batch(i); IntVector vec2=set2.batch(i); BOOST_REQUIRE_EQUAL(vec1.size(),vec2.size()); BOOST_CHECK_EQUAL_COLLECTIONS(vec1.begin(),vec1.end(),vec2.begin(),vec2.end()); } } void testDatasetEquality(LabeledData const& set1, LabeledData const& set2){ BOOST_REQUIRE_EQUAL(set1.numberOfBatches(),set2.numberOfBatches()); BOOST_REQUIRE_EQUAL(set1.numberOfElements(),set2.numberOfElements()); for(std::size_t i = 0; i != set1.numberOfBatches(); ++i){ 
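		// every batch of a LabeledData must pair its input block with an
		// equally sized label block; only then is the batch-wise comparison
		// of inputs() and labels() via testSetEquality below meaningful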
BOOST_REQUIRE_EQUAL(set1.batch(i).input.size(),set1.batch(i).label.size()); BOOST_REQUIRE_EQUAL(set2.batch(i).input.size(),set2.batch(i).label.size()); } testSetEquality(set1.inputs(),set2.inputs()); testSetEquality(set1.labels(),set2.labels()); } BOOST_AUTO_TEST_SUITE (Data_Dataset) BOOST_AUTO_TEST_CASE( Set_Test ) { std::vector inputs; // the test results std::vector indizes[2]; // fill the vectors: inputs are the number [100, ..., 199] for (int i=0;i!=100;++i) { inputs.push_back(100+i); } for(std::size_t i = 0; i != 20; ++i){ indizes[i%2].push_back(i); } // 1.1 test element access and thus createDataFromRange UnlabeledData set = createDataFromRange(inputs,5);//20 batches BOOST_REQUIRE_EQUAL(set.numberOfElements(), 100u); BOOST_REQUIRE_EQUAL(set.numberOfBatches(), 20u); for (size_t i=0; i!=100; ++i) { BOOST_CHECK_EQUAL(inputs[i], set.element(i)); } //also test iterator access BOOST_CHECK_EQUAL_COLLECTIONS( set.elements().begin(),set.elements().end(), inputs.begin(),inputs.end() ); //1.2 test batch access for (size_t i=0; i!=10; ++i) { IntVector batch=set.batch(i); BOOST_REQUIRE_EQUAL(batch.size(),5u); BOOST_CHECK_EQUAL_COLLECTIONS( batch.begin(),batch.end(), inputs.begin()+i*5, inputs.begin()+(i+1)*5 ); } // 2. create indexed partitions { UnlabeledData subset = indexedSubset(set,indizes[0]); UnlabeledData subsetMethod; set.indexedSubset(indizes[0],subsetMethod); BOOST_REQUIRE_EQUAL(subset.numberOfBatches(), indizes[0].size()); for (size_t i=0; i!=subset.numberOfBatches(); ++i) { IntVector batch=subsetMethod.batch(i); BOOST_REQUIRE_EQUAL(batch.size(),5); BOOST_CHECK_EQUAL_COLLECTIONS( batch.begin(),batch.end(), inputs.begin()+indizes[0][i]*5, inputs.begin()+(indizes[0][i]+1)*5 ); } testSetEquality(subset,subsetMethod); } // 2.1 now with complement { UnlabeledData subset = indexedSubset(set,indizes[0]); UnlabeledData subset2; UnlabeledData complement; set.indexedSubset(indizes[0], subset2, complement); testSetEquality(subset, subset2); BOOST_REQUIRE_EQUAL(complement.numberOfBatches(), indizes[1].size()); for (size_t i=0; i!=subset.numberOfBatches(); ++i) { IntVector batch=complement.batch(i); BOOST_REQUIRE_EQUAL(batch.size(),5); BOOST_CHECK_EQUAL_COLLECTIONS( batch.begin(),batch.end(), inputs.begin()+indizes[1][i]*5, inputs.begin()+(indizes[1][i]+1)*5 ); } } } BOOST_AUTO_TEST_CASE( Set_Repartition ) { std::vector inputs; // fill the vectors: inputs are the number [100, ..., 199] for (int i=0;i!=100;++i) { inputs.push_back(100+i); } //generate a set and than repartition it with unevenly sized batches std::vector batchSizes(8); batchSizes[0]=8; batchSizes[1]=24; batchSizes[2]=8; batchSizes[3]=7; batchSizes[4]=8; batchSizes[5]=12; batchSizes[6]=25; batchSizes[7]=8; UnlabeledData set = createDataFromRange(inputs,10); set.repartition(batchSizes); BOOST_REQUIRE_EQUAL(set.numberOfBatches(),8u); BOOST_REQUIRE_EQUAL(set.numberOfElements(),100u); for(std::size_t i = 0; i != 8; ++i){ BOOST_CHECK_EQUAL(set.batch(i).size(), batchSizes[i]); } BOOST_CHECK_EQUAL_COLLECTIONS( set.elements().begin(),set.elements().end(), inputs.begin(),inputs.end() ); } BOOST_AUTO_TEST_CASE( Set_splitAtElement_Boundary_Test ) { std::vector inputs; // fill the vectors: inputs are the number [100, ..., 199] for (int i=0;i!=100;++i) { inputs.push_back(100+i); } //generate a set with unevenly sized batches std::vector batchSizes(8); batchSizes[0]=8; batchSizes[1]=24; batchSizes[2]=8; batchSizes[3]=7; batchSizes[4]=8; batchSizes[5]=12; batchSizes[6]=25; batchSizes[7]=8; //split before and after every batch std::size_t index = 0; 
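	// index accumulates the batch sizes (0, 8, 32, 40, ...), so each round
	// splits exactly at a batch boundary, including the degenerate cases
	// index==0 (empty left part) and index==100 (empty right part).
	// The invariant checked in every round, as a sketch:
	//   UnlabeledData<int> split = splitAtElement(set, index);
	//   // set keeps elements [0,index), split receives [index,100)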
for(std::size_t i = 0; i <= batchSizes.size();++i){ UnlabeledData set= createDataFromRange(inputs,10); set.repartition(batchSizes); UnlabeledData split = splitAtElement(set,index); BOOST_REQUIRE_EQUAL(set.numberOfBatches(),i); BOOST_REQUIRE_EQUAL(split.numberOfBatches(),8-i); BOOST_REQUIRE_EQUAL(set.numberOfElements(),index); BOOST_REQUIRE_EQUAL(split.numberOfElements(),100-index); BOOST_CHECK_EQUAL_COLLECTIONS( set.elements().begin(),set.elements().end(), inputs.begin(),inputs.begin()+index ); BOOST_CHECK_EQUAL_COLLECTIONS( split.elements().begin(),split.elements().end(), inputs.begin()+index,inputs.end() ); if(i != batchSizes.size()) index+=batchSizes[i]; } } BOOST_AUTO_TEST_CASE( Set_Merge_Test ) { std::vector inputs1; std::vector inputs2; for (int i=0;i!=50;++i) { inputs1.push_back(i); } for (int i=0;i!=70;++i) { inputs2.push_back(50+i); } Data set1= createDataFromRange(inputs1,10); Data set2= createDataFromRange(inputs2,7); set1.append(set2); BOOST_REQUIRE_EQUAL(set1.numberOfBatches(),15); BOOST_REQUIRE_EQUAL(set1.numberOfElements(),120); for(std::size_t i = 0; i != 15; ++i){ if(i < 5){ BOOST_CHECK_EQUAL(set1.batch(i).size(),10); } else{ BOOST_CHECK_EQUAL(set1.batch(i).size(),7); } } for(std::size_t i = 0; i != 120;++i){ BOOST_CHECK_EQUAL(set1.element(i),i); } } BOOST_AUTO_TEST_CASE( Data_ColumnAccess ) { std::vector inputs; for (size_t i=0;i!=50;++i) { RealVector r(2); r(0) = i / 2.0; r(1) = 5; inputs.push_back(r); } UnlabeledData set = createDataFromRange(inputs); RealVector c0 = getColumn(set, 0); setColumn(set, 0, c0); for(std::size_t i = 0; i != 50; ++i){ BOOST_CHECK_EQUAL(set.element(i)(0), set.element(i)(0)); } } BOOST_AUTO_TEST_CASE( LabledData_Merge_Test ) { std::vector inputs1; std::vector labels1; std::vector inputs2; std::vector labels2; for (int i=0;i!=50;++i) { inputs1.push_back(i); labels1.push_back(i*2); } for (int i=0;i!=70;++i) { inputs2.push_back(50+i); labels2.push_back((50+i)*2); } LabeledData set1= createLabeledDataFromRange(inputs1,labels1,10); LabeledData set2= createLabeledDataFromRange(inputs2,labels2,7); set1.append(set2); BOOST_REQUIRE_EQUAL(set1.numberOfBatches(),15); BOOST_REQUIRE_EQUAL(set1.numberOfElements(),120); for(std::size_t i = 0; i != 15; ++i){ if(i < 5){ BOOST_CHECK_EQUAL(set1.batch(i).size(),10); } else{ BOOST_CHECK_EQUAL(set1.batch(i).size(),7); } } for(std::size_t i = 0; i != 120;++i){ BOOST_CHECK_EQUAL(set1.element(i).input,i); BOOST_CHECK_EQUAL(set1.element(i).label,2*i); } } BOOST_AUTO_TEST_CASE( Set_splitAtElement_MiddleOfBatch_Test ) { std::vector inputs; // fill the vectors: inputs are the number [100, ..., 199] for (int i=0;i!=100;++i) { inputs.push_back(100+i); } //generate a set with unevenly sized batches std::vector batchSizes(8); batchSizes[0]=8; batchSizes[1]=24; batchSizes[2]=8; batchSizes[3]=7; batchSizes[4]=8; batchSizes[5]=12; batchSizes[6]=25; batchSizes[7]=8; //split in the middle of a batch UnlabeledData set= createDataFromRange(inputs,10); set.repartition(batchSizes); UnlabeledData split = splitAtElement(set,53); BOOST_REQUIRE_EQUAL(set.numberOfBatches(),5u); BOOST_REQUIRE_EQUAL(split.numberOfBatches(),4u); BOOST_REQUIRE_EQUAL(set.numberOfElements(),53u); BOOST_REQUIRE_EQUAL(split.numberOfElements(),47u); BOOST_CHECK_EQUAL_COLLECTIONS( set.elements().begin(),set.elements().end(), inputs.begin(),inputs.begin()+53 ); BOOST_CHECK_EQUAL_COLLECTIONS( split.elements().begin(),split.elements().end(), inputs.begin()+53,inputs.end() ); } BOOST_AUTO_TEST_CASE( RepartitionByClass_Test ) { std::vector inputs(101,UIntVector(3)); 
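	// construct 101 three-dimensional points whose label cycles through
	// 0,1,2 (i%3); with 101 elements this gives class sizes 34/34/33, which
	// the checks below rely on once repartitionByClass() has grouped the
	// elements by label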
std::vector labels(101); // generate dataset for (int i=0;i!=101;++i) { inputs[i][0] = 100+i; inputs[i][1] = 200+i; inputs[i][2] = 300+i; labels[i] = i%3; } LabeledData data = createLabeledDataFromRange(inputs,labels,9); BOOST_REQUIRE_EQUAL(data.numberOfElements(),101); BOOST_REQUIRE_EQUAL(data.numberOfBatches(),12); repartitionByClass(data,11);//different batch size to check other side effects //check dataset integrity BOOST_REQUIRE_EQUAL(data.numberOfElements(),101); BOOST_REQUIRE_EQUAL(data.numberOfBatches(),11); std::vector classes = classSizes(data); BOOST_REQUIRE_EQUAL(classes[0],34); BOOST_REQUIRE_EQUAL(classes[1],34); BOOST_REQUIRE_EQUAL(classes[2],33); //check that all labels match the elements and that all elements are still there std::vector resultInputs(101,0); for(std::size_t i = 0; i != 101; ++i){ std::size_t k = data.element(i).input(0)-100; BOOST_CHECK_EQUAL(data.element(i).input(1),k+200); BOOST_CHECK_EQUAL(data.element(i).input(2),k+300); BOOST_CHECK_EQUAL(data.element(i).label,k%3); resultInputs[k] = (unsigned int)k; } //in the end all elements should be set for(std::size_t i = 0; i != 101; ++i){ BOOST_CHECK_EQUAL(resultInputs[i],i); } //check the correct sizes of the batches BOOST_CHECK_EQUAL(data.batch(0).size(),9); BOOST_CHECK_EQUAL(data.batch(1).size(),9); BOOST_CHECK_EQUAL(data.batch(2).size(),8); BOOST_CHECK_EQUAL(data.batch(3).size(),8); BOOST_CHECK_EQUAL(data.batch(4).size(),9); BOOST_CHECK_EQUAL(data.batch(5).size(),9); BOOST_CHECK_EQUAL(data.batch(6).size(),8); BOOST_CHECK_EQUAL(data.batch(7).size(),8); BOOST_CHECK_EQUAL(data.batch(8).size(),11); BOOST_CHECK_EQUAL(data.batch(9).size(),11); BOOST_CHECK_EQUAL(data.batch(10).size(),11); //check order of the labels of the elements for(std::size_t i = 0; i != 34; ++i){ BOOST_CHECK_EQUAL(data.element(i).label, 0); } for(std::size_t i = 34; i != 68; ++i){ BOOST_CHECK_EQUAL(data.element(i).label, 1); } for(std::size_t i = 68; i != 101; ++i){ BOOST_CHECK_EQUAL(data.element(i).label, 2); } } BOOST_AUTO_TEST_CASE( BinarySubproblem_Test ) { unsigned int labels[] ={0,0,1,1,1,2,2,3,4,4,4}; unsigned int sizes[] ={21,39,31,17,57}; LabeledData data(11); for(std::size_t i = 0; i != 11; ++i){ data.batch(i).input.resize(i+10,1); data.batch(i).label.resize(i+10); for(std::size_t j = 0; j != i+10; ++j){ data.batch(i).input(j,0) = (unsigned int)j; data.batch(i).label(j) = labels[i]; } } //check all class combinations for(unsigned int ci = 0; ci != 5; ++ci){ for(unsigned int cj = 0; cj != 5; ++cj){ if(ci == cj) continue; LabeledData testset = binarySubProblem(data,ci,cj); BOOST_REQUIRE_EQUAL(numberOfClasses(testset),2); std::vector classes = classSizes(testset); BOOST_CHECK_EQUAL(classes[0], sizes[ci]); BOOST_CHECK_EQUAL(classes[1], sizes[cj]); } } } // // 2.2 range version // std::cout << "range..."; // UnlabeledData rangeSubset, rangeSubset2; // UnlabeledData rangeComplement; // set.rangeSubset(4, rangeSubset, rangeComplement); // set.rangeSubset(4, rangeSubset2); // BOOST_REQUIRE_EQUAL(rangeSubset.size(), 4); // BOOST_REQUIRE_EQUAL(rangeComplement.size(), 6); // for(size_t i=0; i != 40; ++i) { // BOOST_CHECK_EQUAL(i+100, rangeSubset(i)); // BOOST_CHECK_EQUAL(i+100, rangeSubset2(i)); // } // for(size_t i=0; i != 60; ++i) { // BOOST_CHECK_EQUAL(i+140, rangeComplement(i)); // } // // // 2.3 random set // std::cout << "random..."; // // just acheck for compile errors and sanity // UnlabeledData randomSubset; // UnlabeledData randomComplement; // set.randomSubset(12, randomSubset, randomComplement); // 
BOOST_REQUIRE_EQUAL(randomSubset.size(), 12); // BOOST_REQUIRE_EQUAL(randomComplement.size(), 8); // // // 2.4 subsets from subsets // std::cout << "subsubset..."; // UnlabeledData subsubset; // subset.rangeSubset(5, subsubset); // BOOST_REQUIRE_EQUAL(subsubset.size(), 5); // for(size_t i=0; i!=subsubset.size(); ++i) { // BOOST_CHECK_EQUAL(indizes[0][i] + 20, subsubset(i)); // } // // // 2.5 packing sets and checking equality // //2.5.1 equality // std::cout<<"equal1..."; // UnlabeledData subset3 = subset2; // BOOST_CHECK_EQUAL(subset3 == subset2,true); // BOOST_CHECK_EQUAL(subset3 != subset2,false); // //2.5.2 pack // std::cout<<"pack..."; // subset2.pack(); // testSetEquality(subset,subset2); // //2.5.3 and equality again // std::cout<<"equal2..."; // BOOST_CHECK_EQUAL(subset3 == subset2,false); // BOOST_CHECK_EQUAL(subset3 != subset2,true); // // // // 3. create and query subset // std::cout << "subset..."; // set.createNamedSubset("Dataset_test", indizes[0]); // BOOST_REQUIRE_EQUAL(set.hasNamedSubset("Dataset_test"), true); // // UnlabeledData namedSet; // set.namedSubset("Dataset_test", namedSet); // testSetEquality(namedSet, subset); // BOOST_REQUIRE_EQUAL(namedSet.hasNamedSubset("Dataset_test"), false); // // // 3.1 now with complement // std::cout << "subset complement..."; // UnlabeledData namedComplement; // set.namedSubset("Dataset_test", namedSet, namedComplement); // testSetEquality(namedSet, subset); // testSetEquality(namedComplement, complement); // BOOST_REQUIRE_EQUAL(namedSet.hasNamedSubset("Dataset_test"), false); // BOOST_REQUIRE_EQUAL(namedComplement.hasNamedSubset("Dataset_test"), false); // // //3.2 create from DataSet // std::cout<<"SetSubset..."; // set.createNamedSubset("Dataset_test2",randomSubset); // BOOST_REQUIRE_EQUAL(set.hasNamedSubset("Dataset_test2"),true); // // UnlabeledData namedSet2; // set.namedSubset("Dataset_test2",namedSet2); // testSetEquality(namedSet2,randomSubset); // // // // 4. copy constructor, default constructor and equality // // a bit late, but... // std::cout << "copy..."; // UnlabeledData copy(set); // testSetEquality(copy, set); // BOOST_REQUIRE_EQUAL(copy.hasNamedSubset("Dataset_test"), true); // UnlabeledData defaultSet; // BOOST_REQUIRE_EQUAL(defaultSet.size(), 0); // copy = defaultSet; // BOOST_REQUIRE_EQUAL(copy.size(), 0); // BOOST_REQUIRE_EQUAL(copy.hasNamedSubset("Dataset_test"), false); //~ // 2.4 subsets from subsets //~ std::cout << "subsubset..."; //~ UnlabeledData subsubset; //~ subset.rangeSubset(5, subsubset); //~ BOOST_REQUIRE_EQUAL(subsubset.size(), 5); //~ for(size_t i=0; i!=subsubset.size(); ++i) { //~ BOOST_CHECK_EQUAL(indizes[0][i] + 20, subsubset(i)); //~ } //~ // 2.5 packing sets and checking equality //~ //2.5.1 equality //~ std::cout<<"equal1..."; //~ UnlabeledData subset3 = subset2; //~ BOOST_CHECK_EQUAL(subset3 == subset2,true); //~ BOOST_CHECK_EQUAL(subset3 != subset2,false); //~ //2.5.2 pack //~ std::cout<<"pack..."; //~ subset2.pack(); //~ testSetEquality(subset,subset2); //~ //2.5.3 and equality again //~ std::cout<<"equal2..."; //~ BOOST_CHECK_EQUAL(subset3 == subset2,false); //~ BOOST_CHECK_EQUAL(subset3 != subset2,true); //~ // 3. 
create and query subset //~ std::cout << "subset..."; //~ set.createNamedSubset("Dataset_test", indizes[0]); //~ BOOST_REQUIRE_EQUAL(set.hasNamedSubset("Dataset_test"), true); //~ UnlabeledData namedSet; //~ set.namedSubset("Dataset_test", namedSet); //~ testSetEquality(namedSet, subset); //~ BOOST_REQUIRE_EQUAL(namedSet.hasNamedSubset("Dataset_test"), false); //~ // 3.1 now with complement //~ std::cout << "subset complement..."; //~ UnlabeledData namedComplement; //~ set.namedSubset("Dataset_test", namedSet, namedComplement); //~ testSetEquality(namedSet, subset); //~ testSetEquality(namedComplement, complement); //~ BOOST_REQUIRE_EQUAL(namedSet.hasNamedSubset("Dataset_test"), false); //~ BOOST_REQUIRE_EQUAL(namedComplement.hasNamedSubset("Dataset_test"), false); //~ //3.2 create from DataSet //~ std::cout<<"SetSubset..."; //~ set.createNamedSubset("Dataset_test2",randomSubset); //~ BOOST_REQUIRE_EQUAL(set.hasNamedSubset("Dataset_test2"),true); //~ UnlabeledData namedSet2; //~ set.namedSubset("Dataset_test2",namedSet2); //~ testSetEquality(namedSet2,randomSubset); //~ //3.3 get indices back //~ std::vector< std::size_t > ret_ind; //~ set.namedSubsetIndices( "Dataset_test", ret_ind ); //~ for ( unsigned int i=0; i<10; i++ ) { //~ BOOST_REQUIRE_EQUAL( ret_ind[i], 2*i ); //~ } //COMMENTED OUT BECAUSE OF JENKINS BUILD SERVER PROBLEMS WITH READ/WRITE COMMANDS! // //4. split file; read and write // std::vector< std::size_t > reta, retb, ret2a, ret2b; // set.createSubsetFileFromSubset( "Dataset_test", "test_data/splitfile_test.split" ); // set.createSubsetFileFromSubset( "Dataset_test2", "test_data/splitfile_test2.split" ); // set.createSubsetFromFile( "test_data/splitfile_test.split" ); // set.createSubsetFromFile( "test_data/splitfile_test.split", "my_new_set" ); // set.createSubsetFromFile( "test_data/splitfile_test2.split", "my_random_set" ); //// set.createSubsetFromFile( "test_data/splitfile_test2.split", "my_random_set" ); //this should fail // // obtain indices that were written out and reloaded // set.namedSubsetIndices( "set_from_file", reta ); // set.namedSubsetIndices( "my_new_set", retb ); // set.namedSubsetIndices( "Dataset_test2", ret2a ); // set.namedSubsetIndices( "my_random_set", ret2b ); // for ( unsigned int i=0; i<10; i++ ) { // BOOST_REQUIRE_EQUAL( reta[i], ret_ind[i] ); // BOOST_REQUIRE_EQUAL( retb[i], reta[i] ); // BOOST_REQUIRE_EQUAL( ret2a[i], ret2b[i] ); // } // // final write-out for manual control // set.createSubsetFileFromSubset( "set_from_file", "test_data/set_from_file.split" ); // set.createSubsetFileFromSubset( "my_new_set", "test_data/my_new_set.split" ); // set.createSubsetFileFromSubset( "my_random_set", "test_data/my_random_set.split" ); //~ // 5. copy constructor, default constructor and equality //~ // a bit late, but... //~ std::cout << "copy..."; //~ UnlabeledData copy(set); //~ testSetEquality(copy, set); //~ BOOST_REQUIRE_EQUAL(copy.hasNamedSubset("Dataset_test"), true); //~ UnlabeledData defaultSet; //~ BOOST_REQUIRE_EQUAL(defaultSet.size(), 0); //~ copy = defaultSet; //~ BOOST_REQUIRE_EQUAL(copy.size(), 0); //~ BOOST_REQUIRE_EQUAL(copy.hasNamedSubset("Dataset_test"), false); //~ std::cout << "Set tests done" << std::endl; //} /*BOOST_AUTO_TEST_CASE( Dataset_Test ) { std::cout<<"testing Dataset..."; std::vector inputs; std::vector labels; std::vector indizes[2]; //the testresults std::vector testLabelPartition[2]; //fill the vectors. 
inputs are the number [0,19], labels go from [20...39] for(size_t i=0;i!=20;++i){ inputs.push_back(i); labels.push_back(20+i); indizes[i%2].push_back(i); testLabelPartition[i%2].push_back(20+i); } LabeledData set(inputs,labels); // 1. test element access and thus equality of sets BOOST_REQUIRE_EQUAL(set.size(),inputs.size()); for(size_t i=0;i!=set.size();++i){ BOOST_CHECK_EQUAL(inputs[i],set.input(i)); BOOST_CHECK_EQUAL(labels[i],set.label(i)); } // 1.1 test creating sets std::cout<<"sets..."; UnlabeledData inputSet=set.inputs(); Data labelSet=set.labels(); for(size_t i=0;i!=set.size();++i){ BOOST_CHECK_EQUAL(inputSet(i),set.input(i)); BOOST_CHECK_EQUAL(labelSet(i),set.label(i)); } // 1.2 test creating datasets from sets LabeledData setCopySet(inputSet,labelSet); testDatasetEquality(setCopySet,set); // 2. create indexed partitions std::cout<<"indexed..."; LabeledData subset; set.indexedSubset(indizes[0],subset); BOOST_REQUIRE_EQUAL(subset.size(),indizes[0].size()); for(size_t i=0;i!=subset.size();++i){ BOOST_CHECK_EQUAL(indizes[0][i],subset.input(i)); BOOST_CHECK_EQUAL(indizes[0][i]+20,subset.label(i)); } // 2.1 now with complement std::cout<<"indexed complement..."; LabeledData subset2; LabeledData complement; set.indexedSubset(indizes[0],subset2,complement); testDatasetEquality(subset,subset2); BOOST_REQUIRE_EQUAL(complement.size(),indizes[1].size()); for(size_t i=0;i!=subset2.size();++i){ BOOST_CHECK_EQUAL(indizes[1][i],complement.input(i)); BOOST_CHECK_EQUAL(indizes[1][i]+20,complement.label(i)); } // 2.2 range version std::cout<<"range..."; LabeledData rangeSubset,rangeSubset2; LabeledData rangeComplement; set.rangeSubset(10,rangeSubset,rangeComplement); BOOST_REQUIRE_EQUAL(rangeSubset.size(),10); BOOST_REQUIRE_EQUAL(rangeComplement.size(),10); for(size_t i=0;i!=rangeSubset.size();++i){ BOOST_CHECK_EQUAL(i+20,rangeSubset.label(i)); BOOST_CHECK_EQUAL(i+30,rangeComplement.label(i)); } set.rangeSubset(10,rangeSubset2); testDatasetEquality(rangeSubset,rangeSubset2); // 2.3 random set std::cout<<"random..."; // just a check for compile errors and sanity LabeledData randomSubset; LabeledData randomComplement; set.randomSubset(12,randomSubset,randomComplement); BOOST_REQUIRE_EQUAL(randomSubset.size(),12); BOOST_REQUIRE_EQUAL(randomComplement.size(),8); // 2.4 subsets from subsets std::cout<<"subsubset..."; LabeledData subsubset; subset.rangeSubset(5,subsubset); BOOST_REQUIRE_EQUAL(subsubset.size(),5); for(size_t i=0;i!=subsubset.size();++i){ BOOST_CHECK_EQUAL(indizes[0][i],subsubset.input(i)); BOOST_CHECK_EQUAL(indizes[0][i]+20,subsubset.label(i)); } // 2.5 packing sets and checking equality //2.5.1 equality std::cout<<"equal1..."; LabeledData subset3 = subset2; BOOST_CHECK_EQUAL(subset3 == subset2,true); BOOST_CHECK_EQUAL(subset3 != subset2,false); //2.5.2 pack std::cout<<"pack..."; subset2.pack(); testDatasetEquality(subset,subset2); //2.5.3 and equality again std::cout<<"equal2..."; BOOST_CHECK_EQUAL(subset3 == subset2,false); BOOST_CHECK_EQUAL(subset3 != subset2,true); // 3. 
create and query subset std::cout<<"subset..."; set.createNamedSubset("Dataset_test",indizes[0]); BOOST_REQUIRE_EQUAL(set.hasNamedSubset("Dataset_test"),true); LabeledData namedSet; set.namedSubset("Dataset_test",namedSet); testDatasetEquality(namedSet,subset); BOOST_REQUIRE_EQUAL(namedSet.hasNamedSubset("Dataset_test"),false); // 3.1 now with complement std::cout<<"subset complement..."; LabeledData namedComplement; set.namedSubset("Dataset_test",namedSet,namedComplement); testDatasetEquality(namedSet,subset); testDatasetEquality(namedComplement,complement); BOOST_REQUIRE_EQUAL(namedSet.hasNamedSubset("Dataset_test"),false); BOOST_REQUIRE_EQUAL(namedComplement.hasNamedSubset("Dataset_test"),false); //3.2 create from DataSet std::cout<<"DataSubset..."; set.createNamedSubset("Dataset_test2",randomSubset); BOOST_REQUIRE_EQUAL(set.hasNamedSubset("Dataset_test2"),true); LabeledData namedSet2; set.namedSubset("Dataset_test2",namedSet2); testDatasetEquality(namedSet2,randomSubset); // 4. copy constructor, default constructor and equality // a bit late, but... std::cout<<"copy..."; LabeledData copy(set); testDatasetEquality(copy,set); BOOST_REQUIRE_EQUAL(copy.hasNamedSubset("Dataset_test"),true); LabeledData defaultSet; BOOST_REQUIRE_EQUAL(defaultSet.size(),0); copy = defaultSet; BOOST_REQUIRE_EQUAL(copy.size(),0); BOOST_REQUIRE_EQUAL(copy.hasNamedSubset("Dataset_test"),false); std::cout<<"Dataset tests done"< data(1000); for (int i=0; i<1000; i++){ data[i]=3*i+5; } Data dataSource = createDataFromRange(data,23); //now we serialize the Dataset std::ostringstream outputStream; TextOutArchive oa(outputStream); oa << dataSource; //and create a new set from the serialization Data dataDeserialized; std::istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> dataDeserialized; testSetEquality(dataSource,dataDeserialized); } BOOST_AUTO_TEST_CASE( LABELED_DATA_SERIALIZE ) { std::vector data(1000); std::vector labels(1000); for (int i=0; i<1000; i++){ data[i]=3*i+5; labels[i]=5*i+1001; } LabeledData dataSource = createLabeledDataFromRange(data,labels,23); //now we serialize the Dataset std::ostringstream outputStream; TextOutArchive oa(outputStream); oa << dataSource; //and create a new set from the serialization LabeledData dataDeserialized; std::istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> dataDeserialized; testDatasetEquality(dataSource,dataDeserialized); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/ExportKernelMatrix.cpp000066400000000000000000000103551314655040000206740ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_ExportKernelMatrix #include #include #include #include #include #include #include #include using namespace shark; //first, use the example provided in the libsvm readme file const char test_classification[] = "-1 1:1 2:1 3:1 4:1.0000 \n\ 1 2:3 4:3\n\ +1 3:1e0 \n\ -1 1:10e-1 3:1\n"; const char test_mc_classification[] = "1 1:1 2:1 3:1 4:1.0000 \n\ 3 2:3 4:3\n\ 2 3:1e0 \n\ 1 1:10e-1 3:1\n"; const char test_regression[] = "2.2 1:1 2:1 3:1 4:1.0000 \n\ 0.07 2:3 4:3\n\ -12 3:1e0 \n\ +1.4e-1 1:10e-1 3:1\n"; BOOST_AUTO_TEST_SUITE (Data_ExportKernelMatrix) BOOST_AUTO_TEST_CASE( Set_ExportKernelMatrix ) { // load data and set up vars std::stringstream ssc(test_classification), ssmcc(test_mc_classification); //dense class., dense mc-class. std::stringstream ssr(test_regression), sssc(test_classification); // dense regr., sparse classif. 
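	// The string streams above hold the toy problems in LIBSVM format; each
	// block below computes the linear-kernel Gram matrix of one dataset and
	// writes it out under a different normalization policy (NONE, unit
	// trace, trace N, unit variance, centering, centering plus unit trace).
	// A minimal usage sketch (template arguments and the file name are
	// assumptions for illustration):
	//   LabeledData<RealVector, unsigned int> data;
	//   importSparseData(data, stream);
	//   DenseLinearKernel kernel;
	//   exportKernelMatrix(data, kernel, "gram.libsvm", NONE, false, 15);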
LabeledData test_ds_c; LabeledData test_ds_mcc; //~ LabeledData test_ds_r; LabeledData test_ds_sc; importSparseData(test_ds_c,ssc); //dense classif. importSparseData(test_ds_mcc,ssmcc); //dense mc-classif. //~ importSparseData(test_ds_r, ssr); //dense regression importSparseData(test_ds_sc, sssc); //sparse classif. DenseLinearKernel kernel; CompressedLinearKernel skernel; //sparse // no scaling exportKernelMatrix( test_ds_c, kernel, "test_output/check_kernelmatrix_c_none.libsvm", NONE, false, 15 ); exportKernelMatrix( test_ds_mcc, kernel, "test_output/check_kernelmatrix_mcc_none.libsvm", NONE, false, 15 ); //~ exportKernelMatrix( test_ds_r, kernel, "test_output/check_kernelmatrix_r_none.libsvm", NONE, false, 15 ); // trace=1 exportKernelMatrix( test_ds_c, kernel, "test_output/check_kernelmatrix_c_trace.libsvm", MULTIPLICATIVE_TRACE_ONE, false, 15 ); exportKernelMatrix( test_ds_mcc, kernel, "test_output/check_kernelmatrix_mcc_trace.libsvm", MULTIPLICATIVE_TRACE_ONE, false, 15 ); //~ exportKernelMatrix( test_ds_r, kernel, "test_output/check_kernelmatrix_r_trace.libsvm", MULTIPLICATIVE_TRACE_ONE, false, 15 ); // trace=N exportKernelMatrix( test_ds_c, kernel, "test_output/check_kernelmatrix_c_traceN.libsvm", MULTIPLICATIVE_TRACE_N, false, 15 ); exportKernelMatrix( test_ds_mcc, kernel, "test_output/check_kernelmatrix_mcc_traceN.libsvm", MULTIPLICATIVE_TRACE_N, false, 15 ); //~ exportKernelMatrix( test_ds_r, kernel, "test_output/check_kernelmatrix_r_traceN.libsvm", MULTIPLICATIVE_TRACE_N, false, 15 ); // var=1 exportKernelMatrix( test_ds_c, kernel, "test_output/check_kernelmatrix_c_var.libsvm", MULTIPLICATIVE_VARIANCE_ONE, false, 15 ); exportKernelMatrix( test_ds_mcc, kernel, "test_output/check_kernelmatrix_mcc_var.libsvm", MULTIPLICATIVE_VARIANCE_ONE, false, 15 ); //~ exportKernelMatrix( test_ds_r, kernel, "test_output/check_kernelmatrix_r_var.libsvm", MULTIPLICATIVE_VARIANCE_ONE, false, 15 ); // center exportKernelMatrix( test_ds_c, kernel, "test_output/check_kernelmatrix_c_center.libsvm", CENTER_ONLY, false, 15 ); exportKernelMatrix( test_ds_mcc, kernel, "test_output/check_kernelmatrix_mcc_center.libsvm", CENTER_ONLY, false, 15 ); //~ exportKernelMatrix( test_ds_r, kernel, "test_output/check_kernelmatrix_r_center.libsvm", CENTER_ONLY, false, 15 ); // center and tr=1 exportKernelMatrix( test_ds_c, kernel, "test_output/check_kernelmatrix_c_center_tr.libsvm", CENTER_AND_MULTIPLICATIVE_TRACE_ONE, false, 15 ); exportKernelMatrix( test_ds_mcc, kernel, "test_output/check_kernelmatrix_mcc_center_tr.libsvm", CENTER_AND_MULTIPLICATIVE_TRACE_ONE, false, 15 ); //~ exportKernelMatrix( test_ds_r, kernel, "test_output/check_kernelmatrix_r_center_tr.libsvm", CENTER_AND_MULTIPLICATIVE_TRACE_ONE, false, 15 ); // sparse classif., no scaling exportKernelMatrix( test_ds_sc, skernel, "test_output/check_kernelmatrix_sparse_c_none.libsvm", NONE, false, 15 ); // mt: todo: add tests here once read-in is supported } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/HDF5Tests.cpp000066400000000000000000000206431314655040000165770ustar00rootroot00000000000000#define BOOST_TEST_MODULE CoreHDF5TestModule #include "shark/Data/HDF5.h" #include #include #include #include #include namespace shark { /// Fixture for testing HDF5 file import class HDF5Fixture { public: HDF5Fixture() : m_exampleFileName("./test_data/testfile_for_import.h5"), m_datasetNameData1("data/data1"), m_labelNameLabel1("data/label1"), m_labelNameWrongLabel("data/wrong_label"), m_dsNameOneDimension("data/one_dimension"), 
m_dsNameThreeDimension("data/three_dimension"), m_dsNameCscVal("csc/data"), m_dsNameIndices("csc/indices"), m_dsNameIndPtr("csc/indptr") { using namespace boost::assign; // 3 x 4 matrix m_expectedFromData1 += list_of(1.0)(2.0)(3.0), list_of(4.0)(5.0)(6.0), list_of(7.0)(8.0)(9.0), list_of(10.0)(11.0)(12.0); // vector of 4 elements m_expectedFromLabel1 += 80,81,82,83; } /// Verify the @a actual matrix is the same as @a expected template boost::test_tools::predicate_result verify( const MatrixType& actual, const ExpectedType& expected ); /// Validate @exp has @msg bool validate(const std::string& msg, const shark::Exception& exp); /// Name of test file const std::string m_exampleFileName; /// dataset: data/data1 const std::string m_datasetNameData1; std::vector > m_expectedFromData1; /// dataset: data/label1 const std::string m_labelNameLabel1; const std::string m_labelNameWrongLabel; std::vector m_expectedFromLabel1; const std::string m_dsNameOneDimension; const std::string m_dsNameThreeDimension; const std::string m_dsNameCscVal; const std::string m_dsNameIndices; const std::string m_dsNameIndPtr; }; template boost::test_tools::predicate_result HDF5Fixture::verify( const MatrixType& actual, const ExpectedType& expected) { boost::test_tools::predicate_result res( true ); if (actual.numberOfElements() != expected.size()) { res = false; res.message() << boost::format("\nActual size: %1%, expected size: %2%") % actual.numberOfElements() % expected.size(); } for (size_t i = 0; i < actual.numberOfElements(); ++i) { if (actual.element(i).size() != expected[i].size()) { res = false; res.message() << boost::format("\nIndex: %1%, actual size: %2%, expected size: %3%") % i % actual.element(i).size() % expected[i].size(); } for (size_t j = 0; j < actual.element(i).size(); ++j) { if (actual.element(i)(j) != expected[i][j]) { // floating point comparison here should also work in our case res = false; res.message() << boost::format("\nElements not equal: actual[%1%][%2%]=%3%, expected[%1%][%2%]=%4%") % i % j % actual.element(i)(j) % expected[i][j]; } } } return res; } bool HDF5Fixture::validate(const std::string& msg, const shark::Exception& exp) { bool result = exp.what() == msg; if(! 
result) std::cout<<"expected: "< data; importHDF5(data, m_exampleFileName, m_datasetNameData1); BOOST_CHECK(verify(data, m_expectedFromData1)); } // Test that importing a basic 2-dimension labeled matrix of double type works fine { LabeledData data; importHDF5(data, m_exampleFileName, m_datasetNameData1, m_labelNameLabel1); BOOST_CHECK(verify(data.inputs(), m_expectedFromData1)); BOOST_CHECK_EQUAL_COLLECTIONS( data.labels().elements().begin(), data.labels().elements().end(), m_expectedFromLabel1.begin(), m_expectedFromLabel1.end() ); } // Test same thing for compressed vector { Data data; importHDF5(data, m_exampleFileName, m_datasetNameData1); BOOST_CHECK(verify(data, m_expectedFromData1)); } { LabeledData data; importHDF5(data, m_exampleFileName, m_datasetNameData1, m_labelNameLabel1); BOOST_CHECK(verify(data.inputs(), m_expectedFromData1)); BOOST_CHECK_EQUAL_COLLECTIONS( data.labels().elements().begin(), data.labels().elements().end(), m_expectedFromLabel1.begin(), m_expectedFromLabel1.end() ); } } BOOST_AUTO_TEST_CASE(CscTests) { // Basic Test for CSC { using namespace boost::assign; std::vector csc; csc += m_dsNameCscVal, m_dsNameIndices, m_dsNameIndPtr; Data data; importHDF5(data, m_exampleFileName, csc); std::vector > expected; expected += list_of(1.0)(0.0)(2.0), list_of(0.0)(0.0)(3.0), list_of(4.0)(5.0)(6.0); BOOST_CHECK(verify(data, expected)); } // Test (CSC + compressed vector) works { // Python: // >>> from scipy.sparse import * // >>> from scipy import * // >>> data=array([10,20,30,50,40,60,70,80]) // >>> indices=array([0,0,1,2,1,2,2,3]) // >>> idxptr=array([0,1,3,4,6,7,8]) // >>> csc_matrix((data,indices,idxptr)).todense() // matrix([[10, 20, 0, 0, 0, 0], // [ 0, 30, 0, 40, 0, 0], // [ 0, 0, 50, 60, 70, 0], // [ 0, 0, 0, 0, 0, 80]]) // >>> using namespace boost::assign; std::vector csc; csc += "csc2/data", "csc2/indices", "csc2/idxptr"; LabeledData data; importHDF5(data, m_exampleFileName, csc, "csc2/label"); std::vector > expectedInputs; expectedInputs += list_of(10)(0)(0)(0), list_of(20)(30)(0)(0), list_of(0)(0)(50)(0), list_of(0)(40)(60)(0), list_of(0)(0)(70)(0), list_of(0)(0)(0)(80); std::vector expectedLabels; expectedLabels += 100,200,300,400,500,600; BOOST_CHECK(verify(data.inputs(), expectedInputs)); BOOST_CHECK_EQUAL_COLLECTIONS( data.labels().elements().begin(), data.labels().elements().end(), expectedLabels.begin(), expectedLabels.end() ); } } BOOST_AUTO_TEST_CASE(OneDimension) { // Test that accessing one-dimension dataset works fine using namespace boost::assign; std::vector > expected; expected += list_of(1.0)(2.0)(3.0); Data data; importHDF5(data, m_exampleFileName, m_dsNameOneDimension); BOOST_CHECK(verify(data, expected)); } BOOST_AUTO_TEST_CASE(NegativeTests) { // Test that trying to import for a non-exist file will throw an exception { const std::string nonExistFileName = "non-exist.h5"; Data data; BOOST_CHECK_EXCEPTION( importHDF5(data, nonExistFileName, "dummy/dummy"), shark::Exception, boost::bind( &HDF5Fixture::validate, this, (boost::format("[loadIntoMatrix] open file name: %1% (FAILED)") % nonExistFileName).str(), _1)); } // Test that accessing a non-exist dataset will throw an exception { const std::string dummyDataset = "data/dummy"; Data data; BOOST_CHECK_EXCEPTION( importHDF5(data, m_exampleFileName, dummyDataset), shark::Exception, boost::bind( &HDF5Fixture::validate, this, (boost::format("[importHDF5] Get data set(%1%) info from file(%2%).") % dummyDataset % m_exampleFileName).str(), _1)); } // Test that accessing a 3-dimension dataset will 
throw exception { Data data; BOOST_CHECK_EXCEPTION( importHDF5(data, m_exampleFileName, m_dsNameThreeDimension), shark::Exception, boost::bind( &HDF5Fixture::validate, this, (boost::format("[loadIntoMatrix][%2%][%1%] Support 1 or 2 dimensions, but this dataset has at least 3 dimensions.") % m_dsNameThreeDimension % m_exampleFileName).str(), _1)); } // Test that accessing with unmatched data type will throw exception { Data data; // 'data/data1' is actual of 64-bit float BOOST_CHECK_EXCEPTION( importHDF5(data, m_exampleFileName, m_datasetNameData1), shark::Exception, boost::bind( &HDF5Fixture::validate, this, (boost::format("[loadIntoMatrix] DataType doesn't match. HDF5 data type in dataset(%1%::%2%): 1, size: 8") % m_exampleFileName % m_datasetNameData1).str(), _1)); } } BOOST_AUTO_TEST_SUITE_END() } // namespace shark Shark-3.1.4/Test/Data/LabelOrder_Test.cpp000066400000000000000000000062651314655040000201040ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief LabelOrder Test * * * * \author Aydin Demircioglu * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *
 * <http://image.diku.dk/shark/>
 * This file is part of Shark.
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================
#define BOOST_TEST_MODULE LabelOrder
#include <shark/Data/LabelOrder.h>
#include <shark/Data/Dataset.h>
#include <shark/Data/DataDistribution.h>
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <vector>
#include <iostream>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Data_LabelOrder_Test)

BOOST_AUTO_TEST_CASE(LabelOrder_General)
{
	// create a dataset
	size_t datasetSize = 64;
	Chessboard problem(2, 0);
	LabeledData<RealVector, unsigned int> dataset = problem.generateDataset(datasetSize);

	// now distort every label; reversing the order is enough. make sure we
	// have label 0 in it to test if we can handle that too
	for(std::size_t i = 0; i < dataset.numberOfElements(); ++i)
		dataset.labels().element(i) = (unsigned int)(2 * (dataset.numberOfElements() - 1) - 2 * i);

	// create a copy we can compare with later on
	LabeledData<RealVector, unsigned int> datasetCopy = dataset;

	// now reorder the dataset
	LabelOrder labelOrder;
	labelOrder.normalizeLabels(dataset);

	// obtain the ordering
	std::vector<unsigned int> internalOrder;
	labelOrder.getLabelOrder(internalOrder);

	// check the order
	for(std::size_t i = 0; i < internalOrder.size(); ++i)
		BOOST_REQUIRE_EQUAL(internalOrder[i], (unsigned int)(2 * (dataset.numberOfElements() - 1) - 2 * i));

	// finally map the labels back and compare against the untouched copy
	labelOrder.restoreOriginalLabels(dataset);

	// make sure we did not lose anything
	for(std::size_t i = 0; i < internalOrder.size(); ++i)
		BOOST_REQUIRE_EQUAL(dataset.labels().element(i), datasetCopy.labels().element(i));

	// now check for some error cases:
	// create labels that are out of range and call the restore function
	LabeledData<RealVector, unsigned int> datasetBroken = dataset;
	for(std::size_t i = 0; i < dataset.numberOfElements(); ++i)
		datasetBroken.labels().element(i) = (unsigned int)(internalOrder.size() + i);
	try
	{
		labelOrder.restoreOriginalLabels(datasetBroken);
		// this should have thrown an error.
		BOOST_REQUIRE_EQUAL(1, 0);
	}
	catch(...)
	{
		// everything is fine.
BOOST_REQUIRE_EQUAL(0, 0); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/SparseData.cpp000066400000000000000000000127121314655040000171130ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_SparseData #include #include #include #include //for safety export test #include #include using namespace shark; std::size_t const VectorSize = 11; std::size_t const NumLines = 5; double test_classification_values[NumLines][VectorSize] ={ {0.3,0,0,-4,0,0,0,1.1,0,0,0}, {0,1.2,8.82,0,0,0,1.e-4,0,0,0,0}, {0,0,0,0,0,0,0,0,0.124,0,0}, {0,0,0,0,0,0,0,0,0,0,0}, {0,4.6,0,1000,0,0,0,-0.7,0,0,0.1} }; const char test_classification[] = "-1 1:0.3 4:-4 8:1.1 \n\ +1 2:1.2 3:8.82 7:1e-4\r\n\ 1 1:0.0 9:0.124\r\n\ -1\n\ 1 2:4.6 4:1000 8:-0.7 11:0.1\n"; const char test_mc_classification[] = "4 1:0.3 4:-4 8:1.1 \n\ 3 2:1.2 3:8.82 7:1e-4\n\ 2 1:0.0 9:0.124\n\ 1\n\ 3 2:4.6 4:1000 8:-0.7 11:0.1\n"; const char test_mc_classification_missing_label[] = "4 1:0.3 4:-4 8:1.1 \n\ 3 2:1.2 3:8.82 7:1e-4\n\ 2 1:0.0 9:0.124\n\ 1\n\ 3 2:4.6 4:1000 8:-0.7 11:0.1\n"; const char test_regression[] = "7.1 1:0.3 4:-4 8:1.1\n\ 9.99 2:1.2 3:8.82 7:1e-4\r\ -5 1:0.0 9:0.124\n\ 1\n\ 5e2 2:4.6 4:1000 8:-0.7 11:0.1\r\n"; const char test_mc_classification_labelmap[] = "8 1:0.3 4:-4 8:1.1 \n\ 4 2:1.2 3:8.82 7:1e-4\n\ 2 1:0.0 9:0.124\n\ 19\n\ 4 2:4.6 4:1000 8:-0.7 11:0.1\n"; const char test_export[] = "0.3 0.0 0.0 -4 0.0 0.0 0.0 1.1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0\n\ 0.0 1.2 8.82 0.0 0.0 0.0 1e-4 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1\n\ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.12437 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1\n\ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0\n\ 0.0 4.6 0.0 1000 0.0 0.0 0.0 -0.7 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1\n\ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 17.4 0.0 0.0 0.0 0.0 -2.24 0\n"; BOOST_AUTO_TEST_SUITE (Data_SparseData) BOOST_AUTO_TEST_CASE( Set_SparseData ) { std::stringstream ssc(test_classification), ssmcc(test_mc_classification); //dense: classif., mc-classif., regr. std::stringstream sssc(test_classification), sssmcc(test_mc_classification); //sparse: classif., mc-classif., regr. LabeledData test_ds_c; LabeledData test_ds_mcc; LabeledData test_ds_sc; LabeledData test_ds_smcc; importSparseData(test_ds_c, ssc); //dense classif. importSparseData(test_ds_mcc, ssmcc); //dense mc-classif. importSparseData(test_ds_sc, sssc); //sparse classif. importSparseData(test_ds_smcc, sssmcc); //sparse mc-classif. 
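	// importSparseData maps LIBSVM labels onto contiguous unsigned ints: the
	// binary labels -1/+1 (and bare 1) become 0/1, and the multi-class
	// labels 1..4 become 0..3; feature indices in the file are 1-based, so
	// "1:0.3" lands at vector index 0. The checks below pin down exactly
	// this mapping.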
//check that we got the proper number of lines BOOST_REQUIRE_EQUAL(test_ds_c.numberOfElements(), NumLines); BOOST_REQUIRE_EQUAL(test_ds_mcc.numberOfElements(), NumLines); BOOST_REQUIRE_EQUAL(test_ds_sc.numberOfElements(), NumLines); BOOST_REQUIRE_EQUAL(test_ds_smcc.numberOfElements(), NumLines); // check abels of binary/multiclass readin BOOST_CHECK_EQUAL(0u, test_ds_c.element(0).label); BOOST_CHECK_EQUAL(1u, test_ds_c.element(1).label); BOOST_CHECK_EQUAL(1u, test_ds_c.element(2).label); BOOST_CHECK_EQUAL(0u, test_ds_c.element(3).label); BOOST_CHECK_EQUAL(1u, test_ds_c.element(4).label); BOOST_CHECK_EQUAL(3u, test_ds_mcc.element(0).label); BOOST_CHECK_EQUAL(2u, test_ds_mcc.element(1).label); BOOST_CHECK_EQUAL(1u, test_ds_mcc.element(2).label); BOOST_CHECK_EQUAL(0u, test_ds_mcc.element(3).label); BOOST_CHECK_EQUAL(2u, test_ds_mcc.element(4).label); for ( unsigned int i=0; i test_ds_e = createLabeledDataFromRange( xe, ye ); //~ BOOST_CHECK_EQUAL(test_ds_e.numberOfElements(), 6); //~ exportSparseData( test_ds_e, "test_output/check1.libsvm" ); //~ exportSparseData( test_ds_c, "test_output/check2.libsvm" ); //~ LabeledData import_of_export_1, import_of_export_2; //~ importSparseData( import_of_export_1, "test_output/check1.libsvm" ); //~ importSparseData( import_of_export_2, "test_output/check2.libsvm" ); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Data/Statistics.cpp000066400000000000000000000061551314655040000172220ustar00rootroot00000000000000#include "shark/Data/Statistics.h" #define BOOST_TEST_MODULE Data_Statistics #include #include using namespace shark; const std::size_t Dimensions=4; struct StatisticsFixture { UnlabeledData inputData; UnlabeledData inputDataSmallBatch; StatisticsFixture() { //values of the input matrix double vals[Dimensions][Dimensions]= { {0.5, 2, 1.3, 2.6}, {1.7, 2.4, 1.3, -4 }, {-2, 3.1, 2, 2.3}, {-0.25, -1, 2.1, -0.8} }; std::vector vec(Dimensions,RealVector(4)); for(std::size_t row=0;row!= Dimensions;++row) { for(std::size_t col=0;col!=Dimensions;++col) { vec[row](col) = vals[row][col]; } } inputData = createDataFromRange(vec); inputDataSmallBatch = createDataFromRange(vec,1); } }; //results double resultMean[Dimensions]={-0.0125,1.625,1.675,0.025}; double resultVariance[Dimensions][Dimensions]= { { 1.80047,-0.19718,-0.395312,-2.47468}, {-0.19718, 2.45188, -0.26687, 0.84187}, {-0.39531,-0.26687, 0.141875, 0.23312}, {-2.47468, 0.84187, 0.233125, 7.17188} }; BOOST_FIXTURE_TEST_SUITE (Data_Statistics, StatisticsFixture); BOOST_AUTO_TEST_CASE( Data_Statistics_mean ) { // Calculate mean vector: RealVector meanVec = mean(inputData); RealVector meanVecSmall = mean(inputDataSmallBatch); for(std::size_t i=0;i!=Dimensions;++i) { BOOST_CHECK_SMALL(meanVec(i)-resultMean[i],1.e-5); BOOST_CHECK_SMALL(meanVecSmall(i)-resultMean[i],1.e-5); } } BOOST_AUTO_TEST_CASE( Data_Statistics_variance ) { // Calculate variance vector: RealVector varVec = variance(inputData); RealVector varVecSmall = variance(inputDataSmallBatch); for(std::size_t i=0;i!=Dimensions;++i) { BOOST_CHECK_SMALL(varVec(i)-resultVariance[i][i],1.e-5); BOOST_CHECK_SMALL(varVecSmall(i)-resultVariance[i][i],1.e-5); } } BOOST_AUTO_TEST_CASE( Data_Statistics_meanvar ) { RealVector meanVec; RealVector varVec; RealVector meanVecAlternative; RealVector varVecAlternative; RealVector meanVecSmall; RealVector varVecSmall; // Calculate mean and variance values: meanvar(inputData, meanVec, varVec); meanvar(inputDataSmallBatch, meanVecSmall, varVecSmall); for(std::size_t i=0;i!=Dimensions;++i) { 
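		// mean() has to reproduce the hand-computed column means in
		// resultMean, independent of how the rows are distributed over
		// batches (one big batch vs. one element per batch)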
BOOST_CHECK_SMALL(meanVec(i)-resultMean[i],1.e-5); BOOST_CHECK_SMALL(varVec(i)-resultVariance[i][i],1.e-5); BOOST_CHECK_SMALL(meanVecSmall(i)-resultMean[i],1.e-5); BOOST_CHECK_SMALL(varVecSmall(i)-resultVariance[i][i],1.e-5); } } BOOST_AUTO_TEST_CASE( Data_Statistics_meanvar_covariance ) { RealVector meanVec; RealMatrix varMat; RealVector meanVecSmall; RealMatrix varMatSmall; // Calculate mean and variance values: meanvar(inputData, meanVec, varMat); meanvar(inputDataSmallBatch, meanVecSmall, varMatSmall); for(std::size_t i=0;i!=Dimensions;++i) { BOOST_CHECK_SMALL(meanVec(i)-resultMean[i],1.e-5); BOOST_CHECK_SMALL(meanVecSmall(i)-resultMean[i],1.e-5); for(std::size_t j=0; j != Dimensions; ++j){ BOOST_CHECK_SMALL(varMat(i,j)-resultVariance[i][j],1.e-5); BOOST_CHECK_SMALL(varMatSmall(i,j)-resultVariance[i][j],1.e-5); } } } BOOST_AUTO_TEST_SUITE_END(); Shark-3.1.4/Test/EALib/000077500000000000000000000000001314655040000144205ustar00rootroot00000000000000Shark-3.1.4/Test/EALib/SchwefelEllipsoidCMA.cpp000066400000000000000000000021071314655040000210520ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE EALib_SchwefelEllipsoidCMSA #include #include #include #include BOOST_AUTO_TEST_SUITE (EALib_SchwefelEllipsoidCMA) BOOST_AUTO_TEST_CASE( EALib_SchwefelEllipsoidCMSA ) { const unsigned int Seed = 42; const unsigned int Trials=30; const unsigned int Dimension = 10; const double GlobalStepInit = 1.; double results[Trials]; Rng::seed(Seed); SchwefelEllipsoidRotated f(Dimension); CMASearch cma; for(size_t trial=0;trial!=Trials;++trial) { // start point RealVector start(Dimension); double* p=&start(0); f.ProposeStartingPoint(p); cma.init(f, start, GlobalStepInit); // optimization loop size_t i = 0; do { cma.run(); i++; } while (cma.bestSolutionFitness() > 10E-10); results[trial]=i; std::cout< #include #define BOOST_TEST_MODULE EALib_SchwefelEllipsoidCMSA #include #include #include BOOST_AUTO_TEST_SUITE (EALib_SchwefelEllipsoidCMSA) BOOST_AUTO_TEST_CASE( EALib_SchwefelEllipsoidCMSA ) { const unsigned int Seed = 42; const unsigned int Trials=30; const unsigned int Dimension = 10; const double GlobalStepInit = 1.; double results[Trials]; Rng::seed(Seed); SchwefelEllipsoidRotated f(Dimension); CMSASearch cma; for(size_t trial=0;trial!=Trials;++trial) { // start point RealVector start(Dimension); double* p=&start(0); f.ProposeStartingPoint(p); cma.init(f, start, GlobalStepInit); // optimization loop size_t i = 0; do { cma.run(); i++; } while (cma.bestSolutionFitness() > 10E-10); results[trial]=i; } //sort and check the median std::sort(results,results+Trials); //median should lie between 250 and 278 BOOST_CHECK_CLOSE(results[Trials/2],265,5); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/EALib/SchwefelEllipsoidElitistCMA.cpp000066400000000000000000000020431314655040000224070ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE EALib_SchwefelEllipsoidCMSA #include #include #include BOOST_AUTO_TEST_SUITE (EALib_SchwefelEllipsoidElitistCMA) BOOST_AUTO_TEST_CASE( EALib_SchwefelEllipsoidCMSA ) { const unsigned int Seed = 42; const unsigned int Trials=30; const unsigned int Dimension = 10; const double GlobalStepInit = 1.; double results[Trials]; Rng::seed(Seed); SchwefelEllipsoidRotated f(Dimension); CMAElitistSearch cma; for(size_t trial=0;trial!=Trials;++trial) { // start point RealVector start(Dimension); double* p=&start(0); f.ProposeStartingPoint(p); cma.init(f, start, GlobalStepInit); // optimization loop size_t i = 0; do { cma.run(); i++; } while 
(cma.bestSolutionFitness() > 10E-10); results[trial]=i; } //sort and check the median std::sort(results,results+Trials); BOOST_CHECK_CLOSE(results[Trials/2],1600,2); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/EALib/paraboloidCMA.cpp000066400000000000000000000024461314655040000175670ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE EALib_ParaboloidCMA #include #include #include #include BOOST_AUTO_TEST_SUITE (EALib_paraboloidCMA) BOOST_AUTO_TEST_CASE( EALib_ParaboloidCMA ) { const unsigned int Seed = 44; const unsigned int Trials=30; const unsigned int Dimension = 8; double results[Trials]; Rng::seed(Seed); // // fitness function // const unsigned int a = 1000; // determines (square root of) problem condition Paraboloid f(Dimension, a); // // EA parameters // const unsigned int Iterations = 650; const double MinInit = .1; const double MaxInit = .3; const double GlobalStepInit = 1.; CMASearch cma; for(size_t trial=0;trial!=Trials;++trial) { // start point RealVector start=blas::scalar_vector(Dimension,Rng::uni(MinInit, MaxInit)); cma.init(f, start, GlobalStepInit); for (size_t i=0; i #include #define BOOST_TEST_MODULE EALib_SchwefelEllipsoidCMSA #include #include #include BOOST_AUTO_TEST_SUITE (EALib_paraboloidCMSA) BOOST_AUTO_TEST_CASE( EALib_ParaboloidCMA ) { const unsigned int Seed = 44; const unsigned int Trials=30; const unsigned int Dimension = 8; double results[Trials]; Rng::seed(Seed); // // fitness function // const unsigned int a = 1000; // determines (square root of) problem condition Paraboloid f(Dimension, a); // // EA parameters // const unsigned int Iterations = 1000; const double MinInit = .1; const double MaxInit = .3; const double GlobalStepInit = 1.; CMSASearch cma; for(size_t trial=0;trial!=Trials;++trial) { // start point RealVector start=blas::scalar_vector(Dimension,Rng::uni(MinInit, MaxInit)); cma.init(f, start, GlobalStepInit); for (size_t i=0; i #include #define BOOST_TEST_MODULE EALib_SchwefelEllipsoidCMSA #include #include #include BOOST_AUTO_TEST_SUITE (EALib_paraboloidElitistCMA) BOOST_AUTO_TEST_CASE( EALib_ParaboloidCMA ) { const unsigned int Seed = 42; const unsigned int Trials=30; const unsigned int Dimension = 8; double results[Trials]; Rng::seed(Seed); // // fitness function // const unsigned int a = 1000; // determines (square root of) problem condition Paraboloid f(Dimension, a); // // EA parameters // const unsigned int Iterations = 3500; const double MinInit = .1; const double MaxInit = .3; const double GlobalStepInit = 1.; CMAElitistSearch cma; for(size_t trial=0;trial!=Trials;++trial) { // start point RealVector start=blas::scalar_vector(Dimension,Rng::uni(MinInit, MaxInit)); cma.init(f, start, GlobalStepInit); for (size_t i=0; i #include #include #include using namespace shark; using namespace blas; template void checkMatrixVectorMultiply(M const& arg1, V const& arg2, Result const& result, double factor, double init = 0){ BOOST_REQUIRE_EQUAL(arg1.size1(), result.size()); BOOST_REQUIRE_EQUAL(arg2.size(), arg1.size2()); for(std::size_t i = 0; i != arg1.size1(); ++i){ double test_result = init; for(std::size_t k = 0; k != arg1.size2(); ++k){ test_result += factor * arg1(i,k)*arg2(k); } BOOST_CHECK_CLOSE(result(i), test_result,1.e-10); } } BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_axpy_prod) BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_vector_dense ){ std::size_t rows = 50; std::size_t columns = 80; //initialize the arguments in both row and column major as well as transposed matrix arg1rm(rows,columns); matrix arg1cm(rows,columns); 
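	// Contract exercised by every block below (a sketch; it mirrors the
	// checkMatrixVectorMultiply helper above): with init==true the result is
	// overwritten, with init==false the product accumulates into the
	// existing values:
	//   vector<double> y(rows, 1.5);
	//   axpy_prod(A, x, y, false, -2.0); // y(i) = 1.5 - 2.0 * sum_k A(i,k)*x(k)
	//   axpy_prod(A, x, y, true,  -2.0); // y(i) =     - 2.0 * sum_k A(i,k)*x(k)
	// The vector-matrix calls axpy_prod(x, trans(A), y, ...) compute the
	// same product, which is why transposed copies of each matrix are
	// prepared below.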
matrix arg1rmt(columns,rows); matrix arg1cmt(columns,rows); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j != columns; ++j){ arg1rm(i,j) = arg1cm(i,j) = i*columns+0.2*j; arg1rmt(j,i) = arg1cmt(j,i) = i*columns+0.2*j; } } vector arg2(columns); for(std::size_t j = 0; j != columns; ++j){ arg2(j) = 1.5*j+2; } std::cout<<"\nchecking dense matrix vector plusassign multiply"< result(rows,1.5); axpy_prod(arg1rm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); } { std::cout<<"column major Ax"< result(rows,1.5); axpy_prod(arg1cm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); } { std::cout<<"row major xA"< result(rows,1.5); axpy_prod(arg2,arg1rmt,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); } { std::cout<<"column major xA"< result(rows,1.5); axpy_prod(arg2,arg1cmt,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); } std::cout<<"\nchecking dense matrix vector assign multiply"< result(rows,1.5); axpy_prod(arg1rm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); } { std::cout<<"column major Ax"< result(rows,1.5); axpy_prod(arg1cm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); } { std::cout<<"row major xA"< result(rows,1.5); axpy_prod(arg2,arg1rmt,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); } { std::cout<<"column major xA"< result(rows,1.5); axpy_prod(arg2,arg1cmt,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); } } BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_vector_dense_sparse ){ std::size_t rows = 50; std::size_t columns = 80; //initialize the arguments in both row and column major as well as transposed matrix arg1rm(rows,columns); matrix arg1cm(rows,columns); matrix arg1rmt(columns,rows); matrix arg1cmt(columns,rows); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j != columns; ++j){ arg1rm(i,j) = arg1cm(i,j) = i*columns+0.2*j; arg1rmt(j,i) = arg1cmt(j,i) = i*columns+0.2*j; } } compressed_vector arg2(columns); for(std::size_t j = 1; j < columns; j+=3){ arg2(j) = 1.5*j+2; } std::cout<<"\nchecking dense-sparse matrix vector plusassign multiply"< result(rows,1.5); axpy_prod(arg1rm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); } { std::cout<<"column major Ax"< result(rows,1.5); axpy_prod(arg1cm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); } { std::cout<<"row major xA"< result(rows,1.5); axpy_prod(arg2,arg1rmt,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); } { std::cout<<"column major xA"< result(rows,1.5); axpy_prod(arg2,arg1cmt,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); } std::cout<<"\nchecking dense-sparse matrix vector assign multiply"< result(rows,1.5); axpy_prod(arg1rm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); } { std::cout<<"column major Ax"< result(rows,1.5); axpy_prod(arg1cm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); } { std::cout<<"row major xA"< result(rows,1.5); axpy_prod(arg2,arg1rmt,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); } { std::cout<<"column major xA"< result(rows,1.5); axpy_prod(arg2,arg1cmt,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); } } BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_vector_sparse_dense ){ std::size_t rows = 50; std::size_t columns = 80; 
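	// same contract as in the dense tests above, now with compressed
	// (sparse) matrix operands; note that the column-major variants are
	// built by filling a row-major compressed matrix of swapped dimensions
	// and viewing it through trans(), since compressed_matrix stores rows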
    //initialize the arguments in both row and column major as well as transposed
    compressed_matrix<double> arg1rm(rows,columns);
    compressed_matrix<double> arg1cm_base(columns,rows);
    matrix_transpose<compressed_matrix<double> > arg1cm = trans(arg1cm_base);
    compressed_matrix<double> arg1rmt(columns,rows);
    compressed_matrix<double> arg1cmt_base(rows,columns);
    matrix_transpose<compressed_matrix<double> > arg1cmt = trans(arg1cmt_base);
    for(std::size_t i = 0; i != 10; ++i){
        for(std::size_t j = 1; j < 20; j+=(i+1)){
            arg1rm(i,j) = arg1cm(i,j) = 2.0*(20*i+1)+1.0;
            arg1rmt(j,i) = arg1cmt(j,i) = arg1rm(i,j);
        }
    }
    vector<double> arg2(columns);
    for(std::size_t j = 0; j != columns; ++j){
        arg2(j) = 1.5*j+2;
    }

    std::cout<<"\nchecking sparse-dense matrix vector plusassign multiply"<<std::endl;
    { std::cout<<"row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); }
    { std::cout<<"column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); }
    { std::cout<<"row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rmt,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); }
    { std::cout<<"column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cmt,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); }

    std::cout<<"\nchecking sparse-dense matrix vector assign multiply"<<std::endl;
    { std::cout<<"row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); }
    { std::cout<<"column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); }
    { std::cout<<"row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rmt,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); }
    { std::cout<<"column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cmt,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); }
}

BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_vector_sparse_sparse ){
    std::size_t rows = 50;
    std::size_t columns = 80;
    //initialize the arguments in both row and column major as well as transposed
    compressed_matrix<double> arg1rm(rows,columns);
    compressed_matrix<double> arg1cm_base(columns,rows);
    matrix_transpose<compressed_matrix<double> > arg1cm = trans(arg1cm_base);
    compressed_matrix<double> arg1rmt(columns,rows);
    compressed_matrix<double> arg1cmt_base(rows,columns);
    matrix_transpose<compressed_matrix<double> > arg1cmt = trans(arg1cmt_base);
    for(std::size_t i = 0; i != 10; ++i){
        for(std::size_t j = 1; j < 20; j+=(i+1)){
            arg1rm(i,j) = arg1cm(i,j) = 2*(20*i+1)+1.0;
            arg1rmt(j,i) = arg1cmt(j,i) = arg1rm(i,j);
        }
    }
    compressed_vector<double> arg2(columns);
    for(std::size_t j = 1; j < columns; j+=3){
        arg2(j) = 1.5*j+2;
    }

    std::cout<<"\nchecking sparse-sparse matrix vector plusassign multiply"<<std::endl;
    { std::cout<<"row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); }
    { std::cout<<"column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); }
    { std::cout<<"row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rmt,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); }
    { std::cout<<"column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cmt,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); }

    std::cout<<"\nchecking sparse-sparse matrix vector assign multiply"<<std::endl;
    { std::cout<<"row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); }
    { std::cout<<"column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cm,arg2,result,true,-2.0);
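    // A small identity worth keeping in mind for the xA cases throughout this file:
    // axpy_prod(x, trans(A), y) computes x^T A^T = (A x)^T, so the expected entries are
    // the same as for A*x -- which is why those checks recompute against the
    // untransposed matrices (arg1rm, arg1cm) even though the product used the
    // transposed operands.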
checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); } { std::cout<<"row major xA"< result(rows,1.5); axpy_prod(arg2,arg1rmt,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); } { std::cout<<"column major xA"< result(rows,1.5); axpy_prod(arg2,arg1cmt,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); } } //we test using the textbook definition. template void checkMatrixMatrixMultiply(Arg1 const& arg1, Arg2 const& arg2, Result const& result, double factor, double init = 0){ BOOST_REQUIRE_EQUAL(arg1.size1(), result.size1()); BOOST_REQUIRE_EQUAL(arg2.size2(), result.size2()); for(std::size_t i = 0; i != arg1.size1(); ++i){ for(std::size_t j = 0; j != arg2.size2(); ++j){ double test_result = init; for(std::size_t k = 0; k != arg1.size2(); ++k){ test_result += factor * arg1(i,k)*arg2(k,j); } BOOST_CHECK_CLOSE(result(i,j), test_result,1.e-10); } } } BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_matrix_dense_dense ){ std::size_t rows = 50; std::size_t columns = 80; std::size_t middle = 33; //initialize the arguments in both row and column major matrix arg1rm(rows,middle); matrix arg1cm(rows,middle); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j != middle; ++j){ arg1rm(i,j) = arg1cm(i,j) = i*middle+0.2*j; } } matrix arg2rm(middle,columns); matrix arg2cm(middle,columns); for(std::size_t i = 0; i != middle; ++i){ for(std::size_t j = 0; j != columns; ++j){ arg2rm(i,j) = arg2cm(i,j) = i*columns+1.5*j; } } std::cout<<"\nchecking dense-dense matrix matrix plusassign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,1.5); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,1.5); } std::cout<<"\nchecking dense-dense matrix matrix assign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,0); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,0); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,0); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,0); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,true,-2.0); 
checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,0); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,0); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,0); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,0); } } //second argument sparse BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_matrix_dense_sparse ){ std::size_t rows = 50; std::size_t columns = 80; std::size_t middle = 33; //initialize the arguments in both row and column major matrix arg1rm(rows,middle); matrix arg1cm(rows,middle); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j != middle; ++j){ arg1rm(i,j) = arg1cm(i,j) = i*middle+0.2*j; } } compressed_matrix arg2rm(middle,columns); compressed_matrix arg2cm_base(columns,middle); matrix_transpose > arg2cm = trans(arg2cm_base); for(std::size_t i = 0; i != middle; ++i){ for(std::size_t j = 1; j < columns; j+=(i+1)){ arg2rm(i,j) = arg2cm(i,j) = 2*(20*i+1)+1.0; } } std::cout<<"\nchecking dense-sparse matrix matrix plusassign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,1.5); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,1.5); } std::cout<<"\nchecking dense-sparse matrix matrix assign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,0); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,0); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,0); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,0); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,0); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,0); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); 
axpy_prod(arg1cm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,0); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,0); } } //first argument sparse BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_matrix_sparse_dense ){ std::size_t rows = 50; std::size_t columns = 80; std::size_t middle = 33; //initialize the arguments in both row and column major compressed_matrix arg1rm(rows,middle); compressed_matrix arg1cm_base(middle,rows); matrix_transpose > arg1cm = trans(arg1cm_base); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 1; j < middle; j+=(i+1)){ arg1rm(i,j) = arg1cm(i,j) = 2*(20*i+1)+1.0; } } matrix arg2rm(middle,columns); matrix arg2cm(middle,columns); for(std::size_t i = 0; i != middle; ++i){ for(std::size_t j = 0; j != columns; ++j){ arg2rm(i,j) = arg2cm(i,j) = i*columns+1.5*j; } } std::cout<<"\nchecking sparse-dense matrix matrix plusassign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,1.5); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,1.5); } std::cout<<"\nchecking sparse-dense matrix matrix assign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,0); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,0); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,0); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,0); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,0); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,0); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,0); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,0); } } BOOST_AUTO_TEST_CASE( 
LinAlg_axpy_prod_matrix_matrix_sparse_sparse ){ std::size_t rows = 50; std::size_t columns = 80; std::size_t middle = 33; //initialize the arguments in both row and column major compressed_matrix arg1rm(rows,middle); compressed_matrix arg1cm_base(middle,rows); matrix_transpose > arg1cm = trans(arg1cm_base); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 1; j < middle; j+=(i+1)){ arg1rm(i,j) = arg1cm(i,j) = 2*(20*i+1)+1.0; } } compressed_matrix arg2rm(middle,columns); compressed_matrix arg2cm_base(columns,middle); matrix_transpose > arg2cm = trans(arg2cm_base); for(std::size_t i = 0; i != middle; ++i){ for(std::size_t j = 1; j < columns; j+=(i+1)){ arg2rm(i,j) = arg2cm(i,j) = 2*(20*i+1)+1.0; } } std::cout<<"\nchecking sparse-sparse matrix matrix plusassign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,1.5); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,1.5); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,1.5); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,1.5); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,false,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,1.5); } std::cout<<"\nchecking sparse-dense matrix matrix assign multiply"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultrm,-2.0,0); } { std::cout<<"rrc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2rm,resultcm,-2.0,0); } { std::cout<<"rcr"< resultrm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultrm,-2.0,0); } { std::cout<<"rcc"< resultcm(rows,columns,1.5); axpy_prod(arg1rm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1rm,arg2cm,resultcm,-2.0,0); } { std::cout<<"crr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultrm,-2.0,0); } { std::cout<<"crc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2rm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2rm,resultcm,-2.0,0); } { std::cout<<"ccr"< resultrm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultrm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultrm,-2.0,0); } { std::cout<<"ccc"< resultcm(rows,columns,1.5); axpy_prod(arg1cm,arg2cm,resultcm,true,-2.0); checkMatrixMatrixMultiply(arg1cm,arg2cm,resultcm,-2.0,0); } } BOOST_AUTO_TEST_CASE( LinAlg_axpy_prod_matrix_vector_triangular ){ std::size_t rows = 50; //initialize the arguments in both row and column major as well as transposed triangular_matrix arg1rm(rows); triangular_matrix arg1cm(rows); triangular_matrix 
<double,row_major,upper> arg1rmt(rows);
    triangular_matrix<double,column_major,upper> arg1cmt(rows);
    for(std::size_t i = 0; i != rows; ++i){
        for(std::size_t j = 0; j <= i; ++j){
            arg1rm.set_element(i,j,i*rows+0.2*j);
            arg1cm.set_element(i,j,i*rows+0.2*j);
            arg1rmt.set_element(j,i,i*rows+0.2*j);
            arg1cmt.set_element(j,i,i*rows+0.2*j);
        }
    }
    vector<double> arg2(rows);
    for(std::size_t j = 0; j != rows; ++j){
        arg2(j) = 1.5*j+2;
    }

    std::cout<<"\nchecking packed matrix vector plusassign multiply"<<std::endl;
    { std::cout<<"lower row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); }
    { std::cout<<"lower column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cm,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); }
    { std::cout<<"upper row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rmt,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1rmt,arg2,result,-2.0,1.5); }
    { std::cout<<"upper column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cmt,arg2,result,false,-2.0); checkMatrixVectorMultiply(arg1cmt,arg2,result,-2.0,1.5); }
    { std::cout<<"lower row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rm,result,false,-2.0); checkMatrixVectorMultiply(arg1rmt,arg2,result,-2.0,1.5); }
    { std::cout<<"lower column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cm,result,false,-2.0); checkMatrixVectorMultiply(arg1cmt,arg2,result,-2.0,1.5); }
    { std::cout<<"row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rmt,result,false,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,1.5); }
    { std::cout<<"column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cmt,result,false,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,1.5); }

    std::cout<<"\nchecking packed matrix vector assign multiply"<<std::endl;
    { std::cout<<"row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0); }
    { std::cout<<"column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cm,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0); }
    { std::cout<<"upper row major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1rmt,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1rmt,arg2,result,-2.0,0.0); }
    { std::cout<<"upper column major Ax"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg1cmt,arg2,result,true,-2.0); checkMatrixVectorMultiply(arg1cmt,arg2,result,-2.0,0.0); }
    { std::cout<<"row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rm,result,true,-2.0); checkMatrixVectorMultiply(arg1rmt,arg2,result,-2.0,0); }
    { std::cout<<"column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cm,result,true,-2.0); checkMatrixVectorMultiply(arg1cmt,arg2,result,-2.0,0); }
    { std::cout<<"upper row major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1rmt,result,true,-2.0); checkMatrixVectorMultiply(arg1rm,arg2,result,-2.0,0.0); }
    { std::cout<<"upper column major xA"<<std::endl; vector<double> result(rows,1.5); axpy_prod(arg2,arg1cmt,result,true,-2.0); checkMatrixVectorMultiply(arg1cm,arg2,result,-2.0,0.0); }
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/LinAlg/BLAS/compressed_matrix.cpp

#define BOOST_TEST_MODULE LinAlg_MatrixSparse
#include
#include
#include
using namespace shark;
using namespace blas;

struct Element{
    std::size_t row;
    std::size_t column;
    int value;
    Element(){}
    Element(std::size_t r, std::size_t col, int v):row(r),column(col),value(v){}
};

//checks the internal memory structure of the matrix and ensures that it stores the same elements as
//are given in the vector.
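// For reference, the compressed (CSR-style) layout whose invariants are verified below,
// sketched on a small hypothetical example -- a 3x4 matrix with nonzeros (0,1)=a, (0,3)=b
// and (2,0)=c:
//   values()        = [a, b, c]
//   inner_indices() = [1, 3, 0]      // column index of each stored value
//   outer_indices() = [0, 2, 2, 3]   // row i occupies [outer_indices()[i], outer_indices()[i+1])
// outer_indices_end() additionally marks where the used part of each row ends when rows
// carry spare capacity, which is exactly the slack the checks below allow for.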
void checkCompressedMatrixStructure(std::vector<Element> const& elements, compressed_matrix<int> const& matrix){
    std::size_t rows = matrix.size1();
    //check storage invariants
    //BOOST_REQUIRE_EQUAL(matrix.outer_indices_end()[rows-1], matrix.outer_indices()[rows]);
    BOOST_REQUIRE_EQUAL(matrix.nnz(), elements.size());
    BOOST_REQUIRE(matrix.nnz_capacity() >= elements.size());
    BOOST_REQUIRE_EQUAL(matrix.outer_indices()[0], 0);

    std::size_t elem = 0;
    for(std::size_t i = 0; i != rows; ++i){
        //find row end in the array
        std::size_t elem_end = elem;
        while(elem_end != elements.size() && elements[elem_end].row == i)++elem_end;
        std::size_t rowSize = elem_end-elem;

        //check row invariants
        BOOST_REQUIRE(matrix.row_capacity(i) >= rowSize );
        BOOST_REQUIRE(matrix.outer_indices()[i+1] >= matrix.outer_indices()[i] );//values are increasing
        BOOST_REQUIRE(matrix.outer_indices_end()[i] >= matrix.outer_indices()[i] );//end is bigger than start
        BOOST_REQUIRE(matrix.outer_indices_end()[i] <= matrix.outer_indices()[i+1] );//end of previous is smaller equal than start of next
        BOOST_REQUIRE_EQUAL(matrix.inner_nnz(i), rowSize );
        BOOST_REQUIRE_EQUAL(matrix.outer_indices_end()[i] - matrix.outer_indices()[i], rowSize);
        BOOST_REQUIRE_EQUAL(matrix.outer_indices()[i+1] - matrix.outer_indices()[i], matrix.row_capacity(i));
        BOOST_REQUIRE(matrix.inner_nnz(i) <= matrix.row_capacity(i));
        BOOST_REQUIRE_EQUAL(matrix.row_end(i) - matrix.row_begin(i),rowSize);
        BOOST_REQUIRE_EQUAL(matrix.row_begin(i).row(), i);
        BOOST_REQUIRE_EQUAL(matrix.row_end(i).row(), i);

        //check row elements
        std::size_t rowIndex = matrix.outer_indices()[i];
        for(compressed_matrix<int>::const_row_iterator pos = matrix.row_begin(i); pos != matrix.row_end(i); ++pos,++elem,++rowIndex ){
            //check array
            BOOST_CHECK_EQUAL(matrix.inner_indices()[rowIndex],elements[elem].column);
            BOOST_CHECK_EQUAL(matrix.values()[rowIndex],elements[elem].value);
            //check iterator
            BOOST_CHECK_EQUAL(pos.index(),elements[elem].column);
            BOOST_CHECK_EQUAL(*pos,elements[elem].value);
        }
    }
}

void checkRowSizes(std::vector<std::size_t> const& rowSizes, compressed_matrix<int> const& matrix){
    std::size_t rowStart = 0;
    for(std::size_t i = 0; i != rowSizes.size(); ++i){
        BOOST_CHECK_EQUAL(matrix.outer_indices()[i], rowStart);
        BOOST_CHECK(matrix.row_capacity(i) >= rowSizes[i]);
        BOOST_CHECK(matrix.outer_indices_end()[i] >= rowStart);
        rowStart += rowSizes[i];
    }
    //BOOST_CHECK_EQUAL(matrix.outer_indices()[rowSizes.size()], matrix.outer_indices_end()[rowSizes.size()-1]);
    BOOST_CHECK(matrix.nnz_capacity() >= rowStart);
}

BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_compressed_matrix)

//tests whether reserve calls are correct
BOOST_AUTO_TEST_CASE( LinAlg_sparse_matrix_reserve_row){
    std::size_t rows = 11;//Should be prime :)
    std::size_t columns = 30;
    std::size_t base = 8;
    compressed_matrix<int> matrix(rows,columns);
    std::vector<std::size_t> rowSizes(rows,0);
    std::size_t i = 1;
    for(std::size_t j = 0; j != columns; ++j){
        i = (i*base) % rows;
        rowSizes[i]=j;
        matrix.reserve_row(i,j);
        checkRowSizes(rowSizes,matrix);
    }
}

//this test tests push_back behavior of set_element and operator()
BOOST_AUTO_TEST_CASE( LinAlg_sparse_matrix_insert_element_end){
    std::size_t rows = 10;
    std::size_t columns = 20;
    compressed_matrix<int> matrix_set(rows,columns);
    compressed_matrix<int> matrix_operator(rows,columns);
    std::vector<Element> elements;
    for(std::size_t i = 0; i != rows; ++i){
        compressed_matrix<int>::row_iterator row_iter = matrix_set.row_begin(i);
        for(std::size_t j = 0; j != columns; ++j,++row_iter){
            BOOST_REQUIRE_EQUAL(row_iter.row(),i);
            int val = (int)(i + j);
            row_iter = matrix_set.set_element(row_iter,j,val);
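            // Hypothesis for why set_element hands back an iterator (and why this loop
            // keeps reassigning row_iter): inserting into compressed storage can shift or
            // reallocate the underlying arrays, so continuing with the pre-insertion
            // iterator would not be safe; the returned one points at the new element.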
            matrix_operator(i, j) = val;
            elements.push_back(Element(i,j,val));
            checkCompressedMatrixStructure(elements,matrix_set);
            checkCompressedMatrixStructure(elements,matrix_operator);
        }
    }
}

//we still insert row by row, but now with different gaps between indices.
BOOST_AUTO_TEST_CASE( LinAlg_sparse_matrix_insert_element_some_elements ){
    std::size_t rows = 10;
    std::size_t columns = 23;//must be prime
    std::size_t colElements = 3;
    std::vector<std::size_t> cols(colElements);
    std::vector<Element> elements;
    compressed_matrix<int> matrix_set(rows,columns);
    compressed_matrix<int> matrix_operator(rows,columns);
    std::size_t base = 8;
    std::size_t l = 1;
    for(std::size_t i = 0; i != rows; ++i){
        for(std::size_t k = 0; k != colElements; ++k){
            l = (l*base)%columns;//field theory gives us a nice "random like" order
            cols[k] = l;
        }
        std::sort(cols.begin(),cols.end());
        compressed_matrix<int>::row_iterator row_iter = matrix_set.row_begin(i);
        for(std::size_t elem = 0; elem != colElements; ++elem,++row_iter){
            std::size_t j = cols[elem];
            int val = (int)(i + j);
            row_iter = matrix_set.set_element(row_iter,j,val);
            matrix_operator(i,j) = val;
            elements.push_back(Element(i,j,val));
            checkCompressedMatrixStructure(elements,matrix_set);
            checkCompressedMatrixStructure(elements,matrix_operator);
        }
    }
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/LinAlg/BLAS/compressed_vector.cpp

#define BOOST_TEST_MODULE LinAlg_VectorSparse
#include
#include
#include
using namespace shark;
using namespace blas;

//this test tests push_back behavior of set_element and operator()
BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_compressed_vector)

BOOST_AUTO_TEST_CASE( LinAlg_sparse_vector_insert_element_end){
    std::size_t dimensions = 20;
    compressed_vector<int> vector_set(dimensions);
    compressed_vector<int> vector_operator(dimensions);
    compressed_vector<int>::iterator iter = vector_set.begin();
    for(std::size_t i = 0; i != dimensions; ++i){
        //check set_element
        iter = vector_set.set_element(iter,i,3*i);
        BOOST_REQUIRE_EQUAL(iter.index(), i);
        BOOST_REQUIRE_EQUAL(iter-vector_set.begin(), i);
        BOOST_REQUIRE_EQUAL(*iter, 3*i);
        BOOST_REQUIRE_EQUAL(vector_set.nnz(),i+1);
        for(std::size_t k = 0; k <= i; ++k){
            BOOST_CHECK_EQUAL(vector_set.values()[k], 3*k);
            BOOST_CHECK_EQUAL(vector_set.indices()[k], k);
        }
        ++iter;
        BOOST_REQUIRE_EQUAL(iter-vector_set.begin(), i+1);
    }
    for(std::size_t i = 0; i != dimensions; ++i){
        //check operator()
        vector_operator(i) = 3*i;
        BOOST_REQUIRE_EQUAL(vector_operator(i), 3*i);
        BOOST_REQUIRE_EQUAL(vector_operator.nnz(),i+1);
        for(std::size_t k = 0; k <= i; ++k){
            BOOST_CHECK_EQUAL(vector_operator.values()[k], 3*k);
            BOOST_CHECK_EQUAL(vector_operator(k), 3*k);
            BOOST_CHECK_EQUAL(vector_operator.indices()[k], k);
        }
    }
}

//~ //we still insert row by row, but now with different gaps between indices.
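// A short illustration of the "field theory" index order used in these tests (and in the
// commented-out variant below), worked out with the tests' own values columns = 23 and
// base = 8: starting from l = 1, the update l = (l*base) % columns produces
//   8, 18, 6, 2, 16, 13, 12, 4, 9, 3, 1, ...
// Because 23 is prime, the nonzero residues modulo 23 form a group under multiplication,
// so the sequence cycles through distinct nonzero column indices -- a cheap deterministic
// stand-in for random insertion positions.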
//~ BOOST_AUTO_TEST_CASE( LinAlg_sparse_matrix_insert_element_some_elements ){
    //~ std::size_t rows = 10;
    //~ std::size_t columns = 23;//must be prime
    //~ std::size_t colElements = 3;
    //~ std::vector<std::size_t> cols(colElements);
    //~ std::vector<Element> elements;
    //~ compressed_matrix<int> matrix_set(rows,columns);
    //~ compressed_matrix<int> matrix_operator(rows,columns);
    //~ std::size_t base = 8;
    //~ std::size_t l = 1;
    //~ for(std::size_t i = 0; i != rows; ++i){
        //~ for(std::size_t k = 0; k != colElements; ++k){
            //~ l = (l*base)%columns;//field theory gives us a nice "random like" order
            //~ cols[k] = l;
        //~ }
        //~ std::sort(cols.begin(),cols.end());
        //~ compressed_matrix<int>::row_iterator row_iter = matrix_set.row_begin(i);
        //~ for(std::size_t elem = 0; elem != colElements; ++elem,++row_iter){
            //~ std::size_t j = cols[elem];
            //~ row_iter = matrix_set.set_element(row_iter,j,i+j);
            //~ matrix_operator(i,j) = i+j;
            //~ elements.push_back(Element(i,j,i+j));
            //~ checkCompressedMatrixStructure(elements,matrix_set);
            //~ checkCompressedMatrixStructure(elements,matrix_operator);
        //~ }
    //~ }
//~ }

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/LinAlg/BLAS/iterators.cpp

#define BOOST_TEST_MODULE BLAS_Vector_vector_iterators
#include
#include
#include
using namespace shark;
using namespace blas;

BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_iterators)

BOOST_AUTO_TEST_CASE( BLAS_Dense_Storage_Iterator)
{
    double values[]={0.1,0.2,0.3,0.4,0.5,0.6};
    //reading
    {
        dense_storage_iterator iter(values+2,1,2);
        dense_storage_iterator start=iter;
        dense_storage_iterator end(values+6,3,2);
        BOOST_REQUIRE_EQUAL(end-start, 2);
        BOOST_REQUIRE_EQUAL(start-iter, 0);
        BOOST_REQUIRE(start == iter);
        BOOST_REQUIRE(end != start);
        BOOST_REQUIRE(end == start+2);
        std::size_t k = 1;
        while(iter != end){
            BOOST_CHECK_EQUAL(iter.index(),k);
            BOOST_CHECK_EQUAL(*iter,values[2*k]);
            BOOST_CHECK_EQUAL(start[k-1],values[2*k]);
            BOOST_CHECK_EQUAL(*(start+k-1),values[2*k]);
            BOOST_CHECK(iter < end);
            ++iter;
            ++k;
        }
        BOOST_REQUIRE_EQUAL(end-iter, 0);
        BOOST_REQUIRE_EQUAL(iter-start, 2);
        BOOST_REQUIRE_EQUAL(k, 3);
    }
    //writing
    {
        dense_storage_iterator iter(values,0,2);
        dense_storage_iterator end(values,3,2);
        std::size_t k = 0;
        while(iter != end){
            *iter = k;
            ++k;
            ++iter;
        }
        for(std::size_t i = 0; i != 6; ++i){
            if(i % 2 == 0)
                BOOST_CHECK_EQUAL(values[i],i/2);
            else
                BOOST_CHECK_CLOSE(values[i],i*0.1+0.1, 1.e-10);
        }
    }
}

BOOST_AUTO_TEST_CASE( BLAS_Compressed_Storage_Iterator)
{
    double values[]={0.1,0.2,0.3,0.4,0.5,0.6};
    std::size_t indizes[]={3,8,11,12,15,16};
    //reading
    {
        compressed_storage_iterator iter(values,indizes,1,2);
        compressed_storage_iterator start=iter;
        compressed_storage_iterator end(values,indizes,5,2);
        BOOST_REQUIRE_EQUAL(start.row(), 2);
        BOOST_REQUIRE_EQUAL(start-iter, 0);
        BOOST_REQUIRE_EQUAL(end-start, 4);
        BOOST_REQUIRE(start == iter);
        BOOST_REQUIRE(end != start);
        std::size_t k = 1;
        while(iter != end){
            BOOST_CHECK_EQUAL(iter.index(),indizes[k]);
            BOOST_CHECK_EQUAL(*iter,values[k]);
            ++iter;
            ++k;
        }
        BOOST_REQUIRE_EQUAL(end-iter, 0);
        BOOST_REQUIRE_EQUAL(iter-start, 4);
        BOOST_REQUIRE_EQUAL(k, 5);
    }
    //writing
    {
        compressed_storage_iterator iter(values,indizes,1,2);
        compressed_storage_iterator end(values,indizes,5,2);
        std::size_t k = 1;
        while(iter != end){
            *iter = 2*k;
            ++iter;
            ++k;
        }
        BOOST_REQUIRE_EQUAL(k, 5);
        for(std::size_t i = 1; i != 5; ++i){
            BOOST_CHECK_EQUAL(values[i],2*i);
        }
    }
}

struct IndexedMocup{
    typedef double value_type;
    typedef double& reference;
    typedef double const& const_reference;
    IndexedMocup(double*
 array,std::size_t size):m_array(array),m_size(size){}
    std::size_t size()const{ return m_size; }
    reference operator()(std::size_t i)const{ return m_array[i]; }
    bool same_closure(IndexedMocup const& other)const{ return m_array == other.m_array; }
    double* m_array;
    std::size_t m_size;
};

struct ConstIndexedMocup{
    typedef double value_type;
    typedef double& reference;
    typedef double const& const_reference;
    ConstIndexedMocup(double* array,std::size_t size):m_array(array),m_size(size){}
    std::size_t size()const{ return m_size; }
    const_reference operator()(std::size_t i)const{ return m_array[i]; }
    bool same_closure(ConstIndexedMocup const& other)const{ return m_array == other.m_array; }
    double* m_array;
    std::size_t m_size;
};

BOOST_AUTO_TEST_CASE( BLAS_Indexed_Iterator)
{
    double values[]={0.1,0.2,0.3,0.4,0.5,0.6};
    //reading
    {
        ConstIndexedMocup mocup(values,6);
        indexed_iterator iter(mocup,1);
        indexed_iterator start=iter;
        indexed_iterator end(mocup,5);
        BOOST_REQUIRE_EQUAL(end-start, 4);
        BOOST_REQUIRE_EQUAL(start-iter, 0);
        BOOST_REQUIRE(start == iter);
        BOOST_REQUIRE(end != start);
        BOOST_REQUIRE(start < end);
        BOOST_REQUIRE(end == start+4);
        std::size_t k = 1;
        while(iter != end){
            BOOST_CHECK_EQUAL(iter.index(),k);
            BOOST_CHECK_EQUAL(*iter,values[k]);
            BOOST_CHECK_EQUAL(start[k-1],values[k]);
            BOOST_CHECK_EQUAL(*(start+k-1),values[k]);
            BOOST_CHECK(iter < end);
            ++iter;
            ++k;
        }
        BOOST_REQUIRE_EQUAL(end-iter, 0);
        BOOST_REQUIRE_EQUAL(iter-start, 4);
        BOOST_REQUIRE_EQUAL(k, 5);
    }
    //writing
    {
        IndexedMocup mocup(values,6);
        indexed_iterator iter(mocup,1);
        indexed_iterator end(mocup,5);
        std::size_t k = 1;
        while(iter != end){
            *iter = 2*k;
            ++iter;
            ++k;
        }
        BOOST_REQUIRE_EQUAL(k, 5);
        for(std::size_t i = 1; i != 5; ++i){
            BOOST_CHECK_EQUAL(values[i],2*i);
        }
    }
}

BOOST_AUTO_TEST_CASE( BLAS_Constant_Iterator)
{
    constant_iterator iter(3,4.0);
    constant_iterator start =iter;
    constant_iterator end(10,4.0);
    BOOST_REQUIRE_EQUAL(start-iter, 0);
    BOOST_REQUIRE_EQUAL(end-start, 7);
    BOOST_REQUIRE(start == iter);
    BOOST_REQUIRE(end != start);
    BOOST_REQUIRE(start < end);
    std::size_t k = 3;
    while(iter != end){
        BOOST_CHECK_EQUAL(iter.index(),k);
        BOOST_CHECK_EQUAL(*iter,4.0);
        ++iter;
        ++k;
    }
    BOOST_REQUIRE_EQUAL(end-iter, 0);
    BOOST_REQUIRE_EQUAL(iter-start, 7);
    BOOST_REQUIRE_EQUAL(k, 10);
}

BOOST_AUTO_TEST_CASE( BLAS_Subrange_Iterator_Dense)
{
    double values[]={0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0};
    //reading
    {
        typedef dense_storage_iterator iterator;
        iterator inner_iter(values,0);
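        // The subrange_iterator checks in this and the following test pin down an index
        // convention: the wrapper reports indices relative to the start of the range,
        // i.e. inner().index() minus the start offset, which is why the reading loops
        // compare iter.index() against k-2 here and against indizes[k]-3 in the
        // compressed variant further below.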
iterator inner_end(values,10); subrange_iterator iter(inner_iter,inner_end,2,2); subrange_iterator end(inner_iter,inner_end,8,2); std::size_t k = 2; while(iter != end){ *iter = 2*k; ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 8); for(std::size_t i = 2; i !=8; ++i){ BOOST_CHECK_EQUAL(values[i],2*i); } } } BOOST_AUTO_TEST_CASE( BLAS_Subrange_Iterator_Compressed) { double values[]={0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0}; std::size_t indizes[]={3,8,11,12,15,16,18,20,23,30}; //reading full range (but starting from different startIndex) { typedef compressed_storage_iterator iterator; iterator inner_iter(values,indizes,0); iterator inner_end(values,indizes,10); subrange_iterator iter(inner_iter,3); subrange_iterator end(inner_end,3); std::size_t k = 0; while(iter != end){ BOOST_CHECK_EQUAL(iter.index(),indizes[k]-3); BOOST_CHECK(iter.inner() == inner_iter); BOOST_CHECK_EQUAL(*iter,values[k]); ++iter; ++inner_iter; ++k; } BOOST_REQUIRE_EQUAL(k, 10); } //reading full range (but starting from different startIndex) { typedef compressed_storage_iterator iterator; iterator inner_iter(values,indizes,0); iterator inner_end(values,indizes,10); iterator inner_iter_test(values,indizes,4); subrange_iterator iter(inner_iter,inner_end,13,10); subrange_iterator end(inner_iter,inner_end,23,10); //check that we got to the correct array positions BOOST_REQUIRE_EQUAL(iter.inner().index(),15); BOOST_REQUIRE_EQUAL(end.inner().index(),23); std::size_t k = 4; while(iter != end){ BOOST_CHECK_EQUAL(iter.index(),indizes[k]-10); BOOST_CHECK(iter.inner() == inner_iter_test); BOOST_CHECK_EQUAL(*iter,values[k]); ++iter; ++inner_iter_test; ++k; } BOOST_REQUIRE_EQUAL(k, 8); } //writing { typedef compressed_storage_iterator iterator; iterator inner_iter(values,indizes,0); iterator inner_end(values,indizes,10); iterator inner_iter_test(values,indizes,4); subrange_iterator iter(inner_iter,inner_end,13,10); subrange_iterator end(inner_iter,inner_end,23,10); //check that we got to the correct array positions BOOST_REQUIRE_EQUAL(iter.inner().index(),15); BOOST_REQUIRE_EQUAL(end.inner().index(),23); std::size_t k = 4; while(iter != end){ *iter = 2*k; ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 8); for(std::size_t i = 4; i !=8; ++i){ BOOST_CHECK_EQUAL(values[i],2*i); } } } BOOST_AUTO_TEST_CASE( BLAS_Transform_Iterator_Dense) { double values[]={0.1,0.2,0.3,0.4,0.5,0.6}; typedef dense_storage_iterator iterator; iterator dense_iter(values,0); iterator dense_end(values,6); transform_iterator > iter(dense_iter,scalar_sqr()); transform_iterator > start = iter; transform_iterator > end(dense_end,scalar_sqr()); BOOST_REQUIRE_EQUAL(end-start, 6); BOOST_REQUIRE_EQUAL(start-iter, 0); BOOST_REQUIRE(start == iter); BOOST_REQUIRE(start != end); BOOST_REQUIRE(start < end); BOOST_REQUIRE(end == start+6); std::size_t k = 0; while(iter != end){ BOOST_CHECK_EQUAL(iter.index(),k); BOOST_CHECK_EQUAL(*iter,values[k]*values[k]); BOOST_CHECK_EQUAL(start[k],values[k]*values[k]); BOOST_CHECK_EQUAL(*(start+k),values[k]*values[k]); BOOST_CHECK(iter < end); ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 6); BOOST_REQUIRE_EQUAL(end-iter, 0); BOOST_REQUIRE_EQUAL(iter-start, 6); } BOOST_AUTO_TEST_CASE( BLAS_Transform_Iterator_Compressed) { double values[]={0.1,0.2,0.3,0.4,0.5,0.6}; std::size_t indizes[]={3,8,11,12,15,16}; typedef compressed_storage_iterator iterator; iterator compressed_iter(values,indizes,0); iterator compressed_end(values,indizes,6); transform_iterator > iter(compressed_iter,scalar_sqr()); transform_iterator > start = iter; transform_iterator > 
end(compressed_end,scalar_sqr()); BOOST_REQUIRE_EQUAL(end-start, 6); BOOST_REQUIRE_EQUAL(start-iter, 0); BOOST_REQUIRE(start == iter); BOOST_REQUIRE(start != end); std::size_t k = 0; while(iter != end){ BOOST_CHECK_EQUAL(iter.index(),indizes[k]); BOOST_CHECK_EQUAL(*iter,values[k]*values[k]); ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 6); BOOST_REQUIRE_EQUAL(end-iter, 0); BOOST_REQUIRE_EQUAL(iter-start, 6); } BOOST_AUTO_TEST_CASE( BLAS_Binary_Transform_Iterator_Dense) { double values1[]={0.1,0.2,0.3,0.4,0.5,0.6}; double values2[]={0.3,0.5,0.7,0.9,1.1,1.3}; typedef dense_storage_iterator iterator; iterator dense_iter1(values1,0); iterator dense_end1(values1,6); iterator dense_iter2(values2,0); iterator dense_end2(values2,6); typedef binary_transform_iterator > transform_iterator; transform_iterator iter(scalar_binary_plus(),dense_iter1,dense_end1,dense_iter2,dense_end2); transform_iterator start = iter; transform_iterator end(scalar_binary_plus(),dense_end1,dense_end1,dense_end2,dense_end2); BOOST_REQUIRE_EQUAL(end-start, 6); BOOST_REQUIRE_EQUAL(start-iter, 0); BOOST_REQUIRE(start == iter); BOOST_REQUIRE(start != end); BOOST_REQUIRE(start < end); BOOST_REQUIRE(end == start+6); std::size_t k = 0; while(iter != end){ double value = values1[k]+values2[k]; BOOST_CHECK_EQUAL(iter.index(),k); BOOST_CHECK_EQUAL(*iter,value); BOOST_CHECK_EQUAL(start[k],value); BOOST_CHECK_EQUAL(*(start+k),value); BOOST_CHECK(iter < end); ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 6); BOOST_REQUIRE_EQUAL(end-iter, 0); BOOST_REQUIRE_EQUAL(iter-start, 6); } BOOST_AUTO_TEST_CASE( BLAS_Binary_Transform_Iterator_Compressed) { double values1[]={0.1,0.2,0.3,0.4,0.5,0.6}; double values2[]={0.3,0.5,0.7,0.9}; double valuesResult[]={0.1,0.3,0.7,1.0,0.4,0.9,0.5,0.6}; std::size_t indizes1[]={3,8,11,12,17,18}; std::size_t indizes2[]={5,8,11,14}; std::size_t indizesResult[]={3,5,8,11,12,14,17,18}; typedef compressed_storage_iterator iterator; typedef binary_transform_iterator > transform_iterator; //a+b { iterator iter1(values1,indizes1,0); iterator end1(values1,indizes1,6); iterator iter2(values2,indizes2,0); iterator end2(values2,indizes2,4); transform_iterator iter(scalar_binary_plus(),iter1,end1,iter2,end2); transform_iterator start = iter; transform_iterator end(scalar_binary_plus(),end1,end1,end2,end2); BOOST_REQUIRE(start == iter); BOOST_REQUIRE(start != end); std::size_t k = 0; while(iter != end){ BOOST_CHECK_EQUAL(iter.index(),indizesResult[k]); BOOST_CHECK_EQUAL(*iter,valuesResult[k]); ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 8); } //b+a { iterator iter2(values1,indizes1,0); iterator end2(values1,indizes1,6); iterator iter1(values2,indizes2,0); iterator end1(values2,indizes2,4); transform_iterator iter(scalar_binary_plus(),iter1,end1,iter2,end2); transform_iterator start = iter; transform_iterator end(scalar_binary_plus(),end1,end1,end2,end2); BOOST_REQUIRE(start == iter); BOOST_REQUIRE(start != end); std::size_t k = 0; while(iter != end){ BOOST_CHECK_EQUAL(iter.index(),indizesResult[k]); BOOST_CHECK_EQUAL(*iter,valuesResult[k]); ++iter; ++k; } BOOST_REQUIRE_EQUAL(k, 8); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/BLAS/matrix_assign.cpp000066400000000000000000000644231314655040000207600ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_BLAS_VectorAssign #include #include #include #include using namespace shark; template void checkMatrixEqual(M1 const& m1, M2 const& m2){ BOOST_REQUIRE_EQUAL(m1.size1(),m2.size1()); BOOST_REQUIRE_EQUAL(m1.size2(),m2.size2()); for(std::size_t i = 0; i != m2.size1(); ++i){ 
for(std::size_t j = 0; j != m2.size2(); ++j){ BOOST_CHECK_EQUAL(m1(i,j),m2(i,j)); } } } BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_matrix_assign) /////////////////////////////////////////////////////////////////////////////// //////FUNCTOR CONSTANT ASSIGNMENT ////////////////////////////////////////////////////////////////////////////// //test for assign of the form m_ij=f(m_ij,t) for constant t BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Dense_Matrix_Constant_Functor_Assign ){ std::cout<<"testing dense functor-constant assignment."< input_row_major(10,20); blas::matrix input_column_major(10,20); blas::matrix target(10,20); const unsigned int t = 2; for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 20; ++j){ input_row_major(i,j) = 2*(20*i+1)+1.0; input_column_major(i,j) = 2*(20*i+1)+1.0; target(i,j)=t*input_row_major(i,j); } } std::cout<<"testing row-major"< (input_row_major, t); checkMatrixEqual(target,input_row_major); std::cout<<"testing column-major"< (input_column_major, t); checkMatrixEqual(target,input_column_major); std::cout<<"\n"; } BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Packed_Matrix_Constant_Functor_Assign ){ std::cout<<"testing packed functor-constant assignment."< input_row_lower(20); blas::triangular_matrix input_row_upper(20); blas::triangular_matrix input_column_lower(20); blas::triangular_matrix input_column_upper(20); blas::triangular_matrix target_lower(20); blas::triangular_matrix target_upper(20); const unsigned int t = 2; for(unsigned int i = 0; i != 20; ++i){ for(unsigned int j = 0; j <= i; ++j){ input_row_lower.set_element(i,j, 2*(20*i+1)+1); input_column_lower.set_element(i,j, 2*(20*i+1)+1); target_lower.set_element(i,j,t*input_row_lower(i,j)); } } for(unsigned int i = 0; i != 20; ++i){ for(unsigned int j = i; j != 20; ++j){ input_row_upper.set_element(i,j,2*(20*i+1)+1); input_column_upper.set_element(i,j,2*(20*i+1)+1); target_upper.set_element(i,j,t*input_row_upper(i,j)); } } std::cout<<"testing row-major lower"< (input_row_lower, t); checkMatrixEqual(target_lower,input_row_lower); std::cout<<"testing column-major lower"< (input_column_lower, t); checkMatrixEqual(target_lower,input_column_lower); std::cout<<"testing row-major upper"< (input_row_upper, t); checkMatrixEqual(target_upper,input_row_upper); std::cout<<"testing column-major upper"< (input_column_upper, t); checkMatrixEqual(target_upper,input_column_upper); std::cout<<"\n"; } BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Sparse_Matrix_Constant_Functor_Assign ){ std::cout<<"testing sparse functor-constant assignment."< input_row_major(10,20,0); blas::compressed_matrix target_row_major(10,20,0); blas::compressed_matrix input_column_major_base(20,10,0); blas::compressed_matrix target_column_major_base(20,10,0); blas::matrix_transpose > input_column_major(input_column_major_base); blas::matrix_transpose > target_column_major(target_column_major_base); const unsigned int t = 2; for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ input_row_major(i,j) = 2*(20*i+1)+1; input_column_major(i,j) = 2*(20*i+1)+1;//source_row_major(i,j)+2; target_row_major(i,j) = t*input_row_major(i,j); target_column_major(i,j) = t*input_column_major(i,j); } } std::cout<<"testing row-major"< (input_row_major, t); checkMatrixEqual(target_row_major,input_row_major); std::cout<<"testing column-major"< (input_column_major, t); checkMatrixEqual(target_column_major,input_column_major); std::cout<<"\n"; } ////////////////////////////////////////////////////// //////SIMPLE ASSIGNMENT 
////////////////////////////////////////////////////// BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Dense_Dense_Matrix_Assign ){ std::cout<<"testing direct dense-dense assignment"< source_row_major(10,20); blas::matrix source_column_major(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = source_row_major(i,j)+2; } } //test all 4 combinations of row/column major { blas::matrix target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing row-row"< target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing row-column"< target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing column-row"< target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing column-column"< MRU; //~ typedef blas::triangular_matrix MCU; //~ typedef blas::triangular_matrix MRL; //~ typedef blas::triangular_matrix MCL; //~ MRU source_upper_row_major(20); //~ MRL source_lower_row_major(20); //~ MCU source_upper_column_major(20); //~ MCL source_lower_column_major(20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ MRU::row_iterator pos1=source_upper_row_major.row_begin(i); //~ MRL::row_iterator pos2=source_lower_row_major.row_begin(i); //~ MCU::column_iterator pos3=source_upper_column_major.column_begin(i); //~ MCL::column_iterator pos4=source_lower_column_major.column_begin(i); //~ for(; pos1 != source_upper_row_major.row_end(i);++pos1){ //~ *pos1 = i*20+pos1.index()+1; //~ } //~ for(; pos2 != source_lower_row_major.row_end(i);++pos2){ //~ *pos2 = i*20+pos2.index()+1; //~ } //~ for(; pos3 != source_upper_column_major.column_end(i);++pos3){ //~ *pos3 = i*20+pos3.index()+1; //~ } //~ for(; pos4 != source_lower_column_major.column_end(i);++pos4){ //~ *pos4 = i*20+pos4.index()+1; //~ } //~ } //~ //test all 8 combinations of row/column major target and the four sources //~ { //~ blas::matrix target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing row-row/upper"< target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing row-row/lower"< target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing row-column/upper"< target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing row-column/lower"< target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing column-row/upper"< target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing column-row/lower"< target(20,20); //~ for(std::size_t i = 0; i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing column-column/upper"< target(20,20); //~ for(std::size_t i = 0; 
i != 20; ++i){ //~ for(std::size_t j = 0; j != 20; ++j){ //~ target(i,j) = 3*(20*i+1)+2; //~ } //~ } //~ std::cout<<"testing column-column/lower"< MRU; typedef blas::triangular_matrix MCU; typedef blas::triangular_matrix MRL; typedef blas::triangular_matrix MCL; MRU source_upper_row_major(20); MRL source_lower_row_major(20); MCU source_upper_column_major(20); MCL source_lower_column_major(20); for(unsigned int i = 0; i != 20; ++i){ MRU::row_iterator pos1=source_upper_row_major.row_begin(i); MRL::row_iterator pos2=source_lower_row_major.row_begin(i); MCU::column_iterator pos3=source_upper_column_major.column_begin(i); MCL::column_iterator pos4=source_lower_column_major.column_begin(i); for(; pos1 != source_upper_row_major.row_end(i);++pos1){ *pos1 = i*20+pos1.index()+1; } for(; pos2 != source_lower_row_major.row_end(i);++pos2){ *pos2 = i*20+pos2.index()+1; } for(; pos3 != source_upper_column_major.column_end(i);++pos3){ *pos3 = i*20+pos3.index()+1; } for(; pos4 != source_lower_column_major.column_end(i);++pos4){ *pos4 = i*20+pos4.index()+1; } } //test all 8 combinations of row/column major target and the four sources //for simplicitely we just assign the targets to be 1... { MRU target(20,1); std::cout<<"testing row-row/upper"< source_row_major(10,20,0); blas::compressed_matrix source_column_major_base(20,10); blas::matrix_transpose > source_column_major(source_column_major_base); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = 2*(20*i+1)+1;//source_row_major(i,j)+2; } } //test all 4 combinations of row/column major { blas::matrix target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing row-row"< target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing row-column"< target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing column-row"< target(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ target(i,j) = 3*(20*i+1)+2; } } std::cout<<"testing column-column"< source_row_major(10,20); blas::matrix source_column_major(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = source_row_major(i,j)+2; } } //test all 4 combinations of row/column major { blas::compressed_matrix target(10,20,0); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ target(i,j) = 4*(20*i+1)+9; } } std::cout<<"testing row-row"< target_base(20,10); blas::matrix_transpose > target(target_base); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ target(i,j) = 4*(20*i+1)+9; } } std::cout<<"testing row-column"< target(10,20,0); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ target(i,j) = 4*(20*i+1)+9; } } std::cout<<"testing column-row"< target_base(20,10); blas::matrix_transpose > target(target_base); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ target(i,j) = 4*(20*i+1)+9; } } std::cout<<"testing column-column"< source_row_major(10,20,0); blas::compressed_matrix source_column_major_base(20,10); blas::matrix_transpose > source_column_major(source_column_major_base); for(unsigned int i = 0; i 
!= 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = source_row_major(i,j); } } //test all 4 combinations of row/column major { blas::compressed_matrix target(10,20,0); //~ for(std::size_t i = 0; i != 10; ++i){ //~ for(std::size_t j = 1; j < 20; j+=(i+1)){ //~ target(i,j) = 4*(20*i+1)+9; //~ } //~ } std::cout<<"testing row-row"< target(10,20,0); //~ for(std::size_t i = 0; i != 10; ++i){ //~ for(std::size_t j = 1; j < 20; j+=(i+1)){ //~ target(i,j) = 4*(20*i+1)+9; //~ } //~ } std::cout<<"testing row-column"< target_base(20,10); blas::matrix_transpose > target(target_base); //~ for(std::size_t i = 0; i != 10; ++i){ //~ for(std::size_t j = 1; j < 20; j+=(i+1)){ //~ target(i,j) = 4*(20*i+1)+9; //~ } //~ } std::cout<<"testing column-row"< target_base(20,10); blas::matrix_transpose > target(target_base); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ target(i,j) = 4*(20*i+1)+9; } } std::cout<<"testing column-column"< source_row_major(10,20); blas::matrix source_column_major(10,20); blas::matrix preinit(10,20); blas::matrix result(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = source_row_major(i,j); preinit(i,j) = 3*(20*i+1)+2; result(i,j) = preinit(i,j)+source_row_major(i,j); } } //test all 4 combinations of row/column major { blas::matrix target = preinit; std::cout<<"testing row-row"<(target,source_row_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing row-column"<(target,source_column_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing column-row"<(target,source_row_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing column-column"<(target,source_column_major); checkMatrixEqual(target,result); } } BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Dense_Sparse_Matrix_Plus_Assign ){ std::cout<<"\ntesting dense-sparse functor assignment"< source_row_major(10,20); blas::compressed_matrix source_column_major_base(20,10); blas::matrix_transpose > source_column_major(source_column_major_base); blas::matrix preinit(10,20); blas::matrix result(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = 2*(20*i+1)+1; } } for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j != 20; ++j){ preinit(i,j) = 3*(20*i+1)+2; result(i,j) = preinit(i,j)+source_row_major(i,j); } } //test all 4 combinations of row/column major { blas::matrix target = preinit; std::cout<<"testing row-row"<(target,source_row_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing row-column"<(target,source_column_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing column-row"<(target,source_row_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing column-column"<(target,source_column_major); checkMatrixEqual(target,result); } } BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Sparse_Sparse_Matrix_Plus_Assign ){ std::cout<<"\ntesting sparse-sparse functor assignment"< source_row_major(10,20); blas::compressed_matrix source_column_major_base(20,10); blas::matrix_transpose > source_column_major(source_column_major_base); blas::compressed_matrix preinit(10,20); blas::compressed_matrix 
result(10,20); for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 1; j < 20; j += (i + 1)){ source_row_major(i,j) = 2*(20*i+1)+1; source_column_major(i,j) = 2*(20*i+1)+1; } } for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j < 20; j += (i + 2) / 2){ preinit(i,j) = 3*(20*i+1)+2; } } for(unsigned int i = 0; i != 10; ++i){ for(unsigned int j = 0; j < 20; ++j){ int r = preinit(i,j)+source_row_major(i,j); if(r != 0) result(i,j) = r; } } //test all 4 combinations of row/column major { blas::compressed_matrix target = preinit; std::cout<<"testing row-row"<(target,source_row_major); checkMatrixEqual(target,result); } { blas::matrix target = preinit; std::cout<<"testing row-column"<(target,source_column_major); checkMatrixEqual(target,result); } { blas::compressed_matrix target_base(20,10); blas::matrix_transpose > target(target_base); target = preinit; std::cout<<"testing column-row"<(target,source_row_major); checkMatrixEqual(target,result); } { blas::compressed_matrix target_base(20,10); blas::matrix_transpose > target(target_base); target = preinit; std::cout<<"testing column-column"<(target,source_column_major); checkMatrixEqual(target,result); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/BLAS/matrix_proxy.cpp000066400000000000000000000242621314655040000206520ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_MatrixProxy #include #include #include using namespace shark; template void checkDenseMatrixEqual(M1 const& m1, M2 const& m2){ BOOST_REQUIRE_EQUAL(m1.size1(),m2.size1()); BOOST_REQUIRE_EQUAL(m1.size2(),m2.size2()); //indexed access for(std::size_t i = 0; i != m2.size1(); ++i){ for(std::size_t j = 0; j != m2.size2(); ++j){ BOOST_CHECK_EQUAL(m1(i,j),m2(i,j)); } } //iterator access rows for(std::size_t i = 0; i != m2.size1(); ++i){ typedef typename M1::const_row_iterator Iter; BOOST_REQUIRE_EQUAL(m1.row_end(i)-m1.row_begin(i), m1.size2()); std::size_t k = 0; for(Iter it = m1.row_begin(i); it != m1.row_end(i); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); BOOST_CHECK_EQUAL(*it,m2(i,k)); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, m1.size2()); } //iterator access columns for(std::size_t i = 0; i != m2.size2(); ++i){ typedef typename M1::const_column_iterator Iter; BOOST_REQUIRE_EQUAL(m1.column_end(i)-m1.column_begin(i), m1.size1()); std::size_t k = 0; for(Iter it = m1.column_begin(i); it != m1.column_end(i); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); BOOST_CHECK_EQUAL(*it,m2(k,i)); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, m1.size1()); } } template void checkDenseMatrixAssignment(M1& m1, M2 const& m2){ BOOST_REQUIRE_EQUAL(m1.size1(),m2.size1()); BOOST_REQUIRE_EQUAL(m1.size2(),m2.size2()); //indexed access for(std::size_t i = 0; i != m2.size1(); ++i){ for(std::size_t j = 0; j != m2.size2(); ++j){ m1(i,j) = 0; BOOST_CHECK_EQUAL(m1(i,j),0); m1(i,j) = m2(i,j); BOOST_CHECK_EQUAL(m1(i,j),m2(i,j)); m1(i,j) = 0; BOOST_CHECK_EQUAL(m1(i,j),0); } } //iterator access rows for(std::size_t i = 0; i != m2.size1(); ++i){ typedef typename M1::row_iterator Iter; BOOST_REQUIRE_EQUAL(m1.row_end(i)-m1.row_begin(i), m1.size2()); std::size_t k = 0; for(Iter it = m1.row_begin(i); it != m1.row_end(i); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); *it=0; BOOST_CHECK_EQUAL(*it,0); BOOST_CHECK_EQUAL(m1(i,k),0); *it = m2(i,k); BOOST_CHECK_EQUAL(*it,m2(i,k)); BOOST_CHECK_EQUAL(m1(i,k),m2(i,k)); *it=0; BOOST_CHECK_EQUAL(*it,0); BOOST_CHECK_EQUAL(m1(i,k),0); } //test that the actual 
iterated length equals the number of elements BOOST_CHECK_EQUAL(k, m1.size2()); } //iterator access columns for(std::size_t i = 0; i != m2.size2(); ++i){ typedef typename M1::column_iterator Iter; BOOST_REQUIRE_EQUAL(m1.column_end(i)-m1.column_begin(i), m1.size1()); std::size_t k = 0; for(Iter it = m1.column_begin(i); it != m1.column_end(i); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); *it=0; BOOST_CHECK_EQUAL(*it,0); BOOST_CHECK_EQUAL(m1(k,i),0); *it = m2(k,i); BOOST_CHECK_EQUAL(*it,m2(k,i)); BOOST_CHECK_EQUAL(m1(k,i),m2(k,i)); *it=0; BOOST_CHECK_EQUAL(*it,0); BOOST_CHECK_EQUAL(m1(k,i),0); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, m1.size1()); } } template void checkDenseVectorEqual(V1 const& v1, V2 const& v2){ BOOST_REQUIRE_EQUAL(v1.size(),v2.size()); //indexed access for(std::size_t i = 0; i != v2.size(); ++i){ BOOST_CHECK_EQUAL(v1(i),v2(i)); } //iterator access rows typedef typename V1::const_iterator Iter; BOOST_REQUIRE_EQUAL(v1.end()-v1.begin(), v1.size()); std::size_t k = 0; for(Iter it = v1.begin(); it != v1.end(); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); BOOST_CHECK_EQUAL(*it,v2(k)); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, v2.size()); } template void checkDenseVectorAssignment(V1& v1, V2 const& v2){ BOOST_REQUIRE_EQUAL(v1.size(),v2.size()); //indexed access for(std::size_t i = 0; i != v2.size(); ++i){ v1(i) = 0; BOOST_CHECK_EQUAL(v1(i),0); v1(i) = v2(i); BOOST_CHECK_EQUAL(v1(i),v2(i)); v1(i) = 0; BOOST_CHECK_EQUAL(v1(i),0); } //iterator access rows typedef typename V1::iterator Iter; BOOST_REQUIRE_EQUAL(v1.end()-v1.begin(), v1.size()); std::size_t k = 0; for(Iter it = v1.begin(); it != v1.end(); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); *it = 0; BOOST_CHECK_EQUAL(v1(k),0); *it = v2(k); BOOST_CHECK_EQUAL(v1(k),v2(k)); *it = 0; BOOST_CHECK_EQUAL(v1(k),0); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, v2.size()); } std::size_t Dimensions1 = 4; std::size_t Dimensions2 = 5; struct MatrixProxyFixture { blas::matrix denseData; blas::matrix denseDataColMajor; MatrixProxyFixture():denseData(Dimensions1,Dimensions2),denseDataColMajor(Dimensions1,Dimensions2){ for(std::size_t row=0;row!= Dimensions1;++row){ for(std::size_t col=0;col!=Dimensions2;++col){ denseData(row,col) = row*Dimensions2+col+5.0; denseDataColMajor(row,col) = row*Dimensions2+col+5.0; } } } }; BOOST_FIXTURE_TEST_SUITE (LinAlg_BLAS_matrix_proxy, MatrixProxyFixture); BOOST_AUTO_TEST_CASE( LinAlg_Dense_Subrange ){ //all possible combinations of ranges on the data matrix for(std::size_t rowEnd=0;rowEnd!= Dimensions1;++rowEnd){ for(std::size_t rowBegin =0;rowBegin <= rowEnd;++rowBegin){//<= for 0 range for(std::size_t colEnd=0;colEnd!=Dimensions2;++colEnd){ for(std::size_t colBegin=0;colBegin != colEnd;++colBegin){ std::size_t size1= rowEnd-rowBegin; std::size_t size2= colEnd-colBegin; blas::matrix mTest(size1,size2); for(std::size_t i = 0; i != size1; ++i){ for(std::size_t j = 0; j != size2; ++j){ mTest(i,j) = denseData(i+rowBegin,j+colBegin); } } checkDenseMatrixEqual( subrange(denseData,rowBegin,rowEnd,colBegin,colEnd), mTest ); blas::matrix newData(Dimensions1,Dimensions2,0); blas::matrix newDataColMaj(Dimensions1,Dimensions2,0); blas::matrix_range > rangeTest = subrange(newData,rowBegin,rowEnd,colBegin,colEnd); blas::matrix_range > rangeTestColMaj = subrange(newDataColMaj,rowBegin,rowEnd,colBegin,colEnd); checkDenseMatrixAssignment(rangeTest,mTest); 
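//A minimal aliasing spot-check (a sketch added for exposition, mirroring the
//proxy semantics exercised in this test): subrange returns a proxy over the
//original storage, so a write through the proxy must be visible in the
//underlying matrix. The names below are local to this block.
{
	blas::matrix<double> aliasCheck(2,2,0.0);
	blas::matrix_range<blas::matrix<double> > proxy = subrange(aliasCheck,0,2,0,2);
	proxy(1,1) = 7.0;//write through the proxy...
	BOOST_CHECK_EQUAL(aliasCheck(1,1),7.0);//...read back through the matrix
}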
checkDenseMatrixAssignment(rangeTestColMaj,mTest); //check assignment { rangeTest=mTest; blas::matrix newData2(Dimensions1,Dimensions2,0); blas::matrix_range > rangeTest2 = subrange(newData2,rowBegin,rowEnd,colBegin,colEnd); rangeTest2=rangeTest; for(std::size_t i = 0; i != size1; ++i){ for(std::size_t j = 0; j != size2; ++j){ BOOST_CHECK_EQUAL(newData(i+rowBegin,j+colBegin),mTest(i,j)); BOOST_CHECK_EQUAL(newData2(i+rowBegin,j+colBegin),mTest(i,j)); } } } //check clear for(std::size_t i = 0; i != size1; ++i){ for(std::size_t j = 0; j != size2; ++j){ rangeTest(i,j) = denseData(i+rowBegin,j+colBegin); rangeTestColMaj(i,j) = denseData(i+rowBegin,j+colBegin); } } rangeTest.clear(); rangeTestColMaj.clear(); for(std::size_t i = 0; i != size1; ++i){ for(std::size_t j = 0; j != size2; ++j){ BOOST_CHECK_EQUAL(rangeTest(i,j),0); BOOST_CHECK_EQUAL(rangeTestColMaj(i,j),0); } } } } } } } BOOST_AUTO_TEST_CASE( LinAlg_Dense_row){ for(std::size_t r = 0;r != Dimensions1;++r){ blas::vector vecTest(Dimensions2); for(std::size_t i = 0; i != Dimensions2; ++i) vecTest(i) = denseData(r,i); checkDenseVectorEqual(row(denseData,r),vecTest); checkDenseVectorEqual(row(denseDataColMajor,r),vecTest); blas::matrix newData(Dimensions1,Dimensions2,0); blas::matrix newDataColMajor(Dimensions1,Dimensions2,0); blas::matrix_row > rowTest = row(newData,r); blas::matrix_row > rowTestColMajor = row(newDataColMajor,r); checkDenseVectorAssignment(rowTest,vecTest); checkDenseVectorAssignment(rowTestColMajor,vecTest); //check assignment { rowTest=vecTest; blas::matrix newData2(Dimensions1,Dimensions2,0); blas::matrix_row > rowTest2 = row(newData2,r); rowTest2=rowTest; for(std::size_t i = 0; i != Dimensions2; ++i){ BOOST_CHECK_EQUAL(newData(r,i),vecTest(i)); BOOST_CHECK_EQUAL(newData2(r,i),vecTest(i)); } } //check clear for(std::size_t i = 0; i != Dimensions2; ++i){ rowTest(i) = double(i); rowTestColMajor(i) = double(i); } rowTest.clear(); rowTestColMajor.clear(); for(std::size_t i = 0; i != Dimensions2; ++i){ BOOST_CHECK_EQUAL(rowTest(i),0.0); BOOST_CHECK_EQUAL(rowTestColMajor(i),0.0); } } } BOOST_AUTO_TEST_CASE( LinAlg_Dense_column){ for(std::size_t c = 0;c != Dimensions2;++c){ blas::vector vecTest(Dimensions1); for(std::size_t i = 0; i != Dimensions1; ++i) vecTest(i) = denseData(i,c); checkDenseVectorEqual(column(denseData,c),vecTest); blas::matrix newData(Dimensions1,Dimensions2,0); blas::matrix newDataColMajor(Dimensions1,Dimensions2,0); blas::matrix_column > columnTest = column(newData,c); blas::matrix_column > columnTestColMajor = column(newDataColMajor,c); checkDenseVectorAssignment(columnTest,vecTest); checkDenseVectorAssignment(columnTestColMajor,vecTest); { columnTest=vecTest; blas::matrix newData2(Dimensions1,Dimensions2,0); blas::matrix_column > columnTest2 = column(newData2,c); columnTest2=columnTest; for(std::size_t i = 0; i != Dimensions1; ++i){ BOOST_CHECK_EQUAL(newData(i,c),vecTest(i)); BOOST_CHECK_EQUAL(newData2(i,c),vecTest(i)); } } //check clear for(std::size_t i = 0; i != Dimensions1; ++i){ columnTest(i) = double(i); columnTestColMajor(i) = double(i); } columnTest.clear(); columnTestColMajor.clear(); for(std::size_t i = 0; i != Dimensions1; ++i){ BOOST_CHECK_EQUAL(columnTest(i),0.0); BOOST_CHECK_EQUAL(columnTestColMajor(i),0.0); } } } BOOST_AUTO_TEST_SUITE_END();Shark-3.1.4/Test/LinAlg/BLAS/transformations.cpp000066400000000000000000000164221314655040000213350ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_VectorTransformations #include #include #include using namespace shark; using namespace std; 
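//All cases in the suite below share one pattern: apply a scalar function
//element-wise to a vector (or single-column matrix) and compare against a
//hand-computed loop. A small helper capturing that pattern (a sketch added
//for exposition, not used by the original tests; F can be any callable
//taking and returning double):
template<class Vec, class F>
void checkElementwiseTransform(Vec const& x, Vec const& y, F f){
	BOOST_REQUIRE_EQUAL(x.size(), y.size());
	for (size_t i = 0; i < x.size(); i++){
		BOOST_CHECK_SMALL(y(i) - f(x(i)), 1.e-10);
	}
}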
const size_t Dimensions=3; double target[Dimensions] = {3, 4, 15}; BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_transformations) BOOST_AUTO_TEST_CASE( LinAlg_Exp ) { RealVector x(Dimensions); RealMatrix xm(Dimensions,1); RealVector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; xm(i,0) = i; result(i)=std::exp(x(i)); } RealVector y = exp(x); RealMatrix ym = exp(xm); //check result for (size_t i = 0; i < Dimensions; i++) { BOOST_CHECK_SMALL(y(i) - result(i),1.e-10); BOOST_CHECK_SMALL(ym(i,0) - result(i),1.e-10); } } BOOST_AUTO_TEST_CASE( LinAlg_Log ) { RealVector x(Dimensions); RealMatrix xm(Dimensions,1); RealVector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i+1; xm(i,0) = i+1; result(i)=std::log(x(i)); } RealVector y = log(x); RealMatrix ym = log(xm); //check result for (size_t i = 0; i < Dimensions; i++) { BOOST_CHECK_SMALL(y(i) - result(i),1.e-10); BOOST_CHECK_SMALL(ym(i,0) - result(i),1.e-10); } } BOOST_AUTO_TEST_CASE( LinAlg_Tanh ) { RealVector x(Dimensions); RealMatrix xm(Dimensions,1); RealVector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; xm(i,0) = i; result(i)=std::tanh(x(i)); } RealVector y = tanh(x); RealMatrix ym = tanh(xm); //check result for (size_t i = 0; i < Dimensions; i++) { BOOST_CHECK_SMALL(y(i) - result(i),1.e-10); BOOST_CHECK_SMALL(ym(i,0) - result(i),1.e-10); } } BOOST_AUTO_TEST_CASE( LinAlg_Sigmoid ) { RealVector x(Dimensions); RealMatrix xm(Dimensions,1); RealVector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; xm(i,0) = i; result(i) = sigmoid(x(i)); } RealVector y = sigmoid(x); RealMatrix ym = sigmoid(xm); //check result for (size_t i = 0; i < Dimensions; i++) { BOOST_CHECK_SMALL(y(i) - result(i),1.e-10); BOOST_CHECK_SMALL(ym(i,0) - result(i),1.e-10); } } BOOST_AUTO_TEST_CASE( LinAlg_Safe_Div ) { RealVector x(Dimensions); RealVector y(Dimensions); RealMatrix xm(Dimensions,1); RealMatrix ym(Dimensions,1); RealVector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; y(i) = i % 3; xm(i,0) = i; ym(i,0) = i % 3; result(i) = (i % 3 == 0)? 2.0: x(i)/y(i); } RealVector z = safe_div(x,y,2.0); RealMatrix zm = safe_div(xm,ym,2.0); //check result for (size_t i = 0; i < Dimensions; i++) { BOOST_CHECK_SMALL(z(i) - result(i),1.e-10); BOOST_CHECK_SMALL(zm(i,0) - result(i),1.e-10); } } BOOST_AUTO_TEST_CASE( LinAlg_SoftPlus ) { RealVector x(Dimensions); RealMatrix xm(Dimensions,1); RealVector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; xm(i,0) = i; result(i) = softPlus(x(i)); } RealVector y = softPlus(x); RealMatrix ym = softPlus(xm); //check result for (size_t i = 0; i < Dimensions; i++) { BOOST_CHECK_SMALL(y(i) - result(i),1.e-10); BOOST_CHECK_SMALL(ym(i,0) - result(i),1.e-10); } } //when dense works, this tests, whether sparse input is also correct BOOST_AUTO_TEST_CASE( LinAlg_UnaryTransformation_Sparse_Vector ){ //we first check, that transformations which allow sparseness are actually sparse. CompressedRealVector compressed(100); compressed(10) = 2; compressed(50) = 3; { typedef blas::vector_unary > SqrExpression; SqrExpression sqrexpr = sqr(compressed); SqrExpression::const_iterator iter = sqrexpr.begin(); BOOST_CHECK_EQUAL(iter.index(),10); BOOST_CHECK_SMALL(*iter-4, 1.e-15); ++iter; BOOST_CHECK_EQUAL(iter.index(),50); BOOST_CHECK_SMALL(*iter-9, 1.e-15); ++iter; BOOST_CHECK(iter==sqrexpr.end()); } //won't fix. This is stupid. //~ { //~ //now we check, that transformations which don't allow, are dense. 
//~ typedef blas::vector_unary > ExpExpression; //~ ExpExpression expexpr = exp(compressed); //~ ExpExpression::const_iterator iter = expexpr.begin(); //~ std::size_t dist = std::distance(expexpr.begin(),expexpr.end()); //~ BOOST_REQUIRE_EQUAL(dist,100); //~ for(std::size_t i = 0; i != 10; ++i){ //~ BOOST_CHECK_EQUAL(iter.index(),i); //~ BOOST_CHECK_EQUAL(*iter, 1.0); //~ ++iter; //~ } //~ BOOST_CHECK_EQUAL(iter.index(),10); //~ BOOST_CHECK_SMALL(*iter-std::exp(2.0), 1.e-15); //~ ++iter; //~ for(std::size_t i = 11; i != 50; ++i){ //~ BOOST_CHECK_EQUAL(iter.index(),i); //~ BOOST_CHECK_EQUAL(*iter, 1.0); //~ ++iter; //~ } //~ BOOST_CHECK_EQUAL(iter.index(),50); //~ BOOST_CHECK_SMALL(*iter-std::exp(3.0), 1.e-15); //~ ++iter; //~ for(std::size_t i = 51; i != 100; ++i){ //~ BOOST_CHECK_EQUAL(iter.index(),i); //~ BOOST_CHECK_EQUAL(*iter, 1.0); //~ ++iter; //~ } //~ BOOST_CHECK(iter==expexpr.end()); //~ } } BOOST_AUTO_TEST_CASE( LinAlg_UnaryTransformation_Sparse_Matrix ){ //we first check, that transformations which allow sparseness are actually sparse. CompressedIntMatrix compressed(10,9); compressed(5,5) = 2; compressed(8,2) = 3; compressed(8,5) = 5; { typedef blas::matrix_unary > SqrExpression; SqrExpression sqrexpr = sqr(compressed); BOOST_CHECK_EQUAL(sqrexpr.size1(),10); BOOST_CHECK_EQUAL(sqrexpr.size2(),9); for(std::size_t i = 0; i != 10; ++i){ int elements = std::distance(sqrexpr.row_begin(i),sqrexpr.row_end(i)); if(i == 5){ BOOST_CHECK_EQUAL(elements,1); BOOST_CHECK_EQUAL(*sqrexpr.row_begin(i),4); }else if(i == 8){ BOOST_CHECK_EQUAL(elements,2); BOOST_CHECK_EQUAL(*sqrexpr.row_begin(i),9); BOOST_CHECK_EQUAL(*(++sqrexpr.row_begin(i)),25); }else{ BOOST_CHECK_EQUAL(elements,0); } } } //won't fix. This is stupid. //~ { //~ //now we check, that transformations which don't allow, are dense. 
//~ typedef blas::matrix_unary > SqrExpression; //~ ExpExpression expexpr = exp(compressed); //~ std::size_t i = 0; //~ for(ExpExpression::const_iterator1 iter = expexpr.begin1(); iter != expexpr.end1(); ++iter,++i){ //~ std::size_t j = 0; //~ for(ExpExpression::const_iterator2 iter2 = iter.begin(); iter2 != iter.end(); ++iter2,++j){ //~ BOOST_CHECK_EQUAL(iter2.index1(),i); //~ BOOST_CHECK_EQUAL(iter2.index2(),j); //~ if(i == 5 && j ==5){ //~ BOOST_CHECK_SMALL(*iter2-std::exp(2.0), 1.e-15); //~ } else if(i == 8 && j == 2){ //~ BOOST_CHECK_SMALL(*iter2-std::exp(3.0), 1.e-15); //~ } else //~ BOOST_CHECK_SMALL(*iter2-1.0, 1.e-15); //~ } //~ } //~ std::size_t j = 0; //~ for(ExpExpression::const_iterator2 iter2 = expexpr.begin2(); iter2 != expexpr.end2(); ++iter2,++j){ //~ std::size_t i = 0; //~ for(ExpExpression::const_iterator1 iter = iter2.begin(); iter != iter2.end(); ++iter,++i){ //~ BOOST_CHECK_EQUAL(iter.index1(),i); //~ BOOST_CHECK_EQUAL(iter.index2(),j); //~ if(i == 5 && j ==5){ //~ BOOST_CHECK_SMALL(*iter-std::exp(2.0), 1.e-15); //~ } else if(i == 8 && j == 2){ //~ BOOST_CHECK_SMALL(*iter-std::exp(3.0), 1.e-15); //~ } else //~ BOOST_CHECK_SMALL(*iter-1.0, 1.e-15); //~ } //~ } //~ } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/BLAS/triangular_matrix.cpp000066400000000000000000000232301314655040000216330ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_MatrixTriangular #include #include #include #include #include using namespace shark; using namespace blas; template void testTriangularEqualLower(TriangularMatrix& matrix,blas::matrix const& result,std::size_t rows){ BOOST_REQUIRE_EQUAL(matrix.size1(),rows); BOOST_REQUIRE_EQUAL(matrix.size1(),rows); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j !=rows; ++j){ BOOST_CHECK_EQUAL(matrix(i,j),result(i,j)); } } for(std::size_t i = 0; i != rows;++i){ typename TriangularMatrix::row_iterator pos=matrix.row_begin(i); BOOST_REQUIRE(pos < matrix.row_end(i)); std::size_t posIndex = 0; while(pos != matrix.row_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(i,posIndex)); //test operator [n] for(int j=0;j!= (int)i;++j){ BOOST_CHECK_EQUAL(pos[j-posIndex],result(i,j)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == i+1); } for(std::size_t i = 0; i!=rows;++i){ typename TriangularMatrix::column_iterator pos=matrix.column_begin(i); BOOST_REQUIRE(pos < matrix.column_end(i)); std::size_t posIndex = i; while(pos != matrix.column_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(posIndex,i)); //test operator [n] for(int j=i;j!= (int)rows;++j){ BOOST_CHECK_EQUAL(pos[j-(int)posIndex],result(j,i)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == rows); } TriangularMatrix const& cmatrix = matrix; for(std::size_t i = 0; i != rows;++i){ typename TriangularMatrix::const_row_iterator pos=cmatrix.row_begin(i); BOOST_REQUIRE(pos < cmatrix.row_end(i)); std::size_t posIndex = 0; while(pos != cmatrix.row_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(i,posIndex)); //test operator [n] for(int j=0;j!= (int)i;++j){ BOOST_CHECK_EQUAL(pos[j-posIndex],result(i,j)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == i+1); } for(std::size_t i = 0; i!=rows;++i){ typename TriangularMatrix::const_column_iterator pos=cmatrix.column_begin(i); BOOST_REQUIRE(pos < cmatrix.column_end(i)); std::size_t posIndex = i; while(pos != cmatrix.column_end(i)){ 
BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(posIndex,i)); //test operator [n] for(int j=i;j!= (int)rows;++j){ BOOST_CHECK_EQUAL(pos[j-(int)posIndex],result(j,i)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == rows); } } template void testTriangularEqualUpper(TriangularMatrix& matrix,blas::matrix const& result,std::size_t rows){ BOOST_REQUIRE_EQUAL(matrix.size1(),rows); BOOST_REQUIRE_EQUAL(matrix.size1(),rows); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j !=rows; ++j){ BOOST_CHECK_EQUAL(matrix(i,j),result(i,j)); } } for(std::size_t i = 0; i != rows;++i){ typename TriangularMatrix::row_iterator pos=matrix.row_begin(i); BOOST_REQUIRE(pos < matrix.row_end(i)); std::size_t posIndex = i; while(pos != matrix.row_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(i,posIndex)); //test operator [n] for(int j=i;j!= (int)rows;++j){ BOOST_CHECK_EQUAL(pos[j-posIndex],result(i,j)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == rows); } for(std::size_t i = 0; i!=rows;++i){ typename TriangularMatrix::column_iterator pos=matrix.column_begin(i); BOOST_REQUIRE(pos < matrix.column_end(i)); std::size_t posIndex = 0; while(pos != matrix.column_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(posIndex,i)); //test operator [n] for(int j=0;j!= (int)i;++j){ BOOST_CHECK_EQUAL(pos[j-(int)posIndex],result(j,i)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == i+1); } TriangularMatrix const& cmatrix = matrix; for(std::size_t i = 0; i != rows;++i){ typename TriangularMatrix::const_row_iterator pos=cmatrix.row_begin(i); BOOST_REQUIRE(pos < cmatrix.row_end(i)); std::size_t posIndex = i; while(pos != cmatrix.row_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(i,posIndex)); //test operator [n] for(int j=i;j!= (int)rows;++j){ BOOST_CHECK_EQUAL(pos[j-posIndex],result(i,j)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == rows); } for(std::size_t i = 0; i!=rows;++i){ typename TriangularMatrix::const_column_iterator pos=cmatrix.column_begin(i); BOOST_REQUIRE(pos < cmatrix.column_end(i)); std::size_t posIndex = 0; while(pos != cmatrix.column_end(i)){ BOOST_REQUIRE(posIndex < rows); BOOST_CHECK_EQUAL(pos.index(),posIndex); BOOST_CHECK_EQUAL(*pos,result(posIndex,i)); //test operator [n] for(int j=0;j!= (int)i;++j){ BOOST_CHECK_EQUAL(pos[j-(int)posIndex],result(j,i)); } ++pos; ++posIndex; } BOOST_CHECK(posIndex == i+1); } } struct TriangularMatrixFixture { std::size_t rows; std::size_t elements; triangular_matrix matrix1; triangular_matrix matrix2; triangular_matrix matrix3; triangular_matrix matrix4; matrix result1; matrix result2; matrix result3; matrix result4; TriangularMatrixFixture():rows(5),elements(15) ,matrix1(rows),matrix2(rows),matrix3(rows),matrix4(rows){ int result1array[]={ 1,0,0,0,0, 2,3,0,0,0, 4,5,6,0,0, 7,8,9,10,0, 11,12,13,14,15 }; int result2array[]={ 1,0,0,0,0, 2,6,0,0,0, 3,7,10,0,0, 4,8,11,13,0, 5,9,12,14,15 }; result1 = adapt_matrix(rows,rows,result1array); result2 = adapt_matrix(rows,rows,result2array); result3 = trans(result2); result4 = trans(result1); for(std::size_t elem=0;elem != elements;++elem){ matrix1.storage()[elem]=elem+1; matrix2.storage()[elem]=elem+1; matrix3.storage()[elem]=elem+1; matrix4.storage()[elem]=elem+1; } } }; BOOST_FIXTURE_TEST_SUITE (LinAlg_BLAS_triangular_matrix,TriangularMatrixFixture) //Check that reading entries of 
the matrix works and it is the same structure //as demanded by the BLAS Standard BOOST_AUTO_TEST_CASE( triangular_matrix_structure){ BOOST_REQUIRE_EQUAL(matrix1.nnz(),elements); BOOST_REQUIRE_EQUAL(matrix2.nnz(),elements); BOOST_REQUIRE_EQUAL(matrix3.nnz(),elements); BOOST_REQUIRE_EQUAL(matrix4.nnz(),elements); //test that matrices are the same std::cout<<"test lower/row_major"< matrix11(matrix1); triangular_matrix matrix11exp(trans(matrix4)); triangular_matrix matrix12(matrix2); triangular_matrix matrix21(matrix1); triangular_matrix matrix22(matrix2); triangular_matrix matrix22exp(trans(matrix3)); triangular_matrix matrix33(matrix3); triangular_matrix matrix33exp(trans(matrix2)); triangular_matrix matrix34(matrix4); triangular_matrix matrix43(matrix3); triangular_matrix matrix44(matrix4); triangular_matrix matrix44exp(trans(matrix1)); std::cout<<"test copying"< matrix11; matrix11=matrix1; triangular_matrix matrix11exp; matrix11exp=trans(matrix4); triangular_matrix matrix12; matrix12=matrix2; std::cout<<"test op="< #include #include using namespace shark; using namespace blas; template void checkMatrixVectorMultiply(M const& arg1, V const& arg2, Result const& result){ BOOST_REQUIRE_EQUAL(arg1.size1(), result.size()); BOOST_REQUIRE_EQUAL(arg2.size(), arg1.size2()); for(std::size_t i = 0; i != arg1.size1(); ++i) { double test_result = inner_prod(row(arg1,i),arg2); BOOST_CHECK_CLOSE(result(i), test_result, 1.e-10); } } template void checkMatrixMatrixMultiply(M1 const& arg1, M2 const& arg2, Result const& result) { BOOST_REQUIRE_EQUAL(arg1.size1(), arg1.size2()); BOOST_REQUIRE_EQUAL(arg1.size2(), arg2.size1()); BOOST_REQUIRE_EQUAL(arg1.size2(), result.size1()); BOOST_REQUIRE_EQUAL(arg2.size2(), result.size2()); for(std::size_t i = 0; i != arg2.size1(); ++i) { for(std::size_t j = 0; j != arg2.size2(); ++j) { double test_result = inner_prod(row(arg1,i),column(arg2,j)); BOOST_CHECK_CLOSE(result(i,j), test_result, 1.e-10); } } } BOOST_AUTO_TEST_SUITE(LinAlg_BLAS_triangular_prod) BOOST_AUTO_TEST_CASE(LinAlg_triangular_prod_matrix_vector) { std::size_t dims = 231;//chosen as not to be a multiple of the block size //initialize the arguments in both row and column major, lower and upper, unit and non-unit diagonal //we add one on the remaining elements to ensure, that triangular_prod does not tuch these elements matrix arg1lowerrm(dims,dims,1.0); matrix arg1lowercm(dims,dims,1.0); matrix arg1upperrm(dims,dims,1.0); matrix arg1uppercm(dims,dims,1.0); //inputs to compare to with the standard prod matrix arg1lowertest(dims,dims,0.0); matrix arg1uppertest(dims,dims,0.0); for(std::size_t i = 0; i != dims; ++i){ for(std::size_t j = 0; j <=i; ++j){ arg1lowerrm(i,j) = arg1lowercm(i,j) = i*dims+0.2*j+1; arg1lowertest(i,j) = i*dims+0.2*j+1; arg1upperrm(j,i) = arg1uppercm(j,i) = i*dims+0.2*j+1; arg1uppertest(j,i) = i*dims+0.2*j+1; } } vector arg2(dims); for(std::size_t j = 0; j != dims; ++j){ arg2(j) = 1.5*j+2; } std::cout<<"\nchecking matrix-vector prod non-unit"< result = arg2; triangular_prod(arg1lowerrm,result); checkMatrixVectorMultiply(arg1lowertest,arg2,result); } { std::cout<<"column major lower Ax"< result = arg2; triangular_prod(arg1lowercm,result); checkMatrixVectorMultiply(arg1lowertest,arg2,result); } { std::cout<<"row major upper Ax"< result = arg2; triangular_prod(arg1upperrm,result); checkMatrixVectorMultiply(arg1uppertest,arg2,result); } { std::cout<<"column major upper Ax"< result = arg2; triangular_prod(arg1uppercm,result); checkMatrixVectorMultiply(arg1uppertest,arg2,result); } 
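	//A tiny hand-checked example of the operation verified above (a sketch;
	//the lower tag argument of triangular_prod is an assumption here, since
	//the template arguments of the calls in this file were lost in this dump).
	//The result vector is overwritten in place with the product of the lower
	//triangle of L2 and the old vector contents.
	{
		matrix<double,row_major> L2(2,2,0.0);
		L2(0,0) = 2.0; L2(1,0) = 3.0; L2(1,1) = 4.0;
		vector<double> r(2,1.0);
		triangular_prod<lower>(L2,r);
		BOOST_CHECK_CLOSE(r(0),2.0,1.e-10);//2*1
		BOOST_CHECK_CLOSE(r(1),7.0,1.e-10);//3*1 + 4*1
	}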
diag(arg1lowertest) = blas::repeat(1.0,dims); diag(arg1uppertest) = blas::repeat(1.0,dims); std::cout<<"\nchecking matrix-vector prod unit"< result = arg2; triangular_prod(arg1lowerrm,result); checkMatrixVectorMultiply(arg1lowertest,arg2,result); } { std::cout<<"column major lower Ax"< result = arg2; triangular_prod(arg1lowercm,result); checkMatrixVectorMultiply(arg1lowertest,arg2,result); } { std::cout<<"row major upper Ax"< result = arg2; triangular_prod(arg1upperrm,result); checkMatrixVectorMultiply(arg1uppertest,arg2,result); } { std::cout<<"column major upper Ax"< result = arg2; triangular_prod(arg1uppercm,result); checkMatrixVectorMultiply(arg1uppertest,arg2,result); } } BOOST_AUTO_TEST_CASE(LinAlg_triangular_prod_matrix_matrix) { std::size_t dims = 231;//chosen as not to be a multiple of the block size std::size_t N = 30; //initialize the arguments in both row and column major, lower and upper, unit and non-unit diagonal //we add one on the remaining elements to ensure, that triangular_prod does not tuch these elements matrix arg1lowerrm(dims, dims, 1.0); matrix arg1lowercm(dims, dims, 1.0); matrix arg1upperrm(dims, dims, 1.0); matrix arg1uppercm(dims, dims, 1.0); //inputs to compare to with the standard prod matrix arg1lowertest(dims, dims, 0.0); matrix arg1uppertest(dims, dims, 0.0); for(std::size_t i = 0; i != dims; ++i) { for(std::size_t j = 0; j <= i; ++j) { arg1lowerrm(i, j) = arg1lowercm(i, j) = i * dims + 0.2 * j + 1; arg1lowertest(i, j) = i * dims + 0.2 * j + 1; arg1upperrm(j, i) = arg1uppercm(j, i) = i * dims + 0.2 * j + 1; arg1uppertest(j, i) = i * dims + 0.2 * j + 1; } } matrix arg2(dims,N); for(std::size_t i = 0; i != dims; ++i) { for(std::size_t j = 0; j != N; ++j) { arg2(i,j) = 1.5 * j + 2+i; } } std::cout << "\nchecking matrix-matrix prod non-unit" << std::endl; { std::cout << "row major lower AX" << std::endl; matrix result = arg2; triangular_prod(arg1lowerrm, result); checkMatrixMatrixMultiply(arg1lowertest, arg2, result); } { std::cout << "row major upper AX" << std::endl; matrix result = arg2; triangular_prod(arg1upperrm, result); checkMatrixMatrixMultiply(arg1uppertest, arg2, result); } { std::cout << "column major lower AX" << std::endl; matrix result = arg2; triangular_prod(arg1lowercm, result); checkMatrixMatrixMultiply(arg1lowertest, arg2, result); } { std::cout << "column major upper AX" << std::endl; matrix result = arg2; triangular_prod(arg1uppercm, result); checkMatrixMatrixMultiply(arg1uppertest, arg2, result); } diag(arg1lowertest) = blas::repeat(1.0, dims); diag(arg1uppertest) = blas::repeat(1.0, dims); std::cout << "\nchecking matrix-matrix prod unit" << std::endl; { std::cout << "row major lower AX" << std::endl; matrix result = arg2; triangular_prod(arg1lowerrm, result); checkMatrixMatrixMultiply(arg1lowertest, arg2, result); } { std::cout << "row major upper AX" << std::endl; matrix result = arg2; triangular_prod(arg1upperrm, result); checkMatrixMatrixMultiply(arg1uppertest, arg2, result); } { std::cout << "column major lower AX" << std::endl; matrix result = arg2; triangular_prod(arg1lowercm, result); checkMatrixMatrixMultiply(arg1lowertest, arg2, result); } { std::cout << "column major upper AX" << std::endl; matrix result = arg2; triangular_prod(arg1uppercm, result); checkMatrixMatrixMultiply(arg1uppertest, arg2, result); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/BLAS/vector_assign.cpp000066400000000000000000000104051314655040000207450ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_BLAS_VectorAssign #include #include 
#include using namespace shark; template void checkVectorEqual(V1 const& v1, V2 const& v2){ BOOST_REQUIRE_EQUAL(v1.size(),v2.size()); for(std::size_t i = 0; i != v2.size(); ++i){ BOOST_CHECK_EQUAL(v1(i),v2(i)); } } BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_vector_assign) BOOST_AUTO_TEST_CASE( LinAlg_BLAS_Vector_Assign ){ std::cout<<"testing direct assignment"< source_dense(10); blas::compressed_vector source_sparse(10); for(std::size_t i = 0; i != 10; ++i){ source_dense(i) = 2*i+1; } source_sparse(2) = 1; source_sparse(5) = 2; source_sparse(7) = 3; //test all 4 combinations { blas::compressed_vector target_dense(10); for(std::size_t i = 0; i != 10; ++i){ target_dense(i) = 3*i+2; } std::cout<<"testing dense-dense"< target_dense(10); for(std::size_t i = 0; i != 10; ++i){ target_dense(i) = 3*i+2; } std::cout<<"testing dense-sparse"< target_sparse(10); target_sparse(1) = 2; target_sparse(7) = 8; target_sparse(9) = 3; std::cout<<"testing sparse-dense"< target_sparse(10); target_sparse(1) = 2; target_sparse(2) = 2; target_sparse(7) = 8; target_sparse(9) = 3; std::cout<<"testing sparse-sparse"< source_dense(10); blas::compressed_vector source_sparse(10); for(std::size_t i = 0; i != 10; ++i){ source_dense(i) = 2*i+1; } source_sparse(2) = 1; source_sparse(5) = 2; source_sparse(7) = 3; //test all 4 combinations { blas::vector target_dense(10); blas::vector result_dense(10); for(std::size_t i = 0; i != 10; ++i){ target_dense(i) = 3*i+2; result_dense(i) = source_dense(i)+target_dense(i); } std::cout<<"testing dense-dense"<(target_dense,source_dense); checkVectorEqual(target_dense,result_dense); } { blas::vector target_dense(10); blas::vector result_dense(10); for(std::size_t i = 0; i != 10; ++i){ target_dense(i) = 3*i+2; result_dense(i) = source_sparse(i)+target_dense(i); } std::cout<<"testing dense-sparse"<(target_dense,source_sparse); checkVectorEqual(target_dense,result_dense); } { blas::compressed_vector target_sparse(10); blas::vector result_dense(10); target_sparse(1) = 2; target_sparse(2) = 2; target_sparse(7) = 8; target_sparse(9) = 3; for(std::size_t i = 0; i != 10; ++i){ result_dense(i) = source_dense(i)+target_sparse(i); } std::cout<<"testing sparse-dense"<(target_sparse,source_dense); BOOST_CHECK_EQUAL(target_sparse.nnz(), 10); checkVectorEqual(target_sparse,result_dense); } { blas::compressed_vector target_sparse(10); blas::vector result_dense(10); target_sparse(1) = 2; target_sparse(2) = 2; target_sparse(7) = 8; target_sparse(9) = 3; for(std::size_t i = 0; i != 10; ++i){ result_dense(i) = source_sparse(i)+target_sparse(i); } std::cout<<"testing sparse-sparse"<(target_sparse,source_sparse); BOOST_CHECK_EQUAL(target_sparse.nnz(), 5); checkVectorEqual(target_sparse,result_dense); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/BLAS/vector_expression.cpp000066400000000000000000000336021314655040000216640ustar00rootroot00000000000000#define BOOST_TEST_MODULE BLAS_Vector_vector_expression #include #include #include using namespace shark; using namespace blas; //////////////////DENSE////////////////////////////// template void checkDenseExpressionEquality( Operation op, Result const& result ){ BOOST_REQUIRE_EQUAL(op.size(), result.size()); typename Operation::const_iterator pos = op.begin(); for(std::size_t i = 0; i != op.size(); ++i,++pos){ BOOST_REQUIRE(pos != op.end()); BOOST_CHECK_EQUAL(pos.index(), i); BOOST_CHECK_CLOSE(result(i), op(i),1.e-10); BOOST_CHECK_CLOSE(*pos, op(i),1.e-10); } BOOST_REQUIRE(pos == op.end()); } const std::size_t Dimensions = 10; 
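//Note for readers: objects like 5.0*x tested below are lazy expression
//templates; they are evaluated element by element only when read through
//operator() or the iterator interface inside checkDenseExpressionEquality.
//A minimal usage sketch (hypothetical helper, not part of the original suite):
namespace{
	void exampleExpressionCheck(){
		vector<double> x(3);
		vector<double> expected(3);
		for(std::size_t i = 0; i != 3; ++i){
			x(i) = double(i);
			expected(i) = 2.0*x(i);
		}
		//checks both indexed and iterator access of the unevaluated expression
		checkDenseExpressionEquality(2.0*x, expected);
	}
}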
///////////////////////////////////////////////////////////// //////UNARY TRANSFORMATIONS/////// //////////////////////////////////////////////////////////// BOOST_AUTO_TEST_SUITE (LinAlg_BLAS_vector_expression) BOOST_AUTO_TEST_CASE( BLAS_Vector_Unary_Minus ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i-3.0; result(i)= -x(i); } checkDenseExpressionEquality(-x,result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Scalar_Multiply ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i-3.0; result(i)= 5.0*x(i); } checkDenseExpressionEquality(5.0*x,result); checkDenseExpressionEquality(x*5.0,result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Scalar_Div ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i-3.0; result(i)= x(i)/5.0; } checkDenseExpressionEquality(x/5.0,result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Abs ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = -i+3.0; result(i)= std::abs(x(i)); } checkDenseExpressionEquality(abs(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Sqr ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = -i+3.0; result(i)= x(i)*x(i); } checkDenseExpressionEquality(sqr(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Sqrt ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; result(i)= sqrt(x(i)); } checkDenseExpressionEquality(sqrt(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Exp ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; result(i)=std::exp(x(i)); } checkDenseExpressionEquality(exp(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Log ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i+1; result(i)=std::log(x(i)); } checkDenseExpressionEquality(log(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Tanh ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++){ x(i) = i; result(i)=std::tanh(x(i)); } checkDenseExpressionEquality(tanh(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Sigmoid ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++){ x(i) = i; result(i) = 1.0/(1.0+std::exp(-x(i))); } checkDenseExpressionEquality(sigmoid(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_SoftPlus ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; result(i) = shark::softPlus(x(i)); } checkDenseExpressionEquality(softPlus(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Pow ) { vector x(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i+1.0; result(i)= std::pow(x(i),3.2); } checkDenseExpressionEquality(pow(x,3.2),result); } ///////////////////////////////////////////////////// ///////BINARY OPERATIONS////////// ///////////////////////////////////////////////////// BOOST_AUTO_TEST_CASE( BLAS_Vector_Binary_Plus) { vector x(Dimensions); vector y(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; y(i) = i+Dimensions; result(i) = x(i)+y(i); } checkDenseExpressionEquality(x+y,result); } BOOST_AUTO_TEST_CASE( BLAS_Vector_Binary_Minus) { vector x(Dimensions); vector y(Dimensions); vector result(Dimensions); for (size_t i = 0; i < Dimensions; i++) { x(i) = i; y(i) = -3.0*i+Dimensions; 
		result(i) = x(i)-y(i);
	}
	checkDenseExpressionEquality(x-y,result);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Binary_Multiply ){
	vector<double> x(Dimensions);
	vector<double> y(Dimensions);
	vector<double> result(Dimensions);
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = i;
		y(i) = -3.0*i+Dimensions;
		result(i) = x(i)*y(i);
	}
	checkDenseExpressionEquality(x*y,result);
	checkDenseExpressionEquality(element_prod(x,y),result);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Binary_Div ){
	vector<double> x(Dimensions);
	vector<double> y(Dimensions);
	vector<double> result(Dimensions);
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = 3.0*i+3.0;
		y(i) = i+1;
		result(i) = x(i)/y(i);
	}
	checkDenseExpressionEquality(x/y,result);
	checkDenseExpressionEquality(element_div(x,y),result);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Safe_Div ){
	vector<double> x(Dimensions);
	vector<double> y(Dimensions);
	vector<double> result(Dimensions);
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = i;
		y(i) = i % 3;
		result(i) = (i % 3 == 0)? 2.0: x(i)/y(i);
	}
	checkDenseExpressionEquality(safe_div(x,y,2.0),result);
}
/////////////////////////////////////////////////////
///////////Vector Reductions///////////
/////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE( BLAS_Vector_Max ){
	vector<double> x(Dimensions);
	double result = 1;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = exp(-(i-5.0)*(i-5.0));//max at i = 5
		result = std::max(result,x(i));
	}
	BOOST_CHECK_CLOSE(max(x),result,1.e-10);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Min ){
	vector<double> x(Dimensions);
	double result = -1;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = -std::exp(-(i-5.0)*(i-5.0));//min at i = 5
		result = std::min(result,x(i));
	}
	BOOST_CHECK_CLOSE(min(x),result,1.e-10);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Arg_Max ){
	vector<double> x(Dimensions);
	unsigned int result = 5;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = exp(-(i-5.0)*(i-5.0));//max at i = 5
	}
	BOOST_CHECK_EQUAL(arg_max(x),result);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Arg_Min ){
	vector<double> x(Dimensions);
	unsigned int result = 5;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = -exp(-(i-5.0)*(i-5.0));//min at i = 5
	}
	BOOST_CHECK_EQUAL(arg_min(x),result);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_Sum ){
	vector<double> x(Dimensions);
	double result = 0;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = 2.0*i-5;//2.0 forces double arithmetic; 2*i-5 would wrap around for unsigned i < 3
		result += x(i);
	}
	BOOST_CHECK_CLOSE(sum(x),result,1.e-10);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_norm_1 ){
	vector<double> x(Dimensions);
	double result = 0;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = 2.0*i-5;
		result += std::abs(x(i));
	}
	BOOST_CHECK_CLOSE(norm_1(x),result,1.e-10);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_norm_sqr ){
	vector<double> x(Dimensions);
	double result = 0;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = 2.0*i-5;
		result += x(i)*x(i);
	}
	BOOST_CHECK_CLOSE(norm_sqr(x),result,1.e-10);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_norm_2 ){
	vector<double> x(Dimensions);
	double result = 0;
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = 2.0*i-5;
		result += x(i)*x(i);
	}
	result = std::sqrt(result);
	BOOST_CHECK_CLOSE(norm_2(x),result,1.e-10);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_norm_inf ){
	vector<double> x(Dimensions);
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = exp(-(i-5.0)*(i-5.0));
	}
	x(8) = -2;
	BOOST_CHECK_EQUAL(norm_inf(x),2.0);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_index_norm_inf ){
	vector<double> x(Dimensions);
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = exp(-(i-5.0)*(i-5.0));
	}
	x(8) = -2;
	BOOST_CHECK_EQUAL(index_norm_inf(x),8);
}
BOOST_AUTO_TEST_CASE( BLAS_Vector_inner_prod ){
	vector<double> x(Dimensions);
	vector<double> y(Dimensions);
	for (size_t i = 0; i < Dimensions; i++){
		x(i) = exp(-(i-5.0)*(i-5.0));
		y(i) = exp((i-5.0)*(i-5.0));
	}
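	//each term contributes x(i)*y(i) = exp(-(i-5)^2)*exp(+(i-5)^2) = 1,
	//so the inner product must equal Dimensions up to floating point error.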
BOOST_CHECK_CLOSE(inner_prod(x,y),(double)Dimensions,1.e-5); } //////////////////////////////SPARSE TESTS////////////////////////////// //we only check the operations which make sense for sparseness, that is sparseness is preserved. template void checkSparseExpressionEquality( Operation op, Result const& result ){ BOOST_CHECK_EQUAL(op.size(), result.size()); typename Operation::const_iterator posOp = op.begin(); typename Result::const_iterator posResult = result.begin(); for(;posResult != result.end();++posOp,++posResult){ BOOST_REQUIRE(posOp != op.end()); BOOST_REQUIRE_EQUAL(posOp.index(), posResult.index()); BOOST_CHECK_SMALL(*posOp-*posResult,1.e-3); } } ///////////////////////////////////////////////////////////// //////UNARY TRANSFORMATIONS/////// //////////////////////////////////////////////////////////// std::size_t SparseDimensions = 100; std::size_t VectorNNZ = 10; BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Unary_Minus ) { compressed_vector x(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(i*9+3) = i-3.0; result(i*9+3)= -x(i*9+3); } checkSparseExpressionEquality(-x,result); } BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Scalar_Multiply ) { compressed_vector x(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(i) = i-3.0; result(i)= 5.0*x(i); } checkSparseExpressionEquality(5.0*x,result); checkSparseExpressionEquality(x*5.0,result); } BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Scalar_Div ) { compressed_vector x(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(i+90) = i-3.0; result(i+90)= x(i+90)/5.0; } checkSparseExpressionEquality(x/5.0,result); } BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Abs ) { compressed_vector x(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(i) = -double(i)+3.0; result(i)= std::abs(x(i)); } checkSparseExpressionEquality(abs(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Sqr ) { compressed_vector x(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(i) = -double(i)+3.0; result(i)= x(i)*x(i); } checkSparseExpressionEquality(sqr(x),result); } BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Sqrt ) { compressed_vector x(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(i) = i; result(i)= sqrt(x(i)); } checkSparseExpressionEquality(sqrt(x),result); } ////////////////////////////////////////////////////////////// //////BINARY TRANSFORMATIONS/////// ///////////////////////////////////////////////////////////// BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Binary_Plus) { compressed_vector x(SparseDimensions); compressed_vector y(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(10*i+1) = 0.5*i; x(10*i+2) = i; y(10*i) = 0.5*i; y(10*i+1) = 2*i; result(10*i) = y(10*i); result(10*i+1) = x(10*i+1)+y(10*i+1); result(10*i+2) = x(10*i+2); } checkSparseExpressionEquality(x+y,result); } BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Binary_Minus) { compressed_vector x(SparseDimensions); compressed_vector y(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { x(10*i+1) = 0.5*i; x(10*i+2) = i; y(10*i) = 0.5*i; y(10*i+1) = 2*i; result(10*i) = -y(10*i); result(10*i+1) = x(10*i+1)-y(10*i+1); result(10*i+2) = x(10*i+2); } checkSparseExpressionEquality(x-y,result); 
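	//A compact hand-checked sketch of the same semantics: sparse plus/minus
	//iterate over the union of both nonzero patterns, so a structural zero in
	//x paired with a nonzero in y still produces a stored result entry.
	//(Assigning an expression to a compressed_vector is assumed to work here
	//exactly as it does elsewhere in this suite.)
	{
		compressed_vector<double> a(4);
		compressed_vector<double> b(4);
		a(1) = 1.0;
		b(2) = 2.0;
		compressed_vector<double> d(4);
		d = a-b;//stored entries end up at indices 1 and 2
		BOOST_CHECK_EQUAL(d(1),1.0);
		BOOST_CHECK_EQUAL(d(2),-2.0);
	}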
} BOOST_AUTO_TEST_CASE( BLAS_Sparse_Vector_Binary_Multiply) { compressed_vector x(SparseDimensions); compressed_vector y(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < VectorNNZ; i++) { y(10*i) = 0.5*i+1; y(10*i+1) = 2*i+1; x(10*i+1) = 0.5*i+1; x(10*i+2) = i+1; result(10*i+1) = x(10*i+1)*y(10*i+1); } checkSparseExpressionEquality(x*y,result); } //////////////////////////////DENSE-SPARSE TESTS////////////////////////////// ////////////////////////////////////////////////////////////// //////BINARY TRANSFORMATIONS/////// ///////////////////////////////////////////////////////////// BOOST_AUTO_TEST_CASE( BLAS_Dense_Sparse_Vector_Binary_Plus) { vector x(SparseDimensions); compressed_vector y(SparseDimensions); vector result(SparseDimensions); for (size_t i = 0; i < SparseDimensions; i++) { x(i) = 0.5*i+2; result(i) = x(i); } for (size_t i = 0; i < VectorNNZ; i++) { y(10*i+1) = 0.5*i; result(10*i+1) += y(10*i+1); } checkDenseExpressionEquality(x+y,result); checkDenseExpressionEquality(y+x,result); } BOOST_AUTO_TEST_CASE( BLAS_Dense_Sparse_Vector_Binary_Multiply) { vector x(SparseDimensions); compressed_vector y(SparseDimensions); compressed_vector result(SparseDimensions); for (size_t i = 0; i < SparseDimensions; i++) { x(i) = 0.5*i+2; } for (size_t i = 0; i < VectorNNZ; i++) { y(10*i+1) = 0.5*i; result(10*i+1) = x(10*i+1)*y(10*i+1); } checkSparseExpressionEquality(x*y,result); checkSparseExpressionEquality(y*x,result); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/BLAS/vector_proxy.cpp000066400000000000000000000071311314655040000206440ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Vector_Proxy #include #include #include using namespace shark; template void checkDenseVectorEqual(V1 const& v1, V2 const& v2){ BOOST_REQUIRE_EQUAL(v1.size(),v2.size()); //indexed access for(std::size_t i = 0; i != v2.size(); ++i){ BOOST_CHECK_EQUAL(v1(i),v2(i)); } //iterator accessranges typedef typename V1::const_iterator Iter; BOOST_REQUIRE_EQUAL(v1.end()-v1.begin(), v1.size()); std::size_t k = 0; for(Iter it = v1.begin(); it != v1.end(); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); BOOST_CHECK_EQUAL(*it,v2(k)); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, v2.size()); } template void checkDenseVectorAssignment(V1& v1, V2 const& v2){ BOOST_REQUIRE_EQUAL(v1.size(),v2.size()); //indexed access for(std::size_t i = 0; i != v2.size(); ++i){ v1(i) = 0; BOOST_CHECK_EQUAL(v1(i),0); v1(i) = v2(i); BOOST_CHECK_EQUAL(v1(i),v2(i)); v1(i) = 0; BOOST_CHECK_EQUAL(v1(i),0); } //iterator accessranges typedef typename V1::iterator Iter; BOOST_REQUIRE_EQUAL(v1.end()-v1.begin(), v1.size()); std::size_t k = 0; for(Iter it = v1.begin(); it != v1.end(); ++it,++k){ BOOST_CHECK_EQUAL(k,it.index()); *it = 0; BOOST_CHECK_EQUAL(v1(k),0); *it = v2(k); BOOST_CHECK_EQUAL(v1(k),v2(k)); *it = 0; BOOST_CHECK_EQUAL(v1(k),0); } //test that the actual iterated length equals the number of elements BOOST_CHECK_EQUAL(k, v2.size()); } std::size_t Dimensions = 8; struct VectorProxyFixture { blas::vector denseData; blas::compressed_vector compressedData; VectorProxyFixture():denseData(Dimensions){ for(std::size_t i = 0; i!= Dimensions;++i){ denseData(i) = i+5; } } }; BOOST_FIXTURE_TEST_SUITE (LinAlg_BLAS_vector_proxy, VectorProxyFixture); BOOST_AUTO_TEST_CASE( LinAlg_Dense_Subrange ){ //all possible combinations of ranges on the data vector for(std::size_t rangeEnd=0;rangeEnd!= Dimensions;++rangeEnd){ for(std::size_t rangeBegin =0;rangeBegin 
<=rangeEnd;++rangeBegin){//<= for 0 range std::size_t size=rangeEnd-rangeBegin; blas::vector vTest(size); for(std::size_t i = 0; i != size; ++i){ vTest(i) = denseData(i+rangeBegin); } checkDenseVectorEqual(subrange(denseData,rangeBegin,rangeEnd),vTest); //assignment using op() and iterators { blas::vector newData(Dimensions,1.0); blas::vector_range > rangeTest = subrange(newData,rangeBegin,rangeEnd); checkDenseVectorAssignment(rangeTest,vTest);//cehcks op() and iterators for assignment //check that after assignment all elements outside the range are still intact for(std::size_t i = 0; i != rangeBegin; ++i){ BOOST_CHECK_EQUAL(newData(i),1.0); } for(std::size_t i = rangeEnd; i != Dimensions; ++i){ BOOST_CHECK_EQUAL(newData(i),1.0); } } //check clear { blas::vector newData(Dimensions,1.0); blas::vector_range > rangeTest = subrange(newData,rangeBegin,rangeEnd); rangeTest.clear(); for(std::size_t i = 0; i != size; ++i){ BOOST_CHECK_EQUAL(rangeTest(i),0); BOOST_CHECK_EQUAL(newData(i+rangeBegin),0); } //check that after clear all elements outside the range are still intact for(std::size_t i = 0; i != rangeBegin; ++i){ BOOST_CHECK_EQUAL(newData(i),1.0); } for(std::size_t i = rangeEnd; i != Dimensions; ++i){ BOOST_CHECK_EQUAL(newData(i),1.0); } } } } } BOOST_AUTO_TEST_SUITE_END();Shark-3.1.4/Test/LinAlg/DiagonalMatrix.cpp000066400000000000000000000052211314655040000202610ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Diagonal_Matrix #include #include #include using namespace shark; template void checkDiagonalMatrix(M const& diagonal, D const& diagonalElements, std::size_t const Dimensions){ BOOST_REQUIRE_EQUAL(diagonal.size1(),Dimensions); BOOST_REQUIRE_EQUAL(diagonal.size2(),Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ if(i != j) BOOST_CHECK_EQUAL(diagonal(i,j),0); else BOOST_CHECK_EQUAL(diagonal(i,i),diagonalElements(i)); } } } BOOST_AUTO_TEST_SUITE (LinAlg_DiagonalMatrix) BOOST_AUTO_TEST_CASE( LinAlg_Diagonal_Matrix_Basic ){ std::size_t const Dimensions = 10; IntVector diagonalElements(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i) diagonalElements(i) = (unsigned int)i; blas::diagonal_matrix diagonal(diagonalElements); checkDiagonalMatrix(diagonal,diagonalElements,Dimensions); } BOOST_AUTO_TEST_CASE( LinAlg_Diagonal_Matrix_Copy ){ std::size_t const Dimensions = 10; IntVector diagonalElements(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i) diagonalElements(i) = (unsigned int)i; blas::diagonal_matrix diagonal(diagonalElements); blas::diagonal_matrix diagonal2(diagonal); checkDiagonalMatrix(diagonal2,diagonalElements,Dimensions); } BOOST_AUTO_TEST_CASE( LinAlg_Diagonal_Matrix_DefaultCtorAndAssignment){ std::size_t const Dimensions = 10; IntVector diagonalElements(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i) diagonalElements(i) = (unsigned int)i; blas::diagonal_matrix diagonal(diagonalElements); blas::diagonal_matrix diagonal2; BOOST_REQUIRE_EQUAL(diagonal2.size1(),0); BOOST_REQUIRE_EQUAL(diagonal2.size2(),0); //assignment diagonal2 = diagonal; checkDiagonalMatrix(diagonal2,diagonalElements,Dimensions); } BOOST_AUTO_TEST_CASE( LinAlg_Diagonal_Matrix_Assignment){ std::size_t const Dimensions = 10; IntVector diagonalElements(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i) diagonalElements(i) = (unsigned int)i; blas::diagonal_matrix diagonal(diagonalElements); IntMatrix diagonal2(diagonal); checkDiagonalMatrix(diagonal2,diagonalElements,Dimensions); } BOOST_AUTO_TEST_CASE( 
LinAlg_Identity_Matrix ){ std::size_t const Dimensions = 10; IntVector diagonalElements(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i) diagonalElements(i) = 1; blas::identity_matrix diagonal(Dimensions); checkDiagonalMatrix(diagonal,diagonalElements,Dimensions); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/Initialize.cpp000066400000000000000000000304101314655040000174550ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Initialize #include #include #include #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (LinAlg_Initialize) BOOST_AUTO_TEST_CASE( LinAlg_init_test_base ){ //check that the first step allready works UIntVector vec1(1),vec2(1); vec1.clear(); vec2.clear(); init(vec1)<<1u; init(vec2)< vectors(3,UIntVector(3)); for(unsigned int i = 0; i != 3; ++i){ for(unsigned int j = 0; j != 3; ++j){ vectors[i](j)=1+i*3+j; } } //check vectorSet in the middle of an expression UIntVector result(11); init(result)<<0,vectorSet(vectors),10; for(std::size_t i = 0; i != 11; ++i){ BOOST_CHECK_EQUAL(result(i),i); } //check vectorSet in the beginning of an expression result.clear(); init(result)< constVectors = vectors; result.clear(); init(result)<<0,vectorSet(constVectors),10; for(std::size_t i = 0; i != 11; ++i){ BOOST_CHECK_EQUAL(result(i),i); } result.clear(); init(result)< matrices(3); matrices[0]=matrix; matrices[1]=2*matrix; matrices[2]=3*matrix; //check matrixSet in the middle of an expression UIntVector result(29); init(result)<<0,matrixSet(matrices),28u; BOOST_CHECK_EQUAL(result(0),0u); BOOST_CHECK_EQUAL(result(28),28u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 9; ++j){ BOOST_CHECK_EQUAL(result(i*9+j+1),(1+j)*(1+i)); } } //check vectorSet in the beginning of an expression result.clear(); init(result)< constMatrices = matrices; result.clear(); init(result)<<0,matrixSet(constMatrices),28u; BOOST_CHECK_EQUAL(result(0),0u); BOOST_CHECK_EQUAL(result(28),28u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 9; ++j){ BOOST_CHECK_EQUAL(result(i*9+j+1),(1+j)*(1+i)); } } result.clear(); init(result)< network; network.setStructure(2,5,2); initRandomNormal(network,1); RealVector result(network.numberOfParameters()); init(result)< > networks(3); for(std::size_t i = 0; i != 3; ++i){ networks[i].setStructure(2,5,2); initRandomNormal(networks[i],1); } std::size_t n = networks[0].numberOfParameters(); RealVector result(3*n); init(result)<> value; init(vec1) >> vec2; BOOST_CHECK_EQUAL(value,1u); BOOST_CHECK_EQUAL(vec2(0),1u); //check again with const input const UIntVector constVec = vec1; vec2.clear(); value = 0; init(constVec) >> value; init(constVec) >> vec2; BOOST_CHECK_EQUAL(value,1u); BOOST_CHECK_EQUAL(vec2(0),1u); //now multiple values UIntVector vec3(3); vec3(0) = 1u; vec3(1u) = 2; vec3(2) = 3; unsigned int a = 0; unsigned int b = 0; unsigned int c = 0; init(vec3) >> a,b,c; BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(b,2u); BOOST_CHECK_EQUAL(c,3u); //multiple vectors UIntVector vec4(1); vec4.clear(); UIntVector vec5(2); vec5.clear(); init(vec3) >> vec4,vec5; BOOST_CHECK_EQUAL(vec4(0),1u); BOOST_CHECK_EQUAL(vec5(0),2u); BOOST_CHECK_EQUAL(vec5(1),3u); //vector and multiple values vec4.clear(); a=0; b=0; init(vec3) >> vec4,b,c; BOOST_CHECK_EQUAL(vec4(0),1u); BOOST_CHECK_EQUAL(b,2u); BOOST_CHECK_EQUAL(c,3u); //value and vector vec5.clear(); a=0; init(vec3) >> a,vec5; BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(vec5(0),2u); BOOST_CHECK_EQUAL(vec5(1),3u); //subrange as source vector b = 0; c = 0; 
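	//Before the subrange cases: a compact round-trip sketch of the init
	//mechanism, using only the operators exercised in this test. operator<<
	//packs the listed values into the vector; operator>> unpacks them again
	//in the same order.
	{
		UIntVector packed(2);
		packed.clear();
		unsigned int u = 4;
		unsigned int v = 7;
		init(packed) << u,v;//write u and v into packed
		unsigned int u2 = 0;
		unsigned int v2 = 0;
		init(packed) >> u2,v2;//read them back
		BOOST_CHECK_EQUAL(u2,4u);
		BOOST_CHECK_EQUAL(v2,7u);
	}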
init(subrange(vec3,1,3)) >> b,c; BOOST_CHECK_EQUAL(b,2u); BOOST_CHECK_EQUAL(c,3u); //subrange as target vector a = 0; b= 0; vec5.clear(); init(vec3) >> a,b,subrange(vec5,1u,2); BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(b,2u); BOOST_CHECK_EQUAL(vec5(0),0u); BOOST_CHECK_EQUAL(vec5(1u),3u); b = 0; c = 0; vec5.clear(); init(vec3) >> subrange(vec5,1,2),b,c; BOOST_CHECK_EQUAL(vec5(0),0u); BOOST_CHECK_EQUAL(vec5(1),1u); BOOST_CHECK_EQUAL(b,2u); BOOST_CHECK_EQUAL(c,3u); } BOOST_AUTO_TEST_CASE( LinAlg_split_test_vectorSet ){ std::vector vectors(3,RealVector(3)); UIntVector input(11); for(unsigned int i = 0; i != 11; ++i){ input(i) = i+1; } //check vectorSet in the middle of an expression unsigned int a = 0; unsigned int b = 0; init(input) >> a,vectorSet(vectors),b; BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(b,11u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_EQUAL(vectors[i](j),2+i*3+j); } } //check vectorSet in the beginning of an expression a = 0; b = 0; for(std::size_t i = 0; i != 3; ++i){ vectors[i].clear(); } init(input ) >> vectorSet(vectors),a,b; for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_EQUAL(vectors[i](j),1+i*3+j); } } BOOST_CHECK_EQUAL(a,10u); BOOST_CHECK_EQUAL(b,11u); } BOOST_AUTO_TEST_CASE( LinAlg_split_test_toMatrix ){ //test dense matrix //input UIntVector input(11); for(unsigned int i = 0; i != 11; ++i){ input(i) = i+1; } //check in the middle of an expression UIntMatrix matrix(3,3); matrix.clear(); unsigned int a = 0; unsigned int b = 0; init(input)>>a,toVector(matrix),b; BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(b,11u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_EQUAL(matrix(i,j),2u+i*3+j); } } //check in the beginning of an expression a = 0; b = 0; matrix.clear(); init(input)>>toVector(matrix),a,b; BOOST_CHECK_EQUAL(a,10u); BOOST_CHECK_EQUAL(b,11u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_EQUAL(matrix(i,j),1u+i*3u+j); } } //test sparse matrix //first define entries and input CompressedUIntMatrix sparseMatrix(3,3); sparseMatrix(0,0)=25; sparseMatrix(1u,1u)=25; sparseMatrix(2,2)=25; UIntVector sparseInput(5); for(unsigned int i = 0; i != 5; ++i){ sparseInput(i) = i+1; } //check in the middle of an expression a = 0; b = 0; init(sparseInput)>>a,toVector(sparseMatrix),b; BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(b,5u); for(std::size_t i = 0; i != 3; ++i){ BOOST_CHECK_EQUAL(sparseMatrix(i,i),i+2u); } //check vectorSet in the beginning of an expression sparseMatrix.clear(); sparseMatrix(0,0)=25; sparseMatrix(1u,1u)=25; sparseMatrix(2,2)=25; init(sparseInput)>>toVector(sparseMatrix),a,b; BOOST_CHECK_EQUAL(a,4u); BOOST_CHECK_EQUAL(b,5u); for(std::size_t i = 0; i != 3; ++i){ BOOST_CHECK_EQUAL(sparseMatrix(i,i),i+1); } } BOOST_AUTO_TEST_CASE( LinAlg_split_test_matrixSet ){ UIntVector input(29); for(unsigned int i = 0; i != 29; ++i){ input(i) = i+1; } std::vector matrices(3,IntMatrix(3,3)); //check matrixSet in the middle of an expression unsigned int a = 0; unsigned int b = 0; init(input)>>a,matrixSet(matrices),b; BOOST_CHECK_EQUAL(a,1u); BOOST_CHECK_EQUAL(b,29u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 3; ++j){ for(std::size_t k = 0; k != 3; ++k){ BOOST_CHECK_EQUAL(matrices[i](j,k),i*9u+3u*j+k+2u); } } } //check vectorSet in the beginning of an expression a = 0; b = 0; matrices[0].clear(); matrices[1u].clear(); matrices[2].clear(); init(input)>>matrixSet(matrices),a,b; 
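	//matrixSet consumes the three 3x3 matrices element by element (27 values
	//of the 29-element input), so the trailing scalars a and b receive input
	//entries 28 and 29 - exactly what the two checks below verify.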
BOOST_CHECK_EQUAL(a,28u); BOOST_CHECK_EQUAL(b,29u); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 3; ++j){ for(std::size_t k = 0; k != 3; ++k){ BOOST_CHECK_EQUAL(matrices[i](j,k),i*9u+3u*j+k+1u); } } } } BOOST_AUTO_TEST_CASE( LinAlg_split_test_parameters ){ //initialize network FFNet network; network.setStructure(2,5,2); initRandomNormal(network,1u); RealVector input = network.parameterVector(); initRandomNormal(network,1u); init(input) >> blas::parameters(network); BOOST_CHECK_SMALL(norm_sqr(input-network.parameterVector()),1.e-10); } BOOST_AUTO_TEST_CASE( LinAlg_split_test_parameterSet ){ //initialize network std::vector > networks(3); for(std::size_t i = 0; i != 3; ++i){ networks[i].setStructure(2,5,2); initRandomNormal(networks[i],1); } std::size_t n = networks[0].numberOfParameters(); RealVector input(3*n); for(std::size_t i = 0; i != 3*n;++i){ input(i) = Rng::uni(0,1); } init(input) >> blas::parameterSet(networks); for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != n;++j){ BOOST_CHECK_SMALL(input(i*n+j)-networks[i].parameterVector()(j),1.e-20); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/KernelMatrix.cpp000066400000000000000000000162731314655040000177740ustar00rootroot00000000000000#define BOOST_TEST_MODULE ALGORITHMS_QP_KERNELMATRIX #include #include #include #include #include #include #include #include #include #include #include #include using namespace shark; class Problem : public LabeledDataDistribution { public: void draw(RealVector& input,unsigned int& label) const{ input.resize(5); label = Rng::coinToss(0.5)*2+1; for(std::size_t i = 0; i != 5; ++i){ input(i) = Rng::uni(-1,1); } } }; struct Fixture { Fixture():size(100){ Problem problem; data = problem.generateDataset(size,9); //create standard kernel matrix kernelMatrix = calculateRegularizedKernelMatrix(kernel,data.inputs()); } std::size_t size; LabeledData data; LinearKernel<> kernel; RealMatrix kernelMatrix; }; template void testFullMatrix(MatrixType& matrix, Result const& result){ std::size_t size = matrix.size(); //calculate full matrix RealMatrix matrixResult(size,size); matrix.matrix(matrixResult); for(std::size_t i = 0; i != size; ++i){ for(std::size_t j = 0; j != size; ++j){ BOOST_CHECK_SMALL(matrixResult(i,j)-result(i,j),1.e-13); } } } template void testMatrix(MatrixType& matrix, Result const& result){ std::size_t size = matrix.size(); //check entry for(std::size_t i = 0; i != size; ++i){ for(std::size_t j = 0; j != size; ++j){ BOOST_CHECK_SMALL(matrix(i,j)-result(i,j),1.e-13); } } //check row RealVector matrixRow(size); std::size_t start = size/4; std::size_t end = size/2; for(std::size_t i = 0; i != size; ++i){ matrix.row(i,0,size,&matrixRow[0]);//full row for(std::size_t j = 0; j != size; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(i,j),1.e-13); matrix.row(i,start,end,&matrixRow[start]);//subrow for(std::size_t j = start; j != end; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(i,j),1.e-13); } //flip columns matrix.flipColumnsAndRows(start,end); matrix.row(start,0,size,&matrixRow[0]);//full row for(std::size_t j = 0; j != start; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(end,j),1.e-13); BOOST_CHECK_SMALL(matrixRow(start)-result(end,end),1.e-13); for(std::size_t j = start+1; j != end; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(end,j),1.e-13); BOOST_CHECK_SMALL(matrixRow(end)-result(end,start),1.e-13); for(std::size_t j = end+1; j != size; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(end,j),1.e-13); matrix.row(end,0,size,&matrixRow[0]);//full row for(std::size_t j = 0; j != 
start; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(start,j),1.e-13); BOOST_CHECK_SMALL(matrixRow(start)-result(end,start),1.e-13); for(std::size_t j = start+1; j != end; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(start,j),1.e-13); BOOST_CHECK_SMALL(matrixRow(end)-result(start,start),1.e-13); for(std::size_t j = end+1; j != size; ++j) BOOST_CHECK_SMALL(matrixRow(j)-result(start,j),1.e-13); } BOOST_FIXTURE_TEST_SUITE (LinAlg_KernelMatrix, Fixture) BOOST_AUTO_TEST_CASE( QP_KernelMatrix ) { RealMatrix matrix = kernelMatrix; KernelMatrix km(kernel,data.inputs()); testFullMatrix(km,matrix); testMatrix(km,matrix); } BOOST_AUTO_TEST_CASE( QP_RegularizedKernelMatrix ) { RealMatrix matrix = kernelMatrix; RealVector diagVec(size); for(std::size_t i = 0; i != size; ++i){ diagVec(i) = i; } diag(matrix) += diagVec; RegularizedKernelMatrix km(kernel,data.inputs(),diagVec); testFullMatrix(km,matrix); testMatrix(km,matrix); } BOOST_AUTO_TEST_CASE( QP_ModifiedKernelMatrix ) { double sameClass = 2; double diffClass = -0.5; RealMatrix matrix = kernelMatrix; for(std::size_t i = 0; i != size; ++i){ for(std::size_t j = 0; j != size; ++j){ if(data.element(i).label == data.element(j).label) matrix(i,j) *= sameClass; else matrix(i,j) *= diffClass; } } ModifiedKernelMatrix km(kernel,data,sameClass,diffClass); testFullMatrix(km,matrix); testMatrix(km,matrix); } BOOST_AUTO_TEST_CASE( QP_BlockMatrix ) { RealMatrix matrix(2*size,2*size); subrange(matrix,0,100,0,100) = kernelMatrix; subrange(matrix,0,100,100,200) = kernelMatrix; subrange(matrix,100,200,0,100) = kernelMatrix; subrange(matrix,100,200,100,200) = kernelMatrix; KernelMatrix kmbase(kernel,data.inputs()); BlockMatrix2x2 > km(&kmbase); testFullMatrix(km,matrix); testMatrix(km,matrix); } BOOST_AUTO_TEST_CASE( QP_PrecomputedMatrix ) { KernelMatrix km(kernel,data.inputs()); PrecomputedMatrix > cache(&km); testMatrix(cache,kernelMatrix); } BOOST_AUTO_TEST_CASE( QP_CachedMatrix_Simple ) { std::size_t numRowsToStore = 10; std::size_t cacheSize = numRowsToStore*size; KernelMatrix km(kernel,data.inputs()); CachedMatrix > cache(&km,cacheSize); BOOST_REQUIRE_EQUAL(cache.getMaxCacheSize(),cacheSize); BOOST_REQUIRE_EQUAL(cache.getCacheSize(),0); testMatrix(cache,kernelMatrix); //this last call should not have cached anything BOOST_REQUIRE_EQUAL(cache.getCacheSize(),0); for(std::size_t i = 0; i != size; ++i){ BOOST_CHECK_EQUAL(cache.isCached(i), false); BOOST_CHECK_EQUAL(cache.getCacheRowSize(i), 0); } } BOOST_AUTO_TEST_CASE( QP_CachedMatrix_Flipping ) { std::size_t numRowsToStore = 10; std::size_t cacheSize = numRowsToStore*size; std::size_t simulationSteps = 1000; KernelMatrix km(kernel,data.inputs()); KernelMatrix groundTruthMatrix(kernel,data.inputs()); CachedMatrix > cache(&km,cacheSize); BOOST_REQUIRE_EQUAL(cache.getMaxCacheSize(),cacheSize); BOOST_REQUIRE_EQUAL(cache.getCacheSize(),0); //next do a running check that the cache works with flipping of rows and random accesses for(std::size_t t = 0; t != simulationSteps; ++t){ std::size_t index = Rng::discrete(0,size-1); std::size_t accessSize = Rng::discrete(size/2,size-1); std::size_t flipi = Rng::discrete(0,size-1); std::size_t flipj = Rng::discrete(0,size-1); //access matrix cache and check whether the right values are returned double* line = cache.row(index,0,accessSize); for(std::size_t i = 0; i != accessSize; ++i){ BOOST_CHECK_CLOSE(line[i],groundTruthMatrix(index,i), 1.e-10); } //flip cache.flipColumnsAndRows(flipi,flipj); groundTruthMatrix.flipColumnsAndRows(flipi,flipj); } //truncate the cache //~ 
cache.setMaxCachedIndex(10); //~ for(std::size_t i = 0; i != 10; ++i){ //~ BOOST_CHECK(!cache.isCached(i) || cache.getCacheRowSize(i) <= 10 ); //~ } //finally clear the cache, afterwards it should be empty cache.clear(); BOOST_REQUIRE_EQUAL(cache.getCacheSize(),0); for(std::size_t i = 0; i != size; ++i){ BOOST_CHECK_EQUAL(cache.isCached(i), false); BOOST_CHECK_EQUAL(cache.getCacheRowSize(i), 0); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/LRUCache.cpp000066400000000000000000000120351314655040000167450ustar00rootroot00000000000000#define BOOST_TEST_MODULE LINALG_LRUCACHE #include #include #include #include #include using namespace shark; void simulateCache( std::size_t maxIndex,std::size_t cacheSize, std::vector<std::size_t> const& accessIndices, std::vector<std::size_t> const& accessSizes, std::vector<std::pair<std::size_t,std::size_t> > const& flips ){ BOOST_REQUIRE_EQUAL(accessIndices.size(), accessSizes.size()); BOOST_REQUIRE_EQUAL(accessIndices.size(), flips.size()); std::size_t simulationSteps = accessIndices.size(); LRUCache<std::size_t> cache(maxIndex,cacheSize); std::size_t currentCacheSize = 0; std::vector<std::size_t> elemSizes(maxIndex,0); std::list<std::size_t> lruList; for(std::size_t t = 0; t != simulationSteps; ++t){ std::size_t index = accessIndices[t]; std::size_t size = accessSizes[t]; //in the simulated cache we can just throw queried elements away if they would //need to be resized and add them later on if(size > elemSizes[index] && elemSizes[index] != 0){ //remove element from the simulated cache currentCacheSize -= elemSizes[index]; elemSizes[index] = 0; lruList.erase(std::find(lruList.begin(),lruList.end(),index)); } if(size <= elemSizes[index] && elemSizes[index] != 0){ lruList.erase(std::find(lruList.begin(),lruList.end(),index)); lruList.push_front(index); }else{ while(cacheSize < currentCacheSize+size){ std::size_t index2 = lruList.back(); currentCacheSize -= elemSizes[index2]; elemSizes[index2] = 0; lruList.pop_back(); } //add element to the simulated cache currentCacheSize +=size; elemSizes[index] = size; lruList.push_front(index); } //access real cache std::size_t* line = cache.getCacheLine(index,size); for(std::size_t i = 0; i != size; ++i){ line[i] = i+1; } //check whether the caching is correct for(std::size_t i = 0; i != maxIndex; ++i){ BOOST_REQUIRE_EQUAL(cache.lineLength(i), elemSizes[i]); BOOST_CHECK(cache.cachedLines()<= cacheSize); BOOST_CHECK_EQUAL(cache.size(), currentCacheSize); //check that elements are the same for(std::size_t j = 0; j != elemSizes[i]; ++j){ BOOST_CHECK_EQUAL(cache.getLinePointer(i)[j], j+1); } } //apply flipping std::pair<std::size_t,std::size_t> flip = flips[t]; if(flip.first == flip.second) continue; //flip in simulated cache std::swap(elemSizes[flip.first],elemSizes[flip.second]); std::list<std::size_t>::iterator iter1 = std::find(lruList.begin(),lruList.end(),flip.first); std::list<std::size_t>::iterator iter2 = std::find(lruList.begin(),lruList.end(),flip.second); if(iter1 == lruList.end() && iter2 == lruList.end()) continue; else if(iter1 == lruList.end()) *iter2=flip.first; else if(iter2 == lruList.end()) *iter1 = flip.second; else std::iter_swap(iter1,iter2); //flip in real cache cache.swapLineIndices(flip.first,flip.second); } } BOOST_AUTO_TEST_SUITE (LinAlg_LRUCache) ///\brief tests whether simple same length access-schemes work BOOST_AUTO_TEST_CASE( LinAlg_LRUCache_Simple_Access ) { std::size_t cacheSize = 10; std::size_t maxIndex = 20; std::size_t simulationSteps = 10000; std::vector<std::size_t> accessIndices(simulationSteps); std::vector<std::size_t> accessSizes(simulationSteps,1); std::vector<std::pair<std::size_t,std::size_t> > flips(simulationSteps,std::pair<std::size_t,std::size_t>(0,0)); for(std::size_t i = 0; i != simulationSteps; 
++i){ accessIndices[i] = Rng::discrete(0,maxIndex-1); } simulateCache(maxIndex, cacheSize,accessIndices,accessSizes,flips); } ///\brief tests whether simple different length access-schemes work BOOST_AUTO_TEST_CASE( LinAlg_LRUCache_DifferentLength_Access ) { std::size_t cacheSize = 10; std::size_t maxIndex = 20; std::size_t simulationSteps = 10000; std::vector<std::size_t> accessIndices(simulationSteps); std::vector<std::size_t> accessSizes(simulationSteps); std::vector<std::pair<std::size_t,std::size_t> > flips(simulationSteps,std::pair<std::size_t,std::size_t>(0,0)); for(std::size_t i = 0; i != simulationSteps; ++i){ accessIndices[i] = Rng::discrete(0,maxIndex-1); accessSizes[i] = Rng::discrete(1,3); } simulateCache(maxIndex, cacheSize,accessIndices,accessSizes,flips); } ///\brief tests whether different length access-schemes combined with flipped indices work BOOST_AUTO_TEST_CASE( LinAlg_LRUCache_DifferentLength_Access_flipped ) { std::size_t cacheSize = 10; std::size_t maxIndex = 20; std::size_t simulationSteps = 10000; std::vector<std::size_t> accessIndices(simulationSteps); std::vector<std::size_t> accessSizes(simulationSteps); std::vector<std::pair<std::size_t,std::size_t> > flips(simulationSteps); for(std::size_t i = 0; i != simulationSteps; ++i){ accessIndices[i] = Rng::discrete(0,maxIndex-1); accessSizes[i] = Rng::discrete(1,3); flips[i].first = Rng::discrete(0,maxIndex-1); flips[i].second = Rng::discrete(0,maxIndex-1); } simulateCache(maxIndex, cacheSize,accessIndices,accessSizes,flips); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/Metrics.cpp000066400000000000000000000137711314655040000167720ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Metrics #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (LinAlg_Metrics) BOOST_AUTO_TEST_CASE( LinAlg_Norm_distanceSqr_Vector){ //dense - dense { IntVector vec1(3); vec1(0) = 2; vec1(1) = 4; vec1(2) = 6; IntVector vec2(3); vec2(0) = 3; vec2(1) = 7; vec2(2) = 11; int result1 = distanceSqr(vec2,vec1); int result2 = distanceSqr(vec1,vec2); BOOST_CHECK_EQUAL(result1,35); BOOST_CHECK_EQUAL(result2,35); } //dense - compressed { IntVector vec1(3); vec1(0) = 2; vec1(1) = 4; vec1(2) = 6; CompressedIntVector vec2(3); vec2(1) = 7; int result1 = distanceSqr(vec2,vec1); int result2 = distanceSqr(vec1,vec2); BOOST_CHECK_EQUAL(result1,49); BOOST_CHECK_EQUAL(result2,49); } //compressed- compressed { CompressedIntVector vec1(100); vec1(0) = 2; vec1(10) = 4; vec1(20) = 6; CompressedIntVector vec2(100); vec2(0) = 3; vec2(8) = 7; vec2(20) = 11; int result1 = distanceSqr(vec2,vec1); int result2 = distanceSqr(vec1,vec2); BOOST_CHECK_EQUAL(result1,91); BOOST_CHECK_EQUAL(result2,91); } } BOOST_AUTO_TEST_CASE( LinAlg_Norm_distanceSqr_Matrix_Vector){ //dense - dense { IntMatrix vec1(3,3); vec1(0,0) = 2; vec1(0,1) = 4; vec1(0,2) = 6; vec1(1,0) = 3; vec1(1,1) = 5; vec1(1,2) = 7; vec1(2,0) = 4; vec1(2,1) = 6; vec1(2,2) = 8; IntVector vec2(3); vec2(0) = 3; vec2(1) = 7; vec2(2) = 11; IntVector result1 = distanceSqr(vec2,vec1); IntVector result2 = distanceSqr(vec1,vec2); BOOST_CHECK_EQUAL(result1(0),35); BOOST_CHECK_EQUAL(result1(1),20); BOOST_CHECK_EQUAL(result1(2),11); BOOST_CHECK_EQUAL(result2(0),35); BOOST_CHECK_EQUAL(result2(1),20); BOOST_CHECK_EQUAL(result2(2),11); } //dense - compressed { CompressedIntMatrix vec1(3,3); vec1(0,2) = 6; vec1(2,0) = 4; vec1(2,1) = 6; vec1(2,2) = 8; IntMatrix vec3(3,3,0); vec3(0,2) = 6; vec3(2,0) = 4; vec3(2,1) = 6; vec3(2,2) = 8; IntVector vec2(3); vec2(0) = 3; vec2(1) = 7; vec2(2) = 11; CompressedIntVector vec4(3); vec4(0) = 3; vec4(1) = 7; vec4(2) = 11; IntVector result1 = distanceSqr(vec2,vec1); IntVector result2 = 
distanceSqr(vec1,vec2); IntVector result3 = distanceSqr(vec3,vec4); IntVector result4 = distanceSqr(vec4,vec3); BOOST_CHECK_EQUAL(result1(0),83); BOOST_CHECK_EQUAL(result1(1),179); BOOST_CHECK_EQUAL(result1(2),11); BOOST_CHECK_EQUAL(result2(0),83); BOOST_CHECK_EQUAL(result2(1),179); BOOST_CHECK_EQUAL(result2(2),11); BOOST_CHECK_EQUAL(result3(0),83); BOOST_CHECK_EQUAL(result3(1),179); BOOST_CHECK_EQUAL(result3(2),11); BOOST_CHECK_EQUAL(result4(0),83); BOOST_CHECK_EQUAL(result4(1),179); BOOST_CHECK_EQUAL(result4(2),11); } //compressed- compressed { CompressedIntMatrix vec1(3,100); vec1(0,0) = 2; vec1(0,10) = 4; vec1(0,20) = 6; vec1(2,1) = 1; vec1(2,2) = 6; vec1(2,3) = 2; CompressedIntVector vec2(100); vec2(0) = 3; vec2(8) = 7; vec2(20) = 11; IntVector result1 = distanceSqr(vec2,vec1); IntVector result2 = distanceSqr(vec1,vec2); BOOST_CHECK_EQUAL(result1(0),91); BOOST_CHECK_EQUAL(result1(1),179); BOOST_CHECK_EQUAL(result1(2),220); BOOST_CHECK_EQUAL(result2(0),91); BOOST_CHECK_EQUAL(result2(1),179); BOOST_CHECK_EQUAL(result2(2),220); } } BOOST_AUTO_TEST_CASE( LinAlg_Norm_distanceSqr_Matrix_Matrix){ //small dense block { RealMatrix mat1(2,3); mat1(0,0) = 2; mat1(0,1) = 4; mat1(0,2) = 6; mat1(1,0) = 3; mat1(1,1) = 5; mat1(1,2) = 7; RealMatrix mat2(3,3); mat2(0,0) = 3; mat2(0,1) = 5; mat2(0,2) = 7; mat2(1,0) = 4; mat2(1,1) = 6; mat2(1,2) = 8; mat2(2,0) = 2; mat2(2,1) = 1; mat2(2,2) = 0; RealMatrix result1 = distanceSqr(mat1,mat2); RealMatrix result2 = distanceSqr(mat2,mat1); BOOST_CHECK_CLOSE(result1(0,0),3,1e-12); BOOST_CHECK_CLOSE(result1(0,1),12,1e-12); BOOST_CHECK_CLOSE(result1(0,2),45,1e-12); BOOST_CHECK_CLOSE(result1(1,0),0,1e-12); BOOST_CHECK_CLOSE(result1(1,1),3,1e-12); BOOST_CHECK_CLOSE(result1(1,2),66,1e-12); BOOST_CHECK_CLOSE(result2(0,0),3,1e-12); BOOST_CHECK_CLOSE(result2(0,1),0,1e-12); BOOST_CHECK_CLOSE(result2(1,1),3,1e-12); BOOST_CHECK_CLOSE(result2(1,0),12,1e-12); BOOST_CHECK_CLOSE(result2(2,0),45,1e-12); BOOST_CHECK_CLOSE(result2(2,1),66,1e-12); } //small compressed block { CompressedIntMatrix mat1(2,3); mat1(0,0) = 2; mat1(0,1) = 4; mat1(0,2) = 6; mat1(1,0) = 3; mat1(1,1) = 5; mat1(1,2) = 7; CompressedIntMatrix mat2(3,3); mat2(0,0) = 3; mat2(0,1) = 5; mat2(0,2) = 7; mat2(1,0) = 4; mat2(1,1) = 6; mat2(1,2) = 8; mat2(2,0) = 2; mat2(2,1) = 1; mat2(2,2) = 0; IntMatrix result1 = distanceSqr(mat1,mat2); IntMatrix result2 = distanceSqr(mat2,mat1); BOOST_CHECK_EQUAL(result1(0,0),3); BOOST_CHECK_EQUAL(result1(0,1),12); BOOST_CHECK_EQUAL(result1(0,2),45); BOOST_CHECK_EQUAL(result1(1,0),0); BOOST_CHECK_EQUAL(result1(1,1),3); BOOST_CHECK_EQUAL(result1(1,2),66); BOOST_CHECK_EQUAL(result2(0,0),3); BOOST_CHECK_EQUAL(result2(0,1),0); BOOST_CHECK_EQUAL(result2(1,1),3); BOOST_CHECK_EQUAL(result2(1,0),12); BOOST_CHECK_EQUAL(result2(2,0),45); BOOST_CHECK_EQUAL(result2(2,1),66); } //big dense block { RealMatrix mat1(64,3); RealMatrix mat2(64,3); for(std::size_t i = 0; i != 64; ++i){ mat1(i,0) = 3.0*i; mat1(i, 1) = 3.0*i + 1; mat1(i, 2) = 3.0*i + 2; mat2(i,0) = 1.5*i; mat2(i,1) = 1.5*i+1; mat2(i,2) = 1.5*i+2; } RealMatrix result1 = distanceSqr(mat1,mat2); RealMatrix result2 = distanceSqr(mat2,mat1); for(std::size_t i = 0; i != 64; ++i){ for(std::size_t j = 0; j != 64; ++j){ double d = distanceSqr(row(mat1,i),row(mat2,j)); BOOST_CHECK_CLOSE(result1(i,j),d,1.e-12); BOOST_CHECK_CLOSE(result2(j,i),d,1.e-12); } } } } BOOST_AUTO_TEST_SUITE_END() 
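// --- Editor's note (a reference sketch, not from the original source): the
// hard-coded expected values in the checks above all follow from the plain
// squared Euclidean distance, which distanceSqr computes element-wise for a
// pair of vectors, row-wise for the matrix-vector overloads, and pairwise
// over rows for the matrix-matrix overload. The dense vector case, written
// out naively (assuming only RealVector from the includes above):
static double naiveDistanceSqr(RealVector const& x, RealVector const& y){
	double sum = 0;
	for(std::size_t i = 0; i != x.size(); ++i){
		double d = x(i) - y(i); //coordinate-wise difference
		sum += d * d;           //accumulate squares
	}
	return sum; //equals distanceSqr(x,y) up to rounding
}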
Shark-3.1.4/Test/LinAlg/PartlyPrecomputedMatrix.cpp000066400000000000000000000261071314655040000222340ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test for the PartlyPrecomputedMatrix class * * * * \author Aydin Demircioglu * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE LINALG_PARTLYPRECOMPUTEDMATRIX #include #include #include #include #include #include #include #include #include using namespace shark; template class DummyKernelMatrix { public: typedef CacheType QpFloatType; DummyKernelMatrix(size_t _size) { m_size = _size; } /// return a single matrix entry QpFloatType operator()(std::size_t i, std::size_t j) const { return entry(i, j); } /// return a single matrix entry QpFloatType entry(std::size_t i, std::size_t j) const { return (QpFloatType)(i + 1) / (j + 1); } /// \brief Computes the i-th row of the kernel matrix. /// ///The entries start,...,end of the i-th row are computed and stored in storage. ///There must be enough room for this operation preallocated. void row(std::size_t i, std::size_t start, std::size_t end, QpFloatType* storage) const { m_accessCounter += end - start; SHARK_PARALLEL_FOR(int j = start; j < (int) end; j++) { storage[j - start] = entry(i, j); } } /// return the size of the quadratic matrix std::size_t size() const { return m_size; } /// query the kernel access counter unsigned long long getAccessCount() const { return m_accessCounter; } /// reset the kernel access counter void resetAccessCount() { m_accessCounter = 0; } protected: // fake size of matrix size_t m_size; /// counter for the kernel accesses mutable unsigned long long m_accessCounter; }; // FIXME: count accesses to GaussianRbfKernel to verify that the cache works. ///\brief test if the whole matrix fits, if we make the cache large enough BOOST_AUTO_TEST_SUITE (LinAlg_PartlyPrecomputedMatrix) BOOST_AUTO_TEST_CASE(LinAlg_PartlyPrecomputedMatrix_MediumCache) { Rng::seed(42); // FIXME: need a test for 1x1?? ;) size_t maxDimension = 120; size_t maxGamma = 100; size_t repeats = 16; bool verbose = false; for(size_t i = 0; i < repeats; i++) { // some random dimension size_t currentDimension = Rng::discrete(2, maxDimension); if(verbose) std::cout << i << std::endl; if(verbose) std::cout << currentDimension << std::endl; // create unionjack matrix std::vector unionJack(currentDimension); RealVector unionJackVector(currentDimension); for(size_t r = 0; r < currentDimension; r++) { unionJackVector.clear(); unionJackVector[currentDimension - r - 1] += 1; unionJackVector[r] += 1; unionJack[r] = unionJackVector; } Data unionJackData = createDataFromRange(unionJack); if(verbose) std::cout << ".." 
<< std::endl; for(size_t r = 0; r < unionJackData.numberOfElements(); r++) { for(size_t c = 0; c < unionJackData.element(r).size(); c++) { if(verbose) std::cout << " " << unionJackData.element(r)[c]; } if(verbose) std::cout << std::endl; } // compute a much bigger cachesize typedef KernelMatrix KernelMatrixType; size_t cacheSize = sizeof(double) * currentDimension * currentDimension / 2; if(verbose) std::cout << "cache size: " << cacheSize << std::endl; // now cache this with a kernel with random gamma double gamma = Rng::uni(1, maxGamma); if(verbose) std::cout << "g:" << gamma << std::endl; GaussianRbfKernel<> kernel(log(gamma)); KernelMatrixType km(kernel, unionJackData); PartlyPrecomputedMatrix K(&km, cacheSize); // now test for any row-- // the kernel matrix should have the following form: // on the two crossing diagonals we have ones. // if its even: the rest of the entries are 1/gamma^4 // if its odd: the row in the middle and the column in // the middle have 1/gamma^6 double error = 0; for(size_t r = 0; r < currentDimension; r++) { for(size_t c = 0; c < currentDimension; c++) { double expectedEntry = gamma * gamma * gamma * gamma; if((r == c) || (c == currentDimension - r - 1)) expectedEntry = 1; if((currentDimension % 2 == 1) && ((r == currentDimension / 2) || (c == currentDimension / 2))) expectedEntry *= gamma * gamma; if(verbose) std::cout << " " << 1.0 / K.entry(r, c) << "/" << expectedEntry; error += 1.0 / K.entry(r, c) - expectedEntry; } if(verbose) std::cout << std::endl; } if(verbose) std::cout << "error: " << error << std::endl;; // get a whole row if(verbose) std::cout << " --- " << std::endl; size_t r = 1; blas::vector kernelRow(currentDimension, 0); K.row(r, kernelRow); for(size_t c = 0; c < currentDimension; c++) { double expectedEntry = gamma * gamma * gamma * gamma; if((r == c) || (c == currentDimension - r - 1)) expectedEntry = 1; if((currentDimension % 2 == 1) && ((r == currentDimension / 2) || (c == currentDimension / 2))) expectedEntry *= gamma * gamma; if(verbose) std::cout << " " << 1.0 / K.entry(r, c) << "/" << expectedEntry; error += 1.0 / K.entry(r, c) - expectedEntry; } if(verbose) std::cout << std::endl; if(verbose) std::cout << " --- 8< ---" << std::endl; // this test might be not exact, and might seem nonsentical. // we still hope it should give reasonable timings. // find a cached and an uncached row // test if isCached is what we expected for(size_t r = 0; r < currentDimension; r++) { // FIXME: everything below X should not be cached if(K.isCached(r)) { if(verbose) { std::cout << "+";} } else { if(verbose) {std::cout << "-";} } } //FIXME: do this cache-rows counting if(verbose) std::cout << std::endl; size_t cachedRowIndex = 0; size_t uncachedRowIndex = currentDimension - 1; // first test how many uncached rows we roughly can obtain in a certain given timespan. 
double timespan = 0.01; shark::Timer timer; size_t uncachedRowEvalCount = 0; double uncachedStartTime = timer.now(); do { // evaluate a whole uncached row uncachedRowEvalCount++; K.row(uncachedRowIndex, kernelRow); } while(timer.now() - uncachedStartTime < timespan); if(verbose) std::cout << "uncached: " << uncachedRowEvalCount; size_t cachedRowEvalCount = 0; double cachedStartTime = timer.now(); do { // evaluate a whole cached row cachedRowEvalCount++; K.row(cachedRowIndex, kernelRow); } while(timer.now() - cachedStartTime < timespan); if(verbose) std::cout << "cached: " << cachedRowEvalCount; if(verbose) std::cout << std::endl; // now that we know how many evaluations we can roughly do in a given timespan // we repeat this for another timespan, but alternating double cachedTotalTime = 0; double uncachedTotalTime = 0; double cachedTotalEvalCount = 0; double uncachedTotalEvalCount = 0; double startTime = timer.now(); do { // evaluate N cached row accesses cachedStartTime = timer.now(); for(size_t p = 0; p < cachedRowEvalCount; p++) K.row(cachedRowIndex, kernelRow); cachedTotalTime += timer.now() - cachedStartTime ; cachedTotalEvalCount += cachedRowEvalCount; // evaluate N uncached row accesses uncachedStartTime = timer.now(); for(size_t p = 0; p < uncachedRowEvalCount; p++) K.row(uncachedRowIndex, kernelRow); uncachedTotalTime += timer.now() - uncachedStartTime ; uncachedTotalEvalCount += uncachedRowEvalCount; } while(timer.now() - startTime < timespan); // now we hope to have some kind of balanced, unbiased // time and number of row evals in that time // we believe that cached access must be at least twice as fast // as uncached, given the dimension double uncachedSpeed = (double)uncachedTotalEvalCount / (double)uncachedTotalTime; double cachedSpeed = (double)cachedTotalEvalCount / (double)cachedTotalTime; if(verbose) std::cout << "uncached vs cached speed: " << uncachedSpeed << "/ " << cachedSpeed << " --> " << (double)cachedSpeed / (double)uncachedSpeed << std::endl; // this might be a 'stupid' test, but for all values I ever saw this was true: // the speed of the cache is always at least 10x faster than uncached // for the gamma and dimensions we choose here. so expect at least 3x speed // OK: on virtual machines this might not be true! //BOOST_CHECK(cachedSpeed / uncachedSpeed >= 3.0); } } ///\brief test if we can cache a gigantic matrix and still access all rows BOOST_AUTO_TEST_CASE(LinAlg_PartlyPrecomputedMatrix_GiganticKernel) { Rng::seed(42); // FIXME: need a test for 1x1?? ;) size_t maxDimension = 120; size_t repeats = 2000; // create that 'gigantic' matrix size_t currentDimension = Rng::discrete(2, maxDimension) + 1000000; // compute a cache that should hold a few rows at most typedef DummyKernelMatrix<double> KernelMatrixType; // allocate enough space for exactly 10 rows { size_t cacheSize = sizeof(double) * currentDimension * 10; KernelMatrixType kernel(currentDimension); PartlyPrecomputedMatrix<KernelMatrixType> K(&kernel, cacheSize); // see how many rows are actually cached size_t nCachedRows = 0; for(size_t r = 0; r < currentDimension; r++) { if(K.isCached(r)) nCachedRows++; } // as we have allocated enough space for exactly 10 rows, we expect 10 rows. 
BOOST_CHECK(nCachedRows == 10); } // allocate enough space just not enough 11 rows size_t cacheSize = sizeof(double) * currentDimension * 11 - 1; KernelMatrixType kernel(currentDimension); PartlyPrecomputedMatrix K(&kernel, cacheSize); // see how many rows are actually cached size_t nCachedRows = 0; for(size_t r = 0; r < currentDimension; r++) { if(K.isCached(r)) nCachedRows++; } // as we have allocated enough space for only 10 rows, we expect 10 rows. BOOST_CHECK(nCachedRows == 10); // do some random access for(size_t i = 0; i < repeats; i++) { // find a random entry size_t r = Rng::discrete(0, currentDimension - 1); size_t c = Rng::discrete(0, currentDimension - 1); BOOST_CHECK((K.entry(r, c) == (double)(r + 1) / (double)(c + 1))); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/Proxy.cpp000066400000000000000000000111201314655040000164720ustar00rootroot00000000000000#define BOOST_TEST_MODULE BLAS_Adaptor #include #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (LinAlg_Proxy) BOOST_AUTO_TEST_CASE( BLAS_dense_vector_adaptor ) { double mem[]={0,4,1,4,2,4,3,4}; RealVector x(3); RealMatrix xm(3,3); RealVector result(3); RealVector resultProxy(3); for(std::size_t i = 0; i != 3; ++i){ x(i) = i; for(std::size_t j = 0; j != 3; ++j){ xm(i,j) = j+3*i; } } //create some proxies blas::dense_vector_adaptor proxy(x); blas::dense_vector_adaptor proxym(mem,4,2); //check internal variables BOOST_REQUIRE_EQUAL(proxy.size() ,3); BOOST_REQUIRE_EQUAL(proxy.stride() ,1); BOOST_REQUIRE_EQUAL(proxy.storage() ,&x(0)); BOOST_REQUIRE_EQUAL(proxym.size() ,4); BOOST_REQUIRE_EQUAL(proxym.stride() ,2); BOOST_REQUIRE_EQUAL(proxym.storage() ,mem); //check traits BOOST_REQUIRE_EQUAL(proxy.stride() ,1); BOOST_REQUIRE_EQUAL(proxy.storage() ,&x(0)); BOOST_REQUIRE_EQUAL(proxym.stride() ,2); BOOST_REQUIRE_EQUAL(proxym.storage() ,mem); //check values for(std::size_t i = 0; i != 3; ++i){ BOOST_REQUIRE_EQUAL(x(i) ,proxy(i)); BOOST_REQUIRE_EQUAL(x(i) ,const_cast const&>(proxy)(i)); BOOST_REQUIRE_EQUAL(x(i) ,proxy[i]); BOOST_REQUIRE_EQUAL(x[i] ,const_cast const&>(proxy)[i]); } for(std::size_t i = 0; i != 4; ++i){ BOOST_REQUIRE_EQUAL(mem[2*i] ,proxym(i)); } axpy_prod(xm,x,result); axpy_prod(xm,proxy,resultProxy); for(std::size_t i = 0; i != 3; ++i){ BOOST_REQUIRE_EQUAL(result(i) ,resultProxy(i)); } } template void checkProxyBase(Vector& vec, std::size_t nnz){ Proxy proxy(vec); //check basic properties BOOST_REQUIRE_EQUAL(proxy.nnz(),nnz); BOOST_REQUIRE_EQUAL(proxy.size(),vec.size()); //check iterators typename boost::range_iterator::type proxyiter = proxy.begin(); typename boost::range_iterator::type vectoriter = vec.begin(); for(;vectoriter != vec.end(); ++vectoriter, ++ proxyiter){ BOOST_REQUIRE(proxyiter != proxy.end()); BOOST_CHECK_EQUAL(vectoriter.index(), proxyiter.index()); BOOST_CHECK_EQUAL(*vectoriter, *proxyiter); } } template void checkProxy(Vector const& vec, std::size_t nnz){ checkProxyBase >(vec,nnz); checkProxyBase >(vec,nnz); checkProxyBase >(vec,nnz); checkProxyBase >(vec,nnz); //check proxy conversion const blas::sparse_vector_adaptor proxy(vec); checkProxyBase >(proxy,nnz); checkProxyBase >(proxy,nnz); checkProxyBase >(proxy,nnz); checkProxyBase >(proxy,nnz); blas::sparse_vector_adaptor cproxy(vec); checkProxyBase >(cproxy,nnz); checkProxyBase >(cproxy,nnz); checkProxyBase >(cproxy,nnz); checkProxyBase >(cproxy,nnz); } BOOST_AUTO_TEST_CASE( LinAlg_sparse_vector_adaptor ) { //som vectors to test CompressedRealVector x1(6); x1(3) = 1.5; CompressedRealVector x2(6); x2(3) = 
2.0; x2(5) = 1.0; CompressedRealVector x3(6);//empty CompressedRealVector x4(6);//full for(std::size_t i = 0; i != 6; ++i) x4(i) = i; CompressedRealVector x5(10); x5(0) = 1.5;//only first element CompressedRealVector x6(10); x6(9) = 1.5;//only last element checkProxy(x1,1); checkProxy(x2,2); checkProxy(x3,0); checkProxy(x4,6); checkProxy(x5,1); checkProxy(x6,1); } BOOST_AUTO_TEST_CASE( LinAlg_sparse_vector_adaptor_MatrixRow ) { //som vectors to test CompressedRealMatrix x1(5,6); //(last line is also empty) x1(0,3) = 1.5; x1(1,3) = 2.0; x1(1,5) = 1.0; for(std::size_t i = 0; i != 6; ++i) x1(3,i) = i; CompressedRealMatrix x2(2,10); x2(0,0) = 1.5;//only first element x2(1,9) = 1.5;//only last element checkProxy(row(x1,0),1); checkProxy(row(x1,1),2); checkProxy(row(x1,2),0); checkProxy(row(x1,3),6); checkProxy(row(x1,4),0); checkProxy(row(x2,0),1); checkProxy(row(x2,1),1); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/RQ.cpp000066400000000000000000000326451314655040000157120ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE LinAlg_RQ #include #include using namespace shark; RealMatrix createRandomTriangularMatrix(RealMatrix const& lambda,std::size_t Dimensions){ RealMatrix R = lambda; for(std::size_t i = 0; i != Dimensions; ++i){ for(std::size_t j = 0; j != i; ++j){ R(i,j) = Rng::gauss(0,1); } } return R; } RealMatrix createRandomMatrix(RealMatrix const& lambda,std::size_t Dimensions){ RealMatrix R = blas::randomRotationMatrix(Dimensions); RealMatrix Atemp(Dimensions,Dimensions); RealMatrix A(Dimensions,Dimensions); axpy_prod(R,lambda,Atemp); axpy_prod(Atemp,trans(R),A); return A; } //special case first: the matrix is triangular BOOST_AUTO_TEST_SUITE (LinAlg_RQ) BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_Triangular_FullRank ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable triangular matrix A //with diagonal elements lambda RealMatrix lambda(Dimensions,Dimensions); lambda.clear(); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i,i) = 0.7*i-10*Rng::uni(1,2); } RealMatrix A = createRandomTriangularMatrix(lambda,Dimensions); //calculate RQ RealMatrix R(Dimensions,Dimensions); RealMatrix Q(Dimensions,Dimensions); blas::permutation_matrix P(Dimensions); std::size_t rank = pivotingRQ(A,R,Q,P); //test whether result is full rank BOOST_CHECK_EQUAL(rank,Dimensions); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); axpy_prod(R,Q,ATest); //test reconstruction error after pivoting swap_rows(P,A); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans //test determinant of R double logDetA = trace(log(abs(lambda))); double logDetR = trace(log(abs(R))); BOOST_CHECK_SMALL(std::abs(logDetA)-std::abs(logDetR),1.e-10); //test orthonormal property of Q RealMatrix testIdentity(Dimensions,Dimensions); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(Dimensions,Dimensions); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(Dimensions)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(Dimensions)); BOOST_CHECK(!(boost::math::isnan)(errorID1)); BOOST_CHECK(!(boost::math::isnan)(errorID2)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //second special case: the matrix is a rotation matrix BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_Rotation_FullRank ){ std::size_t NumTests = 100; std::size_t 
Dimensions = 51; for(std::size_t test = 0; test != NumTests; ++test){ RealMatrix A = blas::randomRotationMatrix(Dimensions); //calculate RQ RealMatrix R(Dimensions,Dimensions); RealMatrix Q(Dimensions,Dimensions); blas::permutation_matrix P(Dimensions); std::size_t rank = pivotingRQ(A,R,Q,P); //test whether result is full rank BOOST_CHECK_EQUAL(rank,Dimensions); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); axpy_prod(R,Q,ATest); //test reconstruction error after pivoting swap_rows(P,A); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans //test determinant of R double logDetA = 0; double logDetR = trace(log(abs(R))); BOOST_CHECK_SMALL(std::abs(logDetA)-std::abs(logDetR),1.e-10); //test orthonormal property of Q RealMatrix testIdentity(Dimensions,Dimensions); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(Dimensions,Dimensions); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(Dimensions)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(Dimensions)); BOOST_CHECK(!(boost::math::isnan)(errorID1)); BOOST_CHECK(!(boost::math::isnan)(errorID2)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //the input matrix is square and full rank BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_Square_FullRank ){ std::size_t NumTests = 100; std::size_t Dimensions = 48; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable eigenvalue problem matrix A RealMatrix lambda(Dimensions,Dimensions); lambda.clear(); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i,i) = -10+i*0.7; } RealMatrix A = createRandomMatrix(lambda,Dimensions); //calculate RQ RealMatrix R(Dimensions,Dimensions); RealMatrix Q(Dimensions,Dimensions); blas::permutation_matrix P(Dimensions); std::size_t rank = pivotingRQ(A,R,Q,P); //test whether result is full rank BOOST_CHECK_EQUAL(rank,Dimensions); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); axpy_prod(R,Q,ATest); //test reconstruction error after pivoting swap_rows(P,A); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans //test determinant of R double logDetA = trace(log(abs(lambda))); double logDetR = trace(log(abs(R))); BOOST_CHECK_SMALL(std::abs(logDetA)-std::abs(logDetR),1.e-12); //test orthonormal property of Q RealMatrix testIdentity(Dimensions,Dimensions); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(Dimensions,Dimensions); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(Dimensions)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(Dimensions)); BOOST_CHECK(!(boost::math::isnan)(errorID1)); BOOST_CHECK(!(boost::math::isnan)(errorID2)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //Square and not full rank BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_Square ){ std::size_t NumTests = 100; std::size_t Dimensions = 53; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable eigenvalue problem matrix A RealMatrix lambda(Dimensions,Dimensions); lambda.clear(); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i,i) = double(i)-10; lambda(3,3)=0; lambda(1,1)=0; } RealMatrix A = createRandomMatrix(lambda,Dimensions); //calculate RQ RealMatrix R(Dimensions,Dimensions); RealMatrix Q(Dimensions,Dimensions); 
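// Editor's note on the determinant check a few lines below: R is triangular,
// so det(R) = prod_i R(i,i) and hence log|det R| = sum_i log|R(i,i)|, which is
// exactly trace(log(abs(R))). Since Q is orthogonal, |det Q| = 1 and therefore
// |det A| = |det R|; with A built from the diagonal lambda this is why
// logDetA = trace(log(abs(lambda))) is compared against trace(log(abs(R))).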
blas::permutation_matrix P(Dimensions); std::size_t rank = pivotingRQ(A,R,Q,P); //test whether rank is correct BOOST_CHECK_EQUAL(rank,Dimensions-3); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); axpy_prod(R,Q,ATest); //test reconstruction error after pivoting swap_rows(P,A); double errorA= norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans //test orthonormal property of Q RealMatrix testIdentity(Dimensions,Dimensions); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(Dimensions,Dimensions); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(Dimensions)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(Dimensions)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //non square m > n but rank is n input is generated from precreated RQ //with R being diagonal BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_DiagonalR_RankN ){ std::size_t NumTests = 100; std::size_t M = 47; std::size_t N = 31; for(std::size_t test = 0; test != NumTests; ++test){ //generate test input RealMatrix QTest = blas::randomRotationMatrix(N); RealMatrix RTest(M,N); RTest.clear(); for(std::size_t i = 0; i != N; ++i){ for(std::size_t j = 0; j != i; ++j){ RTest(i,j) = Rng::gauss(0,1); } //make sure that the matrix has rank M. zero on diagonal is quite bad for this... while(std::abs(RTest(i,i)) < 0.1) RTest(i,i) = 0.7*i-10; } RealMatrix ATest(M,N); axpy_prod(RTest,QTest,ATest); //calculate RQ decomposition from the input RealMatrix R(M,N); RealMatrix Q(N,N); blas::permutation_matrix P(M); std::size_t rank = pivotingRQ(ATest,R,Q,P); //test whether rank is correct BOOST_CHECK_EQUAL(rank,N); //create reconstruction of A RealMatrix A(M,N); axpy_prod(R,Q,A); //test reconstruction error after pivoting swap_rows(P,ATest); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(A)));//test for nans //test orthonormal property of Q RealMatrix testIdentity(N,N); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(N,N); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(N)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(N)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //non square m < n but rank is m input is generated from precreated RQ //with R being diagonal BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_DiagonalR_RankM ){ std::size_t NumTests = 100; std::size_t M = 31; std::size_t N = 47; for(std::size_t test = 0; test != NumTests; ++test){ //generate test input RealMatrix QTest = blas::randomRotationMatrix(N); RealMatrix RTest(M,N); RTest.clear(); for(std::size_t i = 0; i != M; ++i){ for(std::size_t j = 0; j != i; ++j){ RTest(i,j) = Rng::gauss(0,1); } RTest(i,i) = 0.7*i-10; } RealMatrix ATest(M,N); axpy_prod(RTest,QTest,ATest); //calculate RQ decomposition from the input RealMatrix R(M,N); RealMatrix Q(N,N); blas::permutation_matrix P(M); std::size_t rank = pivotingRQ(ATest,R,Q,P); //test whether rank is correct BOOST_CHECK_EQUAL(rank,M); //create reconstruction of A RealMatrix A(M,N); axpy_prod(R,Q,A); //test reconstruction error after pivoting swap_rows(P,ATest); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(A)));//test for nans //test orthonormal property of Q RealMatrix testIdentity(N,N); 
axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(N,N); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(N)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(N)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //non square m > n > rank. input is generated from precreated RQ //with R being diagonal BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_DiagonalR_RankLowerN ){ std::size_t NumTests = 100; std::size_t M = 30; std::size_t N = 51; std::size_t Rank = 20; for(std::size_t test = 0; test != NumTests; ++test){ //generate test input RealMatrix QTest = blas::randomRotationMatrix(N); RealMatrix RTest(M,N); RTest.clear(); for(std::size_t i = 0; i != Rank; ++i){ for(std::size_t j = 0; j != i; ++j){ RTest(i,j) = Rng::gauss(0,1); } if(i< Rank) RTest(i,i) = 0.7*i-10; } RealMatrix ATest(M,N); axpy_prod(RTest,QTest,ATest); //calculate RQ decomposition from the input RealMatrix R(M,N); RealMatrix Q(N,N); blas::permutation_matrix P(M); std::size_t rank = pivotingRQ(ATest,R,Q,P); //test whether rank is correct BOOST_CHECK_EQUAL(rank,Rank); //create reconstruction of A RealMatrix A(M,N); axpy_prod(R,Q,A); //test reconstruction error after pivoting swap_rows(P,ATest); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(A)));//test for nans //test orthonormal property of Q RealMatrix testIdentity(N,N); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(N,N); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(N)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(N)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } //non square n > m > rank. 
input is generated from precreated RQ //with R being diagonal BOOST_AUTO_TEST_CASE( LinAlg_PivotingRQ_DiagonalR_RankLowerM ){ std::size_t NumTests = 100; std::size_t M = 47; std::size_t N = 31; std::size_t Rank = 19; for(std::size_t test = 0; test != NumTests; ++test){ //generate test input RealMatrix QTest = blas::randomRotationMatrix(N); RealMatrix RTest(M,N); RTest.clear(); for(std::size_t i = 0; i != Rank; ++i){ for(std::size_t j = 0; j != i; ++j){ RTest(i,j) = Rng::gauss(0,1); } if(i< Rank) RTest(i,i) = 0.7*i-10; } RealMatrix ATest(M,N); axpy_prod(RTest,QTest,ATest); //calculate RQ decomposition from the input RealMatrix R(M,N); RealMatrix Q(N,N); blas::permutation_matrix P(M); std::size_t rank = pivotingRQ(ATest,R,Q,P); //test whether rank is correct BOOST_CHECK_EQUAL(rank,Rank); //create reconstruction of A RealMatrix A(M,N); axpy_prod(R,Q,A); //test reconstruction error after pivoting swap_rows(P,ATest); double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(A)));//test for nans //test orthonormal property of Q RealMatrix testIdentity(N,N); axpy_prod(Q,trans(Q),testIdentity); RealMatrix testIdentity2(N,N); axpy_prod(trans(Q),Q,testIdentity2); double errorID1 = norm_inf(testIdentity-RealIdentityMatrix(N)); double errorID2 = norm_inf(testIdentity2-RealIdentityMatrix(N)); BOOST_CHECK_SMALL(errorID1,1.e-12); BOOST_CHECK_SMALL(errorID2,1.e-12); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/choleskyDecomposition.cpp000066400000000000000000000156661314655040000217520ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_CholeskyDecomposition #include #include #include #include using namespace shark; const size_t Dimensions=4; double inputMatrix[Dimensions][Dimensions]= { { 9, 3, -6, 12}, { 3, 26, -7, -11}, {-6, -7, 9, 7}, {12, -11, 7, 65} }; double decomposedMatrix[Dimensions][Dimensions]= { { 3, 0, 0, 0}, { 1, 5, 0, 0}, { -2, -1, 2, 0}, { 4, -3, 6, 2} }; BOOST_AUTO_TEST_SUITE (LinAlg_choleskyDecomposition) BOOST_AUTO_TEST_CASE( LinAlg_CholeskyDecomposition_Base ) { RealMatrix M(Dimensions, Dimensions); // input matrix RealMatrix C(Dimensions, Dimensions); // matrix for Cholesky Decomposition // Initializing matrices for (size_t row = 0; row < Dimensions; row++) { for (size_t col = 0; col < Dimensions; col++) { M(row, col) = inputMatrix[row][col]; C(row, col) = 0; } } //Decompose choleskyDecomposition(M,C); //test for equality for (size_t row = 0; row < Dimensions; row++) { for (size_t col = 0; col < Dimensions; col++) { BOOST_CHECK_SMALL(C(row, col)-decomposedMatrix[row][col],1.e-14); } } } BOOST_AUTO_TEST_CASE(LinAlg_CholeskyDecomposition_Base_ColumnMajor) { blas::matrix M(Dimensions, Dimensions); // input matrix blas::matrix C(Dimensions, Dimensions); // matrix for Cholesky Decomposition // Initializing matrices for(size_t row = 0; row < Dimensions; row++) { for(size_t col = 0; col < Dimensions; col++) { M(row, col) = inputMatrix[row][col]; C(row, col) = 0; } } //Decompose choleskyDecomposition(M, C); //test for equality for(size_t row = 0; row < Dimensions; row++) { for(size_t col = 0; col < Dimensions; col++) { BOOST_CHECK_SMALL(C(row, col) - decomposedMatrix[row][col], 1.e-14); } } } BOOST_AUTO_TEST_CASE(LinAlg_PivotingCholeskyDecomposition_Base) { RealMatrix M(Dimensions, Dimensions); // input matrix RealMatrix C(Dimensions, Dimensions); // matrix for Cholesky Decomposition PermutationMatrix P(Dimensions); // Initializing matrices for (size_t row = 0; row < Dimensions; row++) { for (size_t col = 0; col 
< Dimensions; col++) { M(row, col) = inputMatrix[row][col]; C(row, col) = 0; } } //Decompose std::size_t rank = pivotingCholeskyDecomposition(M,P,C); swap_rows(P,C); double error = norm_inf(prod(C,trans(C))-M); BOOST_CHECK_SMALL(error,1.e-13); BOOST_CHECK_EQUAL(rank,4); } RealMatrix createRandomMatrix(RealVector const& lambda,std::size_t Dimensions){ RealMatrix R = blas::randomRotationMatrix(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ column(R,i) *= std::sqrt(lambda(i)); } RealMatrix A(Dimensions,Dimensions); axpy_prod(R,trans(R),A); return A; } BOOST_AUTO_TEST_CASE( LinAlg_CholeskyDecomposition ){ std::size_t NumTests = 100; std::size_t Dimensions = 48; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable eigenvalue problem matrix A RealVector lambda(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i) = Rng::uni(1,3.0); } RealMatrix A = createRandomMatrix(lambda,Dimensions); //calculate Cholesky RealMatrix C(Dimensions,Dimensions); choleskyDecomposition(A,C); //test determinant of C double logDetA = sum(log(lambda)); double logDetC = trace(log(sqr(C))); BOOST_CHECK_SMALL(std::abs(logDetA)-std::abs(logDetC),1.e-12); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); axpy_prod(C,trans(C),ATest); //test reconstruction error double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans } } BOOST_AUTO_TEST_CASE( LinAlg_PivotingCholeskyDecomposition_FullRank ){ std::size_t NumTests = 100; std::size_t Dimensions = 48; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable eigenvalue problem matrix A RealVector lambda(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i) = Rng::uni(1,3.0); } RealMatrix A = createRandomMatrix(lambda,Dimensions); //calculate Cholesky RealMatrix C(Dimensions,Dimensions); PermutationMatrix P(Dimensions); std::size_t rank = pivotingCholeskyDecomposition(A,P,C); //test whether result is full rank BOOST_CHECK_EQUAL(rank,Dimensions); //test determinant of C double logDetA = sum(log(lambda)); double logDetC = trace(log(sqr(C))); BOOST_CHECK_SMALL(std::abs(logDetA)-std::abs(logDetC),1.e-12); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); swap_full(P,A); axpy_prod(C,trans(C),ATest); //test reconstruction error double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-12); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans } } BOOST_AUTO_TEST_CASE( LinAlg_PivotingCholeskyDecomposition_RankK ){ std::size_t NumTests = 100; std::size_t Dimensions = 45; for(std::size_t test = 0; test != NumTests; ++test){ std::size_t Rank = Rng::discrete(10,45); //first generate a suitable eigenvalue problem matrix A RealVector lambda(Dimensions); for(std::size_t i = 0; i != Rank; ++i){ lambda(i) = Rng::uni(1,3.0); } RealMatrix A = createRandomMatrix(lambda,Dimensions); //calculate Cholesky RealMatrix C(Dimensions,Dimensions); PermutationMatrix P(Dimensions); std::size_t rank = pivotingCholeskyDecomposition(A,P,C); //test whether result has the correct rank. 
BOOST_CHECK_EQUAL(rank,Rank); //create reconstruction of A RealMatrix ATest(Dimensions,Dimensions); swap_full(P,A); axpy_prod(C,trans(C),ATest); //test reconstruction error double errorA = norm_inf(A-ATest); BOOST_CHECK_SMALL(errorA,1.e-13); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(ATest)));//test for nans } } BOOST_AUTO_TEST_CASE( LinAlg_CholeskyUpdate ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable eigenvalue problem matrix A as well as its decompisition RealVector lambda(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i) = Rng::uni(1,3.0); } RealMatrix A = createRandomMatrix(lambda,Dimensions); //calculate Cholesky RealMatrix C(Dimensions,Dimensions); choleskyDecomposition(A,C); //generate proper update double alpha = Rng::uni(0.1,1); double beta = Rng::uni(-0.8,2)*alpha; RealVector v(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ v(i) = Rng::uni(-1,1); } if(beta < 0)//preserve positive definiteness v /= norm_2(v); //update decomposition A*=alpha; noalias(A) += beta*outer_prod(v,v); RealMatrix CUpdate=C; choleskyDecomposition(A,CUpdate); //Test the fast update choleskyUpdate(C,v,alpha,beta); BOOST_CHECK(!(boost::math::isnan)(norm_frobenius(C)));//test for nans BOOST_CHECK_SMALL(max(abs(C-CUpdate)),1.e-12); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/eigensymm.cpp000066400000000000000000000057531314655040000173650ustar00rootroot00000000000000#include "shark/LinAlg/eigenvalues.h" #include "shark/LinAlg/rotations.h" #define BOOST_TEST_MODULE LinAlg_eigensymm #include #include using namespace shark; using namespace blas; BOOST_AUTO_TEST_SUITE (LinAlg_eigensymm) BOOST_AUTO_TEST_CASE( LinAlg_eigensymm ) { RealMatrix A(3, 3); // input matrix RealMatrix x(3, 3); // matrix for eigenvectors RealVector lambda(3); // vector for eigenvalues // Initialization values for input matrix: double bottom_triangle[9] = { 7., 0., 0., -2., 6., 0., 0., -2., 0. 
}; // Initializing matrices and vector: for (size_t curr_row = 0; curr_row < 3; curr_row++) { for (size_t curr_col = 0; curr_col < 3; curr_col++) { A(curr_row, curr_col) = bottom_triangle[curr_row*3+curr_col]; x(curr_row, curr_col) = 0.; } lambda(curr_row) = 0.; } // Calculating eigenvalues and eigenvectors: eigensymm(A, x, lambda);// A is unchanged after the call // lines below are for self-testing this example, please ignore BOOST_CHECK_SMALL(lambda(0)- 8.74697,1.e-5); } RealMatrix createRandomMatrix(RealVector const& lambda,std::size_t Dimensions){ RealMatrix R = blas::randomRotationMatrix(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ column(R,i) *= std::sqrt(lambda(i)); } RealMatrix A(Dimensions,Dimensions); axpy_prod(R,trans(R),A); return A; } BOOST_AUTO_TEST_CASE( LinAlg_eigensymm_general ) { std::size_t NumTests = 100; std::size_t Dimensions = 10; for(std::size_t test = 0; test != NumTests; ++test){ //first generate a suitable eigenvalue problem matrix A as well as its decompisition RealVector lambda(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ lambda(i) = Rng::uni(1,3.0); } RealMatrix A = createRandomMatrix(lambda,Dimensions); std::sort(lambda.begin(),lambda.end()); std::reverse(lambda.begin(),lambda.end()); //calculate eigenvalue decomposition RealVector eigenvalues; RealMatrix eigenVectors; eigensymm(A, eigenVectors, eigenvalues); //check that the eigenvectors are orthogonal double error1 = max(prod(eigenVectors,trans(eigenVectors))-identity_matrix(Dimensions)); double error2 = max(prod(trans(eigenVectors),eigenVectors)-identity_matrix(Dimensions)); BOOST_CHECK(!(boost::math::isnan)(error1));//test for nans BOOST_CHECK(!(boost::math::isnan)(error2));//test for nans BOOST_CHECK_SMALL(error1,1.e-12); BOOST_CHECK_SMALL(error2,1.e-12); //check that the eigenvalues are correct //~ std::cout<(eigenvalues) ); BOOST_CHECK(!(boost::math::isnan)(error_orthogonalize));//test for nans BOOST_CHECK_SMALL(error_orthogonalize,1.e-12); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/permute.cpp000066400000000000000000000052671314655040000170510ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Permute #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (LinAlg_permute) BOOST_AUTO_TEST_CASE( LinAlg_Permute_Rows_Matrix ){ PermutationMatrix P(5); P(0)=2; P(1)=2; P(2)=4; P(3)=4; P(4)=4; //generate testing matrix IntMatrix A(5,6); for(int i = 0; i != 5; ++i){ for(int j = 0; j != 6; ++j){ A(i,j) = i*6+j; } } //generate permutated result IntMatrix APerm(5,6); row(APerm,0)=row(A,2); row(APerm,1)=row(A,0); row(APerm,2)=row(A,4); row(APerm,3)=row(A,1); row(APerm,4)=row(A,3); swap_rows(P,A); int error = norm_inf(A-APerm); BOOST_CHECK_EQUAL(error, 0); } BOOST_AUTO_TEST_CASE( LinAlg_Permute_Rows_Vector ){ PermutationMatrix P(5); P(0)=2; P(1)=2; P(2)=4; P(3)=4; P(4)=4; //generate testing matrix IntVector v(5); for(int i = 0; i != 5; ++i){ v(i) = i; } //generate permutated result IntVector vPerm(5); vPerm(0)=v(2); vPerm(1)=v(0); vPerm(2)=v(4); vPerm(3)=v(1); vPerm(4)=v(3); swap_rows(P,v); int error = norm_inf(v-vPerm); BOOST_CHECK_EQUAL(error, 0); } BOOST_AUTO_TEST_CASE( LinAlg_Permute_Columns ){ PermutationMatrix P(5); P(0)=2; P(1)=2; P(2)=4; P(3)=4; P(4)=4; //generate testing matrix IntMatrix A(6,5); for(int i = 0; i != 6; ++i){ for(int j = 0; j != 5; ++j){ A(i,j) = i*5+j; } } //generate permutated result IntMatrix APerm(6,5); column(APerm,0)=column(A,2); column(APerm,1)=column(A,0); column(APerm,2)=column(A,4); column(APerm,3)=column(A,1); 
column(APerm,4)=column(A,3); swap_columns(P,A); int error = norm_inf(A-APerm); BOOST_CHECK_EQUAL(error, 0); } BOOST_AUTO_TEST_CASE( LinAlg_Permute_Columns_Inverted ){ PermutationMatrix P(5); P(0)=2; P(1)=2; P(2)=4; P(3)=4; P(4)=4; //generate testing matrix IntMatrix A(6,5); for(int i = 0; i != 6; ++i){ for(int j = 0; j != 5; ++j){ A(i,j) = i*5+j; } } //copy to result IntMatrix APerm(A); swap_columns(P,A); swap_columns_inverted(P,A); int error = norm_inf(A-APerm); BOOST_CHECK_EQUAL(error, 0); } BOOST_AUTO_TEST_CASE( LinAlg_Permute_Full ){ PermutationMatrix P(5); P(0)=2; P(1)=2; P(2)=4; P(3)=4; P(4)=4; //generate testing matrix IntMatrix A(6,5); for(int i = 0; i != 6; ++i){ for(int j = 0; j != 5; ++j){ A(i,j) = i*5+j; } } //generate permutated result IntMatrix APerm(A); swap_rows(APerm,0,2); swap_columns(APerm,0,2); swap_rows(APerm,1,2); swap_columns(APerm,1,2); swap_rows(APerm,2,4); swap_columns(APerm,2,4); swap_rows(APerm,3,4); swap_columns(APerm,3,4); swap_full(P,A); int error = norm_inf(A-APerm); BOOST_CHECK_EQUAL(error, 0); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/repeat.cpp000066400000000000000000000044761314655040000166510ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Repeat #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (LinAlg_repeat) BOOST_AUTO_TEST_CASE( LinAlg_Repeat_Indexed ){ std::size_t dimensions = 3; std::size_t repetitions = 10; IntVector vector(dimensions); vector(0) = 1; vector(1) = 2; vector(2) = 3; blas::vector_repeater rep=repeat(vector,repetitions); BOOST_REQUIRE_EQUAL(rep.size1(), repetitions); BOOST_REQUIRE_EQUAL(rep.size2(), dimensions); for(std::size_t i = 0; i != repetitions; ++i){ for(std::size_t j = 0; j != dimensions; ++j){ BOOST_CHECK_EQUAL(rep(i,j), vector(j)); } } } BOOST_AUTO_TEST_CASE( LinAlg_Repeat_Iterator ){ std::size_t dimensions = 3; std::size_t repetitions = 10; IntVector vector(dimensions); vector(0) = 1; vector(1) = 2; vector(2) = 3; blas::vector_repeater rep=repeat(vector,repetitions); BOOST_REQUIRE_EQUAL(rep.size1(), repetitions); BOOST_REQUIRE_EQUAL(rep.size2(), dimensions); //test both iterator orders for(std::size_t i = 0; i != repetitions; ++i){ std::size_t k = 0; for(blas::vector_repeater::const_row_iterator j = rep.row_begin(i); j != rep.row_end(i); ++j,++k){ BOOST_CHECK_EQUAL(*j, vector(k)); } BOOST_REQUIRE_EQUAL(k, dimensions); } for(std::size_t i = 0; i != dimensions; ++i){ std::size_t k = 0; for(blas::vector_repeater::const_column_iterator j = rep.column_begin(i); j != rep.column_end(i); ++j,++k ){ BOOST_CHECK_EQUAL(*j, vector(i)); } BOOST_REQUIRE_EQUAL(k, repetitions); } } //some expressions to test whether everything works together with ublas BOOST_AUTO_TEST_CASE( LinAlg_Repeat_Expressions ){ std::size_t dimensions = 3; std::size_t repetitions = 10; IntVector vector(dimensions); vector(0) = 1; vector(1) = 2; vector(2) = 3; blas::vector_repeater rep=repeat(vector,repetitions); BOOST_REQUIRE_EQUAL(rep.size1(), repetitions); BOOST_REQUIRE_EQUAL(rep.size2(), dimensions); RealMatrix result(rep); result+=blas::repeat(vector,repetitions); for(std::size_t i = 0; i != repetitions; ++i){ for(std::size_t j = 0; j != dimensions; ++j){ BOOST_CHECK_EQUAL(result(i,j), 2*vector(j)); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/rotations.cpp000066400000000000000000000076611314655040000174120ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE LinAlg_rotations #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (LinAlg_rotations) 
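// Editor's note (added explanation plus a sketch under stated assumptions):
// the tests below rely on the Householder identity
//   H x = (I - tau * v v^T) x = (-sign(x(0)) * ||x||_2, 0, ..., 0)^T,
// where createHouseholderReflection(x,v) fills v and returns tau. The sign is
// chosen opposite to x(0) for numerical stability, which is why the expected
// first entry is -copySign(norm_2(test),test(0)). Applying H never requires
// the dense outer product; assuming inner_prod and the usual blas vector
// arithmetic are available, one dot product suffices:
static RealVector applyHouseholderNaive(RealVector const& x, RealVector const& v, double tau){
	//H x = x - tau * v * (v^T x), computed with a single inner product
	return x - (tau * inner_prod(v,x)) * v;
}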
BOOST_AUTO_TEST_CASE( LinAlg_Householder_Creation ){ ///test for numerical stability of createHouseholderReflection std::size_t NumTests = 1000; std::size_t Dimensions = 200; Rng::seed(42);//our all loved default seed :) for(std::size_t testi = 0;testi!=NumTests;++testi){ RealVector test(Dimensions); RealVector reflection(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ test(i) = Rng::gauss(0,1); } double norm = -copySign(norm_2(test),test(0)); double tau = createHouseholderReflection(test,reflection); //when the reflection is correct, it should lead to //(norm,0,....,0) when applied to test RealVector result = test; result -= tau*prod(outer_prod(reflection,reflection),test); BOOST_CHECK_SMALL(norm-result(0),1.e-13); for(size_t i=1; i != Dimensions; ++i){ BOOST_CHECK_SMALL(result(i),1.e-13); } } } BOOST_AUTO_TEST_CASE( LinAlg_Householder_Apply_Left ){ ///test for numerical stability of applyHouseholderOnTheLeft std::size_t NumTests = 100; std::size_t Dimension1 = 200; std::size_t Dimension2 = 10; Rng::seed(42); for(std::size_t testi = 0; testi != NumTests; ++testi){ RealMatrix test(Dimension1,Dimension2); RealMatrix result(Dimension1,Dimension2); RealVector reflection(Dimension1); for(std::size_t i = 0; i != Dimension1; ++i){ for(std::size_t j = 0; j != Dimension2; ++j){ result(i,j) = test(i,j) = Rng::gauss(0,1); } } double tau = createHouseholderReflection(column(test,0),reflection); //apply the slow default version RealMatrix O = outer_prod(reflection,reflection); noalias(result) -= tau*prod(O,test); //now the real test applyHouseholderOnTheLeft(test,reflection,tau); //they should be similar for(std::size_t i = 0; i != Dimension1; ++i){ for(std::size_t j = 0; j != Dimension2; ++j){ BOOST_CHECK_SMALL(result(i,j)-test(i,j),1.e-13); } } } } BOOST_AUTO_TEST_CASE( LinAlg_Householder_Apply_Right ){ ///test for numerical stability of applyHouseholderOnTheRight std::size_t NumTests = 100; std::size_t Dimension1 = 200; std::size_t Dimension2 = 10; Rng::seed(42); for(std::size_t testi = 0; testi != NumTests; ++testi){ RealMatrix test(Dimension1,Dimension2); RealMatrix result(Dimension1,Dimension2); RealVector reflection(Dimension2); for(std::size_t i = 0; i != Dimension1; ++i){ for(std::size_t j = 0; j != Dimension2; ++j){ result(i,j) = test(i,j) = Rng::gauss(0,1); } } double tau = createHouseholderReflection(row(test,0),reflection); //apply the slow default version RealMatrix O = outer_prod(reflection,reflection); noalias(result) -= tau*prod(test,O); //now the real test applyHouseholderOnTheRight(test,reflection,tau); //they should be similar for(std::size_t i = 0; i != Dimension1; ++i){ for(std::size_t j = 0; j != Dimension2; ++j){ BOOST_CHECK_SMALL(result(i,j)-test(i,j),1.e-13); } } } } BOOST_AUTO_TEST_CASE( LinAlg_Random_Rotation_Matrix ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; Rng::seed(42); RealMatrix result(Dimensions,Dimensions); for(std::size_t test = 0;test!=NumTests;++test){ //test whether R^TR = RR^T = I RealMatrix R = blas::randomRotationMatrix(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ BOOST_CHECK_SMALL(norm_2(row(R,i))-1,1.e-12); BOOST_CHECK_SMALL(norm_2(column(R,i))-1,1.e-12); } result.clear(); axpy_prod(R,trans(R),result,0); double errorID1 = norm_inf(result-RealIdentityMatrix(Dimensions)); result.clear(); axpy_prod(trans(R),R,result,0); double errorID2 = norm_inf(result-RealIdentityMatrix(Dimensions)); BOOST_CHECK_SMALL(errorID1,1.e-13); BOOST_CHECK_SMALL(errorID2,1.e-13); } } BOOST_AUTO_TEST_SUITE_END() 
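// --- Editor's note (a sketch, not part of the original sources): several test
// files above (RQ.cpp, choleskyDecomposition.cpp, eigensymm.cpp) build random
// symmetric test matrices with prescribed eigenvalues lambda via
// A = R * diag(lambda) * R^T with R = blas::randomRotationMatrix(n). The
// recurring pattern written out once, assuming lambda(i) >= 0 and <cmath> for
// std::sqrt:
static RealMatrix randomSymmetricMatrix(RealVector const& lambda){
	std::size_t n = lambda.size();
	RealMatrix R = blas::randomRotationMatrix(n);
	for(std::size_t i = 0; i != n; ++i)
		column(R,i) *= std::sqrt(lambda(i)); //R now holds R*sqrt(diag(lambda))
	RealMatrix A(n,n);
	axpy_prod(R,trans(R),A); //A = R*diag(lambda)*R^T, symmetric by construction
	return A;
}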
Shark-3.1.4/Test/LinAlg/solve.cpp000066400000000000000000000725651314655040000165250ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Solve #include #include #include #include using namespace shark; RealMatrix createRandomInvertibleMatrix(std::size_t Dimensions,double lambdaMin, double lambdaMax){ RealMatrix R = blas::randomRotationMatrix(Dimensions); RealMatrix Atemp = trans(R); for(std::size_t i = 0; i != Dimensions; ++i){ double lambda = 0; while(std::abs(lambda)<1.e-5)//prevent ill-conditioning lambda = Rng::uni(lambdaMin,lambdaMax); row(Atemp,i)*=lambda; } RealMatrix A(Dimensions,Dimensions); axpy_prod(R,Atemp,A); return A; } template RealMatrix createSymmetricMatrix(Vec const& lambda,Mat const& rotation){ RealMatrix intermediate=rotation; RealMatrix result(rotation.size1(),rotation.size2()); result.clear(); for(std::size_t i = 0; i != intermediate.size1(); ++i){ row(intermediate,i) *= lambda(i); } axpy_prod(trans(rotation),intermediate,result); return result; } //simple test which checks for all argument combinations whether they are correctly translated BOOST_AUTO_TEST_SUITE (LinAlg_solve) BOOST_AUTO_TEST_CASE( LinAlg_Solve_TriangularInPlace_Calls_Matrix ){ //i actually had to come up with a simple hand calculated example here //this function is so basic that we can't use any other to prove it //(as it is used/might be used by these functions at some point...) RealMatrix A(2,2); A(0,0) = 3; A(0,1) = 0; A(1,0) = 2; A(1,1) = 4; RealMatrix Atrans = trans(A); RealMatrix AInv(2,2); AInv(0,0) = 1.0/3; AInv(0,1) = 0; AInv(1,0) = -1.0/6; AInv(1,1) = 0.25; RealMatrix unitAInv(2,2); unitAInv(0,0) = 1; unitAInv(0,1) = 0; unitAInv(1,0) = -2; unitAInv(1,1) = 1; RealMatrix input(2,10); for(std::size_t j = 0; j != 10; ++j){ input(0,j) = Rng::uni(1,10); input(1,j) = Rng::uni(1,10); } RealMatrix transInput=trans(input); //we print out letters since ATLAS just crashes the program when an error occurs. //very good if you need a backtrace...not! 
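// --- Editor's illustration (not in the original test): the hand-written AInv
// above really is the inverse of the lower-triangular A, since
// [3 0; 2 4] * [1/3 0; -1/6 1/4] = I. Verifying that inline:
{
	RealMatrix shouldBeId = prod(A,AInv);
	BOOST_CHECK_SMALL(shouldBeId(0,0) - 1.0, 1.e-14);
	BOOST_CHECK_SMALL(shouldBeId(0,1), 1.e-14);
	BOOST_CHECK_SMALL(shouldBeId(1,0), 1.e-14);
	BOOST_CHECK_SMALL(shouldBeId(1,1) - 1.0, 1.e-14);
}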
//now we test all combinations of systems //for non-blas::Unit-matrices std::cout<<"triangular matrix"<(A,testResult); RealMatrix result = prod(AInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"b"<(A,testResult); RealMatrix result = prod(subrange(input,0,1,0,2),AInv); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"c1"<(Atrans,testResult); RealMatrix result = prod(trans(AInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"c2"<(trans(A),testResult); RealMatrix result = prod(trans(AInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"d1"<(Atrans,testResult); RealMatrix result = prod(subrange(input,0,1,0,2),trans(AInv)); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"d2"<(trans(A),testResult); RealMatrix result = prod(subrange(input,0,1,0,2),trans(AInv)); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"e"< AcolMaj = A; blas::matrix testResult = input; blas::solveTriangularSystemInPlace(AcolMaj,testResult); RealMatrix result = prod(AInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"f"<(Atrans,testResult); RealMatrix result = prod(trans(AInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } //now we test all combinations of transpositions //for blas::Unit-matrices std::cout<<"a"<(A,testResult); RealMatrix result = prod(unitAInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"b"<(A,testResult); RealMatrix result = prod(subrange(input,0,1,0,2),unitAInv); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"c"<(trans(A),testResult); RealMatrix result = prod(trans(unitAInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"d"<(trans(A),testResult); RealMatrix result = prod(subrange(input,0,1,0,2),trans(unitAInv)); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"e"< AcolMaj = A; blas::matrix testResult = input; blas::solveTriangularSystemInPlace(AcolMaj,testResult); RealMatrix result = prod(unitAInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"f"<(Atrans,testResult); RealMatrix result = prod(trans(unitAInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } } //simple test which checks for all argument combinations whether they are correctly translated BOOST_AUTO_TEST_CASE( LinAlg_Solve_TriangularInPlace_Calls_Vector ){ RealMatrix A(2,2); A(0,0) = 3; A(0,1) = 0; A(1,0) = 2; A(1,1) = 4; RealMatrix AInv(2,2); AInv(0,0) = 1.0/3; AInv(0,1) = 0; AInv(1,0) = -1.0/6; AInv(1,1) = 0.25; RealMatrix unitAInv(2,2); unitAInv(0,0) = 1; unitAInv(0,1) = 0; unitAInv(1,0) = -2; unitAInv(1,1) = 1; RealVector input(2); input(0) = Rng::uni(1,10); input(1) = Rng::uni(1,10); //we print out letters since ATLAS just crashes the program when an error occurs. //very good if you need a backtrace...not! 
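// --- Editor's sketch (not in the original test): what the triangular solvers
// compute here is plain forward substitution on A = [3 0; 2 4]. Written out
// by hand and compared against the hand inverse AInv defined above:
{
	double y0 = input(0)/3.0;               // row 0: 3*y0 = b0
	double y1 = (input(1) - 2.0*y0)/4.0;    // row 1: 2*y0 + 4*y1 = b1
	RealVector check = prod(AInv,input);
	BOOST_CHECK_SMALL(y0 - check(0), 1.e-14);
	BOOST_CHECK_SMALL(y1 - check(1), 1.e-14);
}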
//now we test all combinations of transpositions //for non-blas::Unit-matrices std::cout<<"triangular vector"<(A,testResult); RealVector result = prod(AInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"b"<(trans(A),testResult); RealVector result = prod(trans(AInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"c"<(A,testResult); RealVector result = prod(input,AInv); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"d"<(trans(A),testResult); RealVector result = prod(input,trans(AInv)); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"e"< AcolMaj = A; RealVector testResult = input; blas::solveTriangularSystemInPlace(AcolMaj,testResult); RealVector result = prod(AInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"f"<(Atrans,testResult); RealVector result = prod(trans(AInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } //now we test all combinations of transpositions //for blas::Unit-matrices std::cout<<"a"<(A,testResult); RealVector result = prod(unitAInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"b"<(trans(A),testResult); RealVector result = prod(trans(unitAInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"c"<(A,testResult); RealVector result = prod(input,unitAInv); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"d"<(trans(A),testResult); RealVector result = prod(input,trans(unitAInv)); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"e"< AcolMaj = A; RealVector testResult = input; blas::solveTriangularSystemInPlace(AcolMaj,testResult); RealVector result = prod(unitAInv,input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } std::cout<<"f"<(Atrans,testResult); RealVector result = prod(trans(unitAInv),input); double error = norm_inf(result-testResult); BOOST_CHECK_SMALL(error, 1.e-12); } } //for the remaining functions, we can use random systems and check, whether they are okay BOOST_AUTO_TEST_CASE( LinAlg_Solve_Vector ){ unsigned int NumTests = 100; std::size_t Dimensions = 50; std::cout<<"blas::solveSystem vector"<(A,x,b); RealVector test = prod(A,x); double error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); //first test trans(A)X=B blas::solveSymmPosDefSystem(trans(A),x,b); test = prod(trans(A),x); error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); //now test XA=B blas::solveSymmPosDefSystem(A,x,b); test = prod(x,A); error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); //now test Xtrans(A)=B blas::solveSymmPosDefSystem(trans(A),x,b); test = prod(x,trans(A)); error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); } } BOOST_AUTO_TEST_CASE( LinAlg_Solve_Symmetric_Approximated_Vector ){ unsigned int NumTests = 100; std::size_t Dimensions = 50; std::cout<<"approximately blas::solve Symmetric vector"<(A,X,B); RealMatrix test = prod(A,X); double error = norm_inf(test-B); BOOST_CHECK_SMALL(error,1.e-12); //first test trans(A)X=B blas::solveSymmPosDefSystem(trans(A),X,B); test = prod(trans(A),X); error = norm_inf(test-B); BOOST_CHECK_SMALL(error,1.e-12); //now test XA=B blas::solveSymmPosDefSystem(ARight,X,B); test = prod(X,ARight); error = norm_inf(test-B); 
BOOST_CHECK_SMALL(error,1.e-12); //now test Xtrans(A)=B blas::solveSymmPosDefSystem(trans(ARight),X,B); test = prod(X,trans(ARight)); error = norm_inf(test-B); BOOST_CHECK_SMALL(error,1.e-12); } } //this test tests, whether the semi-definite system solver can handle full rank matrices and arbitrary right-hand vectors BOOST_AUTO_TEST_CASE( LinAlg_solveSymmSemiDefiniteSystemInPlace_fullRank_Vector){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::cout<<"blas::solve symmetric semi-definite matrix full rank vector LHS"<(A,x); RealVector test = prod(A,x); double error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); } //System x^TA=b^T { RealVector x=b; blas::solveSymmSemiDefiniteSystemInPlace(A,x); RealVector test = prod(x,A); double error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); } } } BOOST_AUTO_TEST_CASE( LinAlg_solveSymmSemiDefiniteSystemInPlace_RankK_Vector ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::size_t Rank = 10; std::cout<<"blas::solve symmetric semi-definite matrix general LHS"<(A,x); RealVector test = prod(AInv,b); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-12); } //System XA=B { RealVector x=b; blas::solveSymmSemiDefiniteSystemInPlace(A,x); RealVector test = prod(b,AInv); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-12); } } } //this test tests, whether the semi-definite system solver can handle full rank matrices and generate proper inverses BOOST_AUTO_TEST_CASE( LinAlg_solveSymmSemiDefiniteSystemInPlace_fullRank_Matrix_Inverse ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::cout<<"blas::solve symmetric semi-definite matrix full rank inverse"<(A,AInv); //should be left and right inverse RealMatrix resultLeft(Dimensions,Dimensions); RealMatrix resultRight(Dimensions,Dimensions); axpy_prod(AInv,A,resultLeft); axpy_prod(A,AInv,resultRight); double errorSame = norm_inf(resultLeft-resultRight); double errorLeft = norm_inf(resultLeft - RealIdentityMatrix(Dimensions)); double errorRight = norm_inf(resultRight - RealIdentityMatrix(Dimensions)); BOOST_CHECK_SMALL(errorSame,1.e-13); BOOST_CHECK_SMALL(errorLeft,1.e-10); BOOST_CHECK_SMALL(errorRight,1.e-10); } //System XA=I { RealMatrix AInv(Dimensions,Dimensions,0.0); for(std::size_t i = 0; i != Dimensions; ++i){ AInv(i,i) = 1.0; } blas::solveSymmSemiDefiniteSystemInPlace(A,AInv); //should be left and right inverse RealMatrix resultLeft(Dimensions,Dimensions); RealMatrix resultRight(Dimensions,Dimensions); axpy_prod(AInv,A,resultLeft); axpy_prod(A,AInv,resultRight); double errorSame = norm_inf(resultLeft-resultRight); double errorLeft = norm_inf(resultLeft - RealIdentityMatrix(Dimensions)); double errorRight = norm_inf(resultRight - RealIdentityMatrix(Dimensions)); BOOST_CHECK_SMALL(errorSame,1.e-13); BOOST_CHECK_SMALL(errorLeft,1.e-10); BOOST_CHECK_SMALL(errorRight,1.e-10); } } } //this test tests, whether the semi-definite system solver can handle full rank matrices and arbitrary right-hand sides BOOST_AUTO_TEST_CASE( LinAlg_solveSymmSemiDefiniteSystemInPlace_fullRank_Matrix ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::size_t NumRhs = 21; std::cout<<"blas::solve symmetric semi-definite matrix full rank general LHS"<(A,X); RealMatrix test = prod(A,X); double error = norm_inf(test-B); BOOST_CHECK_SMALL(error,1.e-12); } //System XA=B { RealMatrix X=B; blas::solveSymmSemiDefiniteSystemInPlace(ARight,X); RealMatrix test = prod(X,ARight); double error = norm_inf(test-B); BOOST_CHECK_SMALL(error,1.e-12); } } } 
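// --- Editor's sketch (not part of the original suite). Every random test in
// this file follows the same residual pattern: build a right-hand side from a
// known system, solve, then check norm_inf(A*x - b). The 2x2 instance below
// makes the pattern concrete, using Cramer's rule instead of a Shark solver:
BOOST_AUTO_TEST_CASE( LinAlg_Solve_Sketch_Cramer2x2 ){
	// A = [2 1; 1 3], b = (5,10); det(A) = 2*3 - 1*1 = 5
	double a00 = 2, a01 = 1, a10 = 1, a11 = 3;
	double b0 = 5, b1 = 10;
	double det = a00*a11 - a01*a10;
	double x0 = (b0*a11 - a01*b1)/det; // = (15-10)/5 = 1
	double x1 = (a00*b1 - b0*a10)/det; // = (20-5)/5 = 3
	// the residual check used throughout this suite, written out per entry
	BOOST_CHECK_SMALL(a00*x0 + a01*x1 - b0, 1.e-14);
	BOOST_CHECK_SMALL(a10*x0 + a11*x1 - b1, 1.e-14);
}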
BOOST_AUTO_TEST_CASE( LinAlg_solveSymmSemiDefiniteSystemInPlace_RankK_Matrix ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::size_t Rank = 10; std::size_t NumRhs = 21; std::cout<<"blas::solve symmetric semi-definite matrix general LHS"<(A,X); RealMatrix test = prod(AInv,B); double error = norm_inf(test-X); BOOST_CHECK_SMALL(error,1.e-12); } //System XA=B { RealMatrix X=B; blas::solveSymmSemiDefiniteSystemInPlace(ARight,X); RealMatrix test = prod(B,ARightInv); double error = norm_inf(test-X); BOOST_CHECK_SMALL(error,1.e-12); } } } //first test that we can actually invert invertible matrices BOOST_AUTO_TEST_CASE( LinAlg_generalSolveSystemInPlace_Invertible ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::cout<<"blas::solve general matrix full rank vector LHS"<(A,x); RealVector test = prod(A,x); double error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); } //System x^TA=b^T { RealVector x=b; blas::generalSolveSystemInPlace(A,x); RealVector test = prod(x,A); double error = norm_inf(test-b); BOOST_CHECK_SMALL(error,1.e-12); } } } //now test square symmetric matrices without full rank BOOST_AUTO_TEST_CASE( LinAlg_generalSolveSystemInPlace_RankK ){ std::size_t NumTests = 100; std::size_t Dimensions = 50; std::size_t Rank = 20; std::cout<<"blas::solve general semi-definite matrix general LHS"<(A,x); RealVector test = prod(AInv,b); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-12); } //System x^TA=b^T { RealVector x=b; blas::generalSolveSystemInPlace(A,x); RealVector test = prod(b,AInv); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-12); } } } //rectangular mxn matrix with rank k, m < n BOOST_AUTO_TEST_CASE( LinAlg_generalSolveSystemInPlace_Rectangular1_RankK ){ std::size_t NumTests = 100; std::size_t M = 31; std::size_t N = 50; std::size_t Rank = 20; std::cout<<"blas::solve rectangular matrix m(A,x); RealVector test = prod(AInv,bM); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-11); } //System x^TA=b^T { RealVector x=bN; blas::generalSolveSystemInPlace(A,x); RealVector test = prod(bN,AInv); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-11); } } } //rectangular mxn matrix with rank k, m < n BOOST_AUTO_TEST_CASE( LinAlg_generalSolveSystemInPlace_Rectangular2_RankK ){ std::size_t NumTests = 100; std::size_t M = 50; std::size_t N = 31; std::size_t Rank = 20; std::cout<<"blas::solve rectangular matrix m>n general LHS"<(A,x); RealVector test = prod(AInv,bM); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-12); } //System x^TA=b^T { RealVector x=bN; blas::generalSolveSystemInPlace(A,x); RealVector test = prod(bN,AInv); double error = norm_inf(test-x); BOOST_CHECK_SMALL(error,1.e-12); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/sumRows.cpp000066400000000000000000000033241314655040000170370ustar00rootroot00000000000000#define BOOST_TEST_MODULE LinAlg_Sum_Rows #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (LinAlg_sumRows) BOOST_AUTO_TEST_CASE( LinAlg_sum_rows){ std::size_t rows = 101; std::size_t columns = 89; RealMatrix A(rows, columns); RealVector testResult(columns,0.0); for(std::size_t j = 0; j != columns; ++j){ for(std::size_t i = 0; i != rows; ++i){ A(i,j) = Rng::uni(0,1); } } RealMatrix Atrans = trans(A); //test implementation for(std::size_t i = 0; i != rows; ++i){ testResult+=row(A,i); } //sum_rows with row major argument RealVector test1 = sum_rows(A); double error = norm_2(testResult-test1); BOOST_CHECK_SMALL(error,1.e-15); 
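// --- Editor's illustration (not in the original test): sum_rows adds the rows
// of a matrix, yielding one entry per column. For [1 2; 3 4] that is (4,6):
{
	RealMatrix tiny(2,2);
	tiny(0,0) = 1; tiny(0,1) = 2; tiny(1,0) = 3; tiny(1,1) = 4;
	RealVector s = sum_rows(tiny);
	BOOST_CHECK_SMALL(s(0) - 4.0, 1.e-15);
	BOOST_CHECK_SMALL(s(1) - 6.0, 1.e-15);
}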
//sum_rows with column major argument RealVector test2 = sum_rows(trans(Atrans)); error = norm_2(testResult-test2); BOOST_CHECK_SMALL(error,1.e-15); } BOOST_AUTO_TEST_CASE( LinAlg_sum_columns){ std::size_t rows = 101; std::size_t columns = 89; RealMatrix A(rows, columns); RealVector testResult(rows,0.0); for(std::size_t i = 0; i != rows; ++i){ for(std::size_t j = 0; j != columns; ++j){ A(i,j) = Rng::uni(0,1); } } RealMatrix Atrans = trans(A); //test implementation for(std::size_t i = 0; i != columns; ++i){ testResult+=column(A,i); } //sum_rows with row major argument RealVector test1 = sum_columns(A); double error = norm_2(testResult-test1); BOOST_CHECK_SMALL(error,1.e-15); //sum_rows with column major argument RealVector test2 = sum_columns(trans(Atrans)); error = norm_2(testResult-test2); BOOST_CHECK_SMALL(error,1.e-15); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/LinAlg/svd.cpp000066400000000000000000000042621314655040000161560ustar00rootroot00000000000000#include "shark/LinAlg/svd.h" #include "shark/LinAlg/rotations.h" #define BOOST_TEST_MODULE LinAlg_svd #include #include using namespace shark; const size_t NumTests=1; const size_t Dim=2; //Testcases double inputMatrix[NumTests][Dim][Dim] = { { {1., 3.}, {-4., 3.} } }; //Testresults double resultSVD_W[NumTests][Dim]= { {2.91309,5.14916} }; double resultSVD_U[NumTests][Dim][Dim]= { { {-0.957092,-0.289784}, {0.289784,-0.957092} } }; double resultSVD_V[NumTests][Dim][Dim]= { { {-0.726454,0.687215}, {-0.687215,-0.726454} } }; BOOST_AUTO_TEST_SUITE (LinAlg_svd) BOOST_AUTO_TEST_CASE( LinAlg_svd ) { for(size_t test=0;test!=NumTests;++test) { RealMatrix A(Dim,Dim); RealMatrix U(Dim,Dim); RealMatrix V(Dim,Dim); RealVector w(Dim); for (size_t row = 0; row != Dim; row++) { for (size_t col = 0; col != Dim; col++) { A(row, col) = inputMatrix[test][row][col]; U(row, col) = 0.; V(row, col) = 0.; } w(row) = 0.; } //storing svd results svd(A, U, V, w); //check w-vector for(size_t i=0;i!=Dim;++i) { BOOST_CHECK_SMALL(w(i)-resultSVD_W[test][i],1.e-5); } //check U/V-vector for (size_t row = 0; row != Dim; row++) { for (size_t col = 0; col != Dim; col++) { BOOST_CHECK_SMALL(U(row , col)-resultSVD_U[test][row][col],1.e-5); BOOST_CHECK_SMALL(V(row , col)-resultSVD_V[test][row][col],1.e-5); } } } } BOOST_AUTO_TEST_CASE( LinAlg_svd_big ) { for(std::size_t test = 0; test != 10; ++test){ //first generate a suitable eigenvalue problem matrix A RealMatrix R = blas::randomRotationMatrix(5); RealMatrix lambda(5,5); lambda.clear(); for(std::size_t i = 0; i != 3; ++i){ lambda(i,i) = double(i); } RealMatrix A = prod(R,lambda); A = prod(A,trans(R)); RealMatrix U(5,5); RealMatrix V(5,5); RealVector w(5); //storing svd results svd(A, U, V, w); //recreate A for(std::size_t i = 0; i != 5; ++i){ lambda(i,i) = w(i); } RealMatrix ATest = prod(U,lambda); ATest = prod(ATest,trans(V)); //test reconstruction error BOOST_CHECK_SMALL(norm_1(A-ATest)/25.0,1.e-12); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/000077500000000000000000000000001314655040000147275ustar00rootroot00000000000000Shark-3.1.4/Test/Models/Autoencoder.cpp000066400000000000000000000067701314655040000177150ustar00rootroot00000000000000#define BOOST_TEST_MODULE MODEL_AUTOENCODER #include #include #include "derivativeTestHelper.h" #include #include #include using namespace std; using namespace boost::archive; using namespace shark; //check that the structure is correct, i.e. 
matrices have the right form and setting parameters works BOOST_AUTO_TEST_SUITE (Models_Autoencoder) BOOST_AUTO_TEST_CASE( AUTOENCODER_Structure) { std::size_t weightNum = 2*2*3+5; Autoencoder net; net.setStructure(2,3); BOOST_REQUIRE_EQUAL(net.hiddenBias().size(),3u); BOOST_REQUIRE_EQUAL(net.outputBias().size(),2u); BOOST_CHECK_EQUAL(net.encoderMatrix().size1(), 3u); BOOST_CHECK_EQUAL(net.encoderMatrix().size2(), 2u); BOOST_CHECK_EQUAL(net.decoderMatrix().size1(), 2u); BOOST_CHECK_EQUAL(net.decoderMatrix().size2(), 3u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t i = 0; i != net.encoderMatrix().size1(); ++i){ for(std::size_t j = 0; j != net.encoderMatrix().size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.encoderMatrix()(i,j), newParams(param)); } } for(std::size_t i = 0; i != net.decoderMatrix().size1(); ++i){ for(std::size_t j = 0; j != net.decoderMatrix().size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.decoderMatrix()(i,j), newParams(param)); } } for(std::size_t i = 0; i != 3; ++i,++param){ BOOST_CHECK_EQUAL(net.hiddenBias()(i), newParams(param)); } BOOST_CHECK_EQUAL(net.outputBias()(0), newParams(param)); BOOST_CHECK_EQUAL(net.outputBias()(1), newParams(param+1)); } BOOST_AUTO_TEST_CASE( AUTOENCODER_Value ) { Autoencoder net; net.setStructure(3,2); std::size_t numParams = 2*3*2+5; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(3); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); point(2)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden = sigmoid(prod(net.encoderMatrix(),point)+net.hiddenBias()); RealVector output = tanh(prod(net.decoderMatrix(),hidden)+net.outputBias()); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); } //now also test batches RealMatrix inputs(100,3); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); inputs(i,2)= Rng::uni(-5,5); } testBatchEval(net,inputs); } BOOST_AUTO_TEST_CASE( AUTOENCODER_WeightedDerivatives) { Autoencoder net; net.setStructure(2,5); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/CARTClassifier.cpp000066400000000000000000000046321314655040000201760ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for CART classifier * * * * * * \author K. Hansen * \date 2012 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE Models_CARTClassifier #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_CARTClassifier) BOOST_AUTO_TEST_CASE( CART_Classifier ) { //Test data std::vector input(10, RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; input[6](0)=-4; input[6](1)=-3; input[7](0)=-2; input[7](1)=-1; input[8](0)=-6; input[8](1)=-8; input[9](0)=-2; input[9](1)=-2; std::vector target(10); target[0]=0; target[1]=0; target[2]=1; target[3]=1; target[4]=2; target[5]=2; target[6]=3; target[7]=3; target[8]=4; target[9]=4; ClassificationDataset dataset = createLabeledDataFromRange(input, target); CARTTrainer trainer; CARTClassifier model; trainer.train(model, dataset); Data prediction = model(dataset.inputs()); ZeroOneLoss loss; double error = loss.eval(dataset.labels(), prediction); std::cout << model.countAttributes() << std::endl; BOOST_CHECK(error == 0.0); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/CMAC.cpp000066400000000000000000000127201314655040000161400ustar00rootroot00000000000000#include #include #include #include #include #define BOOST_TEST_MODULE Models_CMAC #include #include #include "derivativeTestHelper.h" #include using namespace shark; using namespace std; using namespace boost::archive; BOOST_AUTO_TEST_SUITE (Models_CMAC) BOOST_AUTO_TEST_CASE( CMAC_PARAMETERS ){ CMACMap model; model.setStructure(3,5,2,4,-1,1,true); BOOST_REQUIRE_EQUAL(model.numberOfParameters(),640); //this test simply checks whether a CMAC can learn itself RealVector testParameters(model.numberOfParameters()); for(size_t param=0;param!=model.numberOfParameters();++param) { testParameters(param)=Rng::gauss(0,1); } model.setParameterVector( testParameters); model.setParameterVector(testParameters); RealVector retrievedParameters = model.parameterVector(); BOOST_REQUIRE_EQUAL(retrievedParameters.size(),model.numberOfParameters()); for (size_t i=0; i!=model.numberOfParameters(); ++i) BOOST_CHECK_EQUAL(retrievedParameters(i), testParameters(i)); } BOOST_AUTO_TEST_CASE( CMAC_DERIVATIVE ) { for(std::size_t i = 0; i != 10000; ++i){ CMACMap model; model.setStructure(3,5,2,4,-1,1,true); //this test simply checks whether a CMAC can learn itself RealVector testParameters(model.numberOfParameters()); for(size_t param=0;param!=model.numberOfParameters();++param) { testParameters(param)=Rng::gauss(0,1); } model.setParameterVector( testParameters); // Test the general derivative RealVector coefficients(5); coefficients(0) = Rng::uni(-1,1); coefficients(1) = Rng::uni(-1,1); coefficients(2) = Rng::uni(-1,1); coefficients(3) = Rng::uni(-1,1); coefficients(4) = Rng::uni(-1,1); RealVector testInput(3); testInput(0) = 
Rng::uni(-1,1); testInput(1) = Rng::uni(-1,1); testInput(2) = Rng::uni(-1,1); testWeightedDerivative(model,testInput,coefficients); //testWeightedDerivative(model); } } BOOST_AUTO_TEST_CASE( CMAC_COPY ) { //the target cmac CMACMap cmac; cmac.setStructure(2,1,2,2,-1,1,true); //this test simply checks whether a CMAC can learn itself RealVector testParameters(cmac.numberOfParameters()); for(size_t param=0;param!=cmac.numberOfParameters();++param) { testParameters(param)=Rng::gauss(0,1); } cmac.setParameterVector( testParameters); //create dataset for training std::vector data; std::vector target; RealVector input(cmac.inputSize()); for (size_t i=0; i<1000; i++) { for(size_t j=0;j!=cmac.inputSize();++j) { input(j)=Rng::uni(-1,1); } data.push_back(input); target.push_back(cmac(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //the test cmac with the same tiling CMACMap cmacTest=cmac; RealVector parameters(cmac.numberOfParameters()); for(size_t param=0;param!=cmacTest.numberOfParameters();++param) { parameters(param)=Rng::gauss(0,1); } cmacTest.setParameterVector(parameters); IRpropPlus optimizer; SquaredLoss<> loss; ErrorFunction mse(dataset,&cmacTest,&loss); optimizer.init(mse); // train the cmac double error=0; for(size_t iteration=0;iteration<500;++iteration) { optimizer.step(mse); error=optimizer.solution().value; } std::cout<<"Optimization done. Error:"< data; std::vector target; RealVector input(cmac.inputSize()); for (size_t i=0; i<1000; i++) { for(size_t j=0;j!=cmac.inputSize();++j) { input(j)=Rng::uni(-1,1); } data.push_back(input); target.push_back(cmac(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the CMAC ostringstream outputStream; TextOutArchive oa(outputStream); oa << cmac; //and create a new CMAC from the serialization CMACMap cmacDeserialized; istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> cmacDeserialized; //test whether serialization works //first simple parameter and topology check BOOST_CHECK_SMALL(norm_2(cmacDeserialized.parameterVector() - testParameters),1.e-50); BOOST_REQUIRE_EQUAL(cmacDeserialized.inputSize(),cmac.inputSize()); BOOST_REQUIRE_EQUAL(cmacDeserialized.outputSize(),cmac.outputSize()); for (size_t i=0; i<1000; i++) { RealVector output = cmacDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/ConcatenatedModel.cpp000066400000000000000000000237351314655040000210160ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_CONCATENATED_MODEL #include #include #include "derivativeTestHelper.h" #include #include #include #include #include #include using namespace std; using namespace boost::archive; using namespace shark; BOOST_AUTO_TEST_SUITE (Models_ConcatenatedModel) BOOST_AUTO_TEST_CASE( CONCATENATED_MODEL_Value ) { FFNet net1; Softmax net2(2); net1.setStructure(3,5,2); size_t modelParameters = net1.numberOfParameters(); ConcatenatedModel model (&net1,&net2); BOOST_CHECK_EQUAL(model.numberOfParameters(),modelParameters); //parameters Uniform< Rng::rng_type > uni( shark::Rng::globalRng,-1,1); RealVector modelParams(modelParameters); RealVector net1Params(net1.numberOfParameters()); for(size_t i=0;i!=net1Params.size();++i){ net1Params(i)=uni(); modelParams(i)=net1Params(i); } RealVector net2Params(net2.numberOfParameters()); for(size_t i=0;i!=net2Params.size();++i){ net2Params(i)=uni(); modelParams(i+net1Params.size())=net2Params(i); 
} //check whether parameter copying is working model.setParameterVector(modelParams); double error1=norm_sqr(net1Params-net1.parameterVector()); double error2=norm_sqr(net2Params-net2.parameterVector()); double error3=norm_sqr(modelParams-model.parameterVector()); BOOST_CHECK_EQUAL(error1,0.0); BOOST_CHECK_EQUAL(error2,0.0); BOOST_CHECK_EQUAL(error3,0.0); //test Results; RealVector input(3); for(size_t i=0;i!=3;++i){ input(i)=uni(); } RealVector intermediateResult = net1(input); RealVector endResult = net2(intermediateResult); //evaluate point RealVector modelResult = model(input); double modelError = norm_sqr(modelResult-endResult); BOOST_CHECK_SMALL(modelError,1.e-35); } BOOST_AUTO_TEST_CASE( CONCATENATED_MODEL_weightedParameterDerivative ) { FFNet net1; LinearModel<> net2; net1.setStructure(3,5,2); net2.setStructure(2,4); ConcatenatedModel model (&net1,&net2); BOOST_CHECK_EQUAL(model.optimizeFirstModelParameters(),1); BOOST_CHECK_EQUAL(model.optimizeSecondModelParameters(),1); //test1: all activated { //parameters size_t modelParameters = net1.numberOfParameters()+net2.numberOfParameters(); BOOST_REQUIRE_EQUAL(model.numberOfParameters(), modelParameters); RealVector parameters(modelParameters); RealVector coefficients(4); RealVector point(3); for(unsigned int test = 0; test != 10; ++test){ for(size_t i = 0; i != modelParameters;++i){ parameters(i) = Rng::uni(-5,5); } for(size_t i = 0; i != 4;++i){ coefficients(i) = Rng::uni(-5,5); } for(size_t i = 0; i != 3;++i){ point(i) = Rng::uni(-5,5); } model.setParameterVector(parameters); testWeightedDerivative(model, point, coefficients, 1.e-5,1.e-8); } } //test1: only first model { //parameters size_t modelParameters = net1.numberOfParameters(); model.optimizeFirstModelParameters() = true; model.optimizeSecondModelParameters() = false; BOOST_REQUIRE_EQUAL(model.numberOfParameters(), modelParameters); RealVector parameters(modelParameters); RealVector coefficients(4); RealVector point(3); for(unsigned int test = 0; test != 10; ++test){ for(size_t i = 0; i != modelParameters;++i){ parameters(i) = Rng::uni(-5,5); } for(size_t i = 0; i != 4;++i){ coefficients(i) = Rng::uni(-5,5); } for(size_t i = 0; i != 3;++i){ point(i) = Rng::uni(-5,5); } model.setParameterVector(parameters); testWeightedDerivative(model, point, coefficients, 1.e-5,1.e-8); } } //test2: only second model { //parameters size_t modelParameters = net2.numberOfParameters(); model.optimizeFirstModelParameters() = false; model.optimizeSecondModelParameters() = true; BOOST_REQUIRE_EQUAL(model.numberOfParameters(),modelParameters); RealVector parameters(modelParameters); RealVector coefficients(4); RealVector point(3); for(unsigned int test = 0; test != 10; ++test){ for(size_t i = 0; i != modelParameters;++i){ parameters(i) = Rng::uni(-5,5); } for(size_t i = 0; i != 4;++i){ coefficients(i) = Rng::uni(-5,5); } for(size_t i = 0; i != 3;++i){ point(i) = Rng::uni(-5,5); } model.setParameterVector(parameters); testWeightedDerivative(model, point, coefficients, 1.e-5,1.e-8); } } //test3: no parameters { //parameters model.optimizeFirstModelParameters() = false; model.optimizeSecondModelParameters() = false; BOOST_REQUIRE_EQUAL(model.numberOfParameters(), 0); } } BOOST_AUTO_TEST_CASE( CONCATENATED_MODEL_weightedInputDerivative ) { Softmax net1(10); Softmax net2(10); size_t modelParameters = net1.numberOfParameters()+net2.numberOfParameters(); ConcatenatedModel model (&net1,&net2); RealVector parameters(modelParameters); RealVector coefficients(10); RealVector point(10); for(unsigned int test = 
0; test != 100; ++test){ for(size_t i = 0; i != modelParameters;++i){ parameters(i) = Rng::uni(-10,10); } for(size_t i = 0; i != 10;++i){ coefficients(i) = Rng::uni(-10,10); } for(size_t i = 0; i != 10;++i){ point(i) = Rng::uni(-10,10); } model.setParameterVector(parameters); testWeightedInputDerivative(model, point, coefficients, 1.e-5,1.e-5); } } BOOST_AUTO_TEST_CASE( CONCATENATED_MODEL_SERIALIZE ) { Softmax net1(10); Softmax net2(10); ConcatenatedModel model (&net1,&net2); //parameters Uniform<> uni(Rng::globalRng,-1,1); RealVector testParameters(model.numberOfParameters()); for(size_t i=0;i!=model.numberOfParameters();++i){ testParameters(i)=uni(); } model.setParameterVector(testParameters); //the test is, that after deserialization, the results must be identical //so we generate some data first std::vector data; std::vector target; RealVector input(10); RealVector output(10); for (size_t i=0; i<1000; i++) { for(size_t j=0;j!=10;++j) { input(j)=Rng::uni(-1,1); } data.push_back(input); target.push_back(model(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the model ostringstream outputStream; TextOutArchive oa(outputStream); oa << model; //and create a new model from the serialization Softmax netTest1; Softmax netTest2; ConcatenatedModel modelDeserialized (&netTest1,&netTest2); istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> modelDeserialized; //test whether serialization works //first simple parameter and topology check BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-50); BOOST_REQUIRE_EQUAL(net1.inputSize(),netTest1.inputSize()); BOOST_REQUIRE_EQUAL(net2.inputSize(),netTest2.inputSize()); for (size_t i=0; i<1000; i++) { RealVector output = modelDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_CASE( CONCATENATED_MODEL_OPERATOR ) { FFNet net1; Softmax net2(2); FFNet net3; net1.setStructure(2,5,2); net3.setStructure(2,5,2); size_t modelParameters2 = net1.numberOfParameters()+net2.numberOfParameters(); size_t modelParameters3 = net1.numberOfParameters()+net2.numberOfParameters()+net3.numberOfParameters(); ConcatenatedModel model2 = net1>>net2; ConcatenatedModel model3 = net1>>net2>>net3; BOOST_CHECK_EQUAL(model2.numberOfParameters(),modelParameters2); BOOST_CHECK_EQUAL(model3.numberOfParameters(),modelParameters3); //parameters Uniform< Rng::rng_type > uni( shark::Rng::globalRng,-1,1); RealVector modelParams2(modelParameters2); RealVector modelParams3(modelParameters3); RealVector net1Params(net1.numberOfParameters()); for(size_t i=0;i!=net1Params.size();++i){ net1Params(i)=uni(); modelParams2(i)=net1Params(i); modelParams3(i)=net1Params(i); } RealVector net2Params(net2.numberOfParameters()); for(size_t i=0;i!=net2Params.size();++i){ net2Params(i)=uni(); modelParams2(i+net1Params.size())=net2Params(i); modelParams3(i+net1Params.size())=net2Params(i); } RealVector net3Params(net3.numberOfParameters()); for(size_t i=0;i!=net3Params.size();++i){ net3Params(i)=uni(); modelParams3(i+modelParameters2)=net3Params(i); } //check whether parameter copying is working //two models model2.setParameterVector(modelParams2); double error1=norm_sqr(net1Params-net1.parameterVector()); double error2=norm_sqr(net2Params-net2.parameterVector()); double errorComplete=norm_sqr(modelParams2-model2.parameterVector()); BOOST_CHECK_EQUAL(error1,0.0); BOOST_CHECK_EQUAL(error2,0.0); BOOST_CHECK_EQUAL(errorComplete,0.0); 
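// --- Editor's note (illustrative block, not in the original test): comparing
// norm_sqr(...) to exactly 0.0 above asserts that the copied parameter vectors
// match bit for bit, since norm_sqr is the sum of squared entries:
{
	RealVector v(2); v(0) = 3.0; v(1) = 4.0;
	BOOST_CHECK_SMALL(norm_sqr(v) - 25.0, 1.e-15);
}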
//clear parameters initRandomNormal(net1,1); initRandomNormal(net2,1); initRandomNormal(net3,1); //three models model3.setParameterVector(modelParams3); error1=norm_sqr(net1Params-net1.parameterVector()); error2=norm_sqr(net2Params-net2.parameterVector()); double error3=norm_sqr(net3Params-net3.parameterVector()); errorComplete=norm_sqr(modelParams3-model3.parameterVector()); BOOST_CHECK_EQUAL(error1,0.0); BOOST_CHECK_EQUAL(error2,0.0); BOOST_CHECK_EQUAL(error3,0.0); BOOST_CHECK_EQUAL(errorComplete,0.0); //test Results; for(std::size_t trial = 0; trial != 1000; ++trial){ RealVector input(2); for(size_t i=0;i!=2;++i){ input(i)=uni(); } RealVector intermediateResult1 = net1(input); RealVector intermediateResult2 = net2(intermediateResult1); RealVector endResult = net3(intermediateResult2); //evaluate point RealVector modelResult2 = model2(input); RealVector modelResult3 = model3(input); double modelError2 = norm_sqr(modelResult2-intermediateResult2); double modelError3 = norm_sqr(modelResult3-endResult); BOOST_CHECK_SMALL(modelError2,1.e-35); BOOST_CHECK_SMALL(modelError3,1.e-35); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/ConvexCombination.cpp000066400000000000000000000126631314655040000210700ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE Models_ConvexCombination #include #include #include "derivativeTestHelper.h" #include using namespace std; using namespace boost::archive; using namespace shark; //chck that parameters are set correctly BOOST_AUTO_TEST_SUITE (Models_ConvexCombination) BOOST_AUTO_TEST_CASE( Models_ConvexCombination_Parameters) { std::size_t numTrials = 10; std::size_t inputDim = 10; std::size_t outputDim = 7; for(std::size_t t = 0; t != numTrials; ++t){ //create the ground truth unnormalized and normalized weight matrices RealMatrix paramW(outputDim,inputDim); RealMatrix paramWUnnorm(outputDim,inputDim); for(std::size_t i = 0; i != outputDim; ++i){ for(std::size_t j = 0; j != inputDim; ++j){ paramWUnnorm(i,j) = std::abs(Rng::gauss(0,1)); paramW(i,j) = paramWUnnorm(i,j); } row(paramW,i) /= sum(row(paramW,i)); } RealVector paramsUnnorm = log(blas::adapt_vector(inputDim*outputDim,¶mWUnnorm(0,0))); RealVector params = log(blas::adapt_vector(inputDim*outputDim,¶mW(0,0))); //check normalized parameters { ConvexCombination model(inputDim,outputDim); model.setParameterVector(params); BOOST_REQUIRE_EQUAL(model.numberOfParameters(),inputDim * outputDim); //check that the constructed weight matrix is correct double error = norm_inf(paramW - model.weights()); BOOST_CHECK_SMALL(error, 1.e-5); //check that the returned parameter vector is correct double errorParams = norm_inf(params - model.parameterVector()); BOOST_CHECK_SMALL(errorParams, 1.e-5); } //check the unnormalized parameters { ConvexCombination model(inputDim,outputDim); model.setParameterVector(paramsUnnorm); BOOST_REQUIRE_EQUAL(model.numberOfParameters(),inputDim * outputDim); //check that the constructed weight matrix is correct double error = norm_inf(paramW - model.weights()); BOOST_CHECK_SMALL(error, 1.e-5); //check that the returned parameter vector is correct double errorParams = norm_inf(params - model.parameterVector()); BOOST_CHECK_SMALL(errorParams, 1.e-5); } } } //check that the model output is the same as the one of a linear model BOOST_AUTO_TEST_CASE( Models_ConvexCombination_Value) { std::size_t numTrials = 10; std::size_t inputDim = 10; std::size_t outputDim = 7; std::size_t numPoints = 100; for(std::size_t t = 0; t != numTrials; ++t){ ConvexCombination 
model(inputDim,outputDim); BOOST_REQUIRE_EQUAL(model.numberOfParameters(),inputDim * outputDim); BOOST_REQUIRE_EQUAL(model.inputSize(),inputDim); BOOST_REQUIRE_EQUAL(model.outputSize(),outputDim); RealVector params(inputDim * outputDim); for(std::size_t i = 0; i != params.size(); ++i){ params(i) = Rng::gauss(0,1); } model.setParameterVector(params); LinearModel<> modelTest(model.weights()); RealMatrix inputs(numPoints,inputDim); for(std::size_t i = 0; i != numPoints; ++i){ for(std::size_t j = 0; j != inputDim; ++j){ inputs(i,j) = Rng::gauss(0,1); } } RealMatrix testOutput = modelTest(inputs); RealMatrix modelOutput = model(inputs); BOOST_REQUIRE_EQUAL(modelOutput.size1(), numPoints); BOOST_REQUIRE_EQUAL(modelOutput.size2(), outputDim); double error = norm_inf(testOutput - modelOutput); BOOST_CHECK_SMALL(error, 1.e-10); } } BOOST_AUTO_TEST_CASE( Models_ConvexCombination_Derivatives) { ConvexCombination model(3, 5); BOOST_REQUIRE_EQUAL(model.numberOfParameters(), 15u); testWeightedDerivative(model,10000); testWeightedInputDerivative(model,10000); } BOOST_AUTO_TEST_CASE( Models_ConvexCombination_SERIALIZE ) { //the target modelwork ConvexCombination model(2, 3); //create random parameters RealVector testParameters(model.numberOfParameters()); for(size_t param=0;param!=model.numberOfParameters();++param) { testParameters(param)=Rng::gauss(0,1); } model.setParameterVector( testParameters); testParameters = model.parameterVector(); //allow transformation of parameters //the test is, that after deserialization, the results must be identical //so we generate some data first std::vector data; std::vector target; RealVector input(model.inputSize()); RealVector output(model.outputSize()); for (size_t i=0; i<1000; i++) { for(size_t j=0;j!=model.inputSize();++j) { input(j)=Rng::uni(-1,1); } data.push_back(input); target.push_back(model(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the model ostringstream outputStream; TextOutArchive oa(outputStream); oa << model; //and create a new model from the serialization ConvexCombination modelDeserialized; istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> modelDeserialized; //test whether serialization works //first simple parameter and topology check BOOST_REQUIRE_EQUAL(modelDeserialized.numberOfParameters(),model.numberOfParameters()); BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-50); BOOST_REQUIRE_EQUAL(modelDeserialized.inputSize(),model.inputSize()); BOOST_REQUIRE_EQUAL(modelDeserialized.outputSize(),model.outputSize()); for (size_t i=0; i<1000; i++) { RealVector output = modelDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/FFNet.cpp000066400000000000000000000575701314655040000164130ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_FFNET #include #include #include "derivativeTestHelper.h" #include #include #include using namespace std; using namespace boost::archive; using namespace shark; double activation(double a) { return 1.0/(1.0+std::exp(-a)); } //check that the structure is correct, i.e. matrice have the right form and setting parameters works BOOST_AUTO_TEST_SUITE (Models_FFNet) BOOST_AUTO_TEST_CASE( FFNET_structure_Normal) { //no bias { std::size_t weightNum = 2*3+3*4; FFNet net; net.setStructure(2,3,4,FFNetStructures::Normal,false); BOOST_REQUIRE_EQUAL(net.bias().size(),0u);// no bias! 
BOOST_REQUIRE_EQUAL(net.layerMatrices().size(),2u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size1(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size2(), 2u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size1(), 4u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size2(), 3u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size1(), 0u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size2(), 0u); BOOST_REQUIRE_EQUAL(net.backpropMatrices().size(),2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size1(), 2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size2(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size1(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size2(), 4u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t k = 0; k != 2; ++k){ for(std::size_t i = 0; i != net.layerMatrices()[k].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[k].size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.layerMatrices()[k](i,j), newParams(param)); BOOST_CHECK_EQUAL(net.backpropMatrices()[k](j,i), net.layerMatrices()[k](i,j)); } } } } //no bias, 3 layers { std::size_t weightNum = 2*3+3*4+4*5; FFNet net; net.setStructure(2,3,4,5,FFNetStructures::Normal,false); BOOST_REQUIRE_EQUAL(net.bias().size(),0u);// no bias! BOOST_REQUIRE_EQUAL(net.layerMatrices().size(),3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size1(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size2(), 2u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size1(), 4u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size2(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[2].size1(), 5u); BOOST_CHECK_EQUAL(net.layerMatrices()[2].size2(), 4u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size1(), 0u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size2(), 0u); BOOST_REQUIRE_EQUAL(net.backpropMatrices().size(),3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size1(), 2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size2(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size1(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size2(), 4u); BOOST_CHECK_EQUAL(net.backpropMatrices()[2].size1(), 4u); BOOST_CHECK_EQUAL(net.backpropMatrices()[2].size2(), 5u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t k = 0; k != 3; ++k){ for(std::size_t i = 0; i != net.layerMatrices()[k].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[k].size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.layerMatrices()[k](i,j), newParams(param)); BOOST_CHECK_EQUAL(net.backpropMatrices()[k](j,i), net.layerMatrices()[k](i,j)); } } } } //with bias { std::size_t weightNum = 2*3+3*4+7; FFNet net; 
net.setStructure(2,3,4,FFNetStructures::Normal,true); BOOST_REQUIRE_EQUAL(net.bias().size(),7u); BOOST_REQUIRE_EQUAL(net.layerMatrices().size(),2u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size1(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size2(), 2u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size1(), 4u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size2(), 3u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size1(),0u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size2(), 0u); BOOST_REQUIRE_EQUAL(net.backpropMatrices().size(),2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size1(), 2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size2(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size1(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size2(), 4u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t k = 0; k != 2; ++k){ for(std::size_t i = 0; i != net.layerMatrices()[k].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[k].size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.layerMatrices()[k](i,j), newParams(param)); BOOST_CHECK_EQUAL(net.backpropMatrices()[k](j,i), net.layerMatrices()[k](i,j)); } } } for(std::size_t i = 0; i != 7; ++i){ BOOST_CHECK_EQUAL(net.bias()(i), newParams(weightNum-7+i)); } } } BOOST_AUTO_TEST_CASE( FFNET_structure_InputOutputShortcut) { //for this test, we add another layer as the 3 layer version is redirected to Full- the next test //no bias, 3 layers { std::size_t weightNum = 2*3+3*4+4*5+2*5; std::size_t shortcutStart = 2*3+3*4+4*5; FFNet net; net.setStructure(2,3,4,5,FFNetStructures::InputOutputShortcut,false); BOOST_REQUIRE_EQUAL(net.bias().size(),0u);// no bias! 
BOOST_REQUIRE_EQUAL(net.layerMatrices().size(),3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size1(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size2(), 2u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size1(), 4u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size2(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[2].size1(), 5u); BOOST_CHECK_EQUAL(net.layerMatrices()[2].size2(), 4u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size1(), 5u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size2(), 2u); BOOST_REQUIRE_EQUAL(net.backpropMatrices().size(),3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size1(), 2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size2(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size1(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size2(), 4u); BOOST_CHECK_EQUAL(net.backpropMatrices()[2].size1(), 4u); BOOST_CHECK_EQUAL(net.backpropMatrices()[2].size2(), 5u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t k = 0; k != 3; ++k){ for(std::size_t i = 0; i != net.layerMatrices()[k].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[k].size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.layerMatrices()[k](i,j), newParams(param)); BOOST_CHECK_EQUAL(net.backpropMatrices()[k](j,i), net.layerMatrices()[k](i,j)); } } } //check shortcuts std::size_t pos = shortcutStart; for(std::size_t i = 0; i != 4; ++i){ for(std::size_t j = 0; j != 2; ++j,++pos){ BOOST_CHECK_EQUAL(net.inputOutputShortcut()(i,j), newParams(pos)); } } } //no bias, two layers { std::size_t weightNum = 2*3+5*4; FFNet net; net.setStructure(2,3,4,FFNetStructures::InputOutputShortcut,false); BOOST_REQUIRE_EQUAL(net.bias().size(),0u); BOOST_REQUIRE_EQUAL(net.layerMatrices().size(),2u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size1(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size2(), 2u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size1(), 4u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size2(), 5u); //shortcut is implemented by using the usual layerMatrices BOOST_CHECK_EQUAL(net.inputOutputShortcut().size1(), 0u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size2(), 0u); BOOST_REQUIRE_EQUAL(net.backpropMatrices().size(),2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size1(), 2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size2(), 7u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size1(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size2(), 4u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t k = 0; k != 2; ++k){ for(std::size_t i = 0; i != net.layerMatrices()[k].size1(); ++i){ for(std::size_t j = 0; j != 
net.layerMatrices()[k].size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.layerMatrices()[k](i,j), newParams(param)); } } } //check backprop matrices //weights from layer 1 to layer 0 for(std::size_t i = 0; i != net.layerMatrices()[0].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[0].size2(); ++j){ BOOST_CHECK_EQUAL(net.backpropMatrices()[0](j,i), net.layerMatrices()[0](i,j)); } } //weights from layer 2 to layer 0 for(std::size_t i = 0; i != net.layerMatrices()[1].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[0].size2(); ++j){ BOOST_CHECK_EQUAL(net.backpropMatrices()[0](j,i+3), net.layerMatrices()[1](i,j)); } } //weights from layer 2 to layer 1 for(std::size_t i = 0; i != net.layerMatrices()[1].size1(); ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_EQUAL(net.backpropMatrices()[1](j,i), net.layerMatrices()[1](i,j+2)); } } } //with bias, 3 layers { std::size_t weightNum = 2*3+3*4+4*5+2*5+12; std::size_t shortcutStart = 2*3+3*4+4*5+12; std::size_t biasStart = 2*3+3*4+4*5; FFNet net; net.setStructure(2,3,4,5,FFNetStructures::InputOutputShortcut,true); BOOST_REQUIRE_EQUAL(net.bias().size(),12u);// no bias! BOOST_REQUIRE_EQUAL(net.layerMatrices().size(),3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size1(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[0].size2(), 2u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size1(), 4u); BOOST_CHECK_EQUAL(net.layerMatrices()[1].size2(), 3u); BOOST_CHECK_EQUAL(net.layerMatrices()[2].size1(), 5u); BOOST_CHECK_EQUAL(net.layerMatrices()[2].size2(), 4u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size1(), 5u); BOOST_CHECK_EQUAL(net.inputOutputShortcut().size2(), 2u); BOOST_REQUIRE_EQUAL(net.backpropMatrices().size(),3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size1(), 2u); BOOST_CHECK_EQUAL(net.backpropMatrices()[0].size2(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size1(), 3u); BOOST_CHECK_EQUAL(net.backpropMatrices()[1].size2(), 4u); BOOST_CHECK_EQUAL(net.backpropMatrices()[2].size1(), 4u); BOOST_CHECK_EQUAL(net.backpropMatrices()[2].size2(), 5u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrices have the right values std::size_t param = 0; for(std::size_t k = 0; k != 3; ++k){ for(std::size_t i = 0; i != net.layerMatrices()[k].size1(); ++i){ for(std::size_t j = 0; j != net.layerMatrices()[k].size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.layerMatrices()[k](i,j), newParams(param)); BOOST_CHECK_EQUAL(net.backpropMatrices()[k](j,i), net.layerMatrices()[k](i,j)); } } } //check shortcuts std::size_t pos = shortcutStart; for(std::size_t i = 0; i != 5; ++i){ for(std::size_t j = 0; j != 2; ++j,++pos){ BOOST_CHECK_EQUAL(net.inputOutputShortcut()(i,j), newParams(pos)); } } for(std::size_t i = 0; i != 7; ++i){ BOOST_CHECK_EQUAL(net.bias()(i), newParams(biasStart+i)); } } } //todo: test Full (the simple case with 2 layers and shortcut input/output is already tested) // given that the structure is correct, we can now test eval by giving it random parameters // and random inputs and compute the result by hand. This test is quite long as we have to // check a lot of different structures. 
Also for every structure we ensure that all calls to eval // produce the same output given a set of inputs BOOST_AUTO_TEST_CASE( FFNET_Value ) { //2 layers, no shortcut, no bias { FFNet net; net.setStructure(2,3,4,FFNetStructures::Normal,false); std::size_t numParams = 2*3+3*4; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(2); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden = sigmoid(prod(net.layerMatrices()[0],point)); RealVector output = prod(net.layerMatrices()[1],hidden); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); BOOST_CHECK_SMALL(netResult(3)-output(3),1.e-12); } //now also test batches RealMatrix inputs(100,2); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); } testBatchEval(net,inputs); } //2 layers, no shortcut, bias { FFNet net; net.setStructure(2,3,4,FFNetStructures::Normal,true); std::size_t numParams = 2*3+3*4+7; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(2); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden = sigmoid(prod(net.layerMatrices()[0],point)+subrange(net.bias(),0,3)); RealVector output = prod(net.layerMatrices()[1],hidden)+subrange(net.bias(),3,7); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); BOOST_CHECK_SMALL(netResult(3)-output(3),1.e-12); } //now also test batches RealMatrix inputs(100,2); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); } testBatchEval(net,inputs); } //2 layers, shortcut, bias { FFNet net; net.setStructure(2,3,4,FFNetStructures::InputOutputShortcut,true); std::size_t numParams = 2*3+3*4+2*4+7; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(2); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden = sigmoid(prod(net.layerMatrices()[0],point)+subrange(net.bias(),0,3)); RealVector output = prod(columns(net.layerMatrices()[1],2,5),hidden); output += prod(columns(net.layerMatrices()[1],0,2),point); output += subrange(net.bias(),3,7); output = tanh(output); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); BOOST_CHECK_SMALL(netResult(3)-output(3),1.e-12); } //now also test batches RealMatrix inputs(100,2); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); } testBatchEval(net,inputs); } //3 layers, no shortcut { FFNet net; net.setStructure(2,3,4,5,FFNetStructures::Normal,false); 
std::size_t numParams = 2*3+3*4+4*5; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(2); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden1 = sigmoid(prod(net.layerMatrices()[0],point)); RealVector hidden2 = sigmoid(prod(net.layerMatrices()[1],hidden1)); RealVector output = prod(net.layerMatrices()[2],hidden2); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); BOOST_CHECK_SMALL(netResult(3)-output(3),1.e-12); BOOST_CHECK_SMALL(netResult(4)-output(4),1.e-12); } //now also test batches RealMatrix inputs(100,2); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); } testBatchEval(net,inputs); } //3 layers, shortcut { FFNet net; net.setStructure(2,3,4,5,FFNetStructures::InputOutputShortcut,false); std::size_t numParams = 2*3+3*4+4*5+2*5; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(2); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden1 = sigmoid(prod(net.layerMatrices()[0],point)); RealVector hidden2 = sigmoid(prod(net.layerMatrices()[1],hidden1)); RealVector output = prod(net.layerMatrices()[2],hidden2) + prod(net.inputOutputShortcut(),point); output =tanh(output); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); BOOST_CHECK_SMALL(netResult(3)-output(3),1.e-12); BOOST_CHECK_SMALL(netResult(4)-output(4),1.e-12); } //now also test batches RealMatrix inputs(100,2); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); } testBatchEval(net,inputs); } //3 layers, full { FFNet net; net.setStructure(2,3,4,5,FFNetStructures::Full,false); std::size_t numParams = 2*3+3*4+4*5+2*5+2*4+3*5; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(2); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden1 = sigmoid(prod(net.layerMatrices()[0],point)); RealVector hidden2 = prod(columns(net.layerMatrices()[1],0,2),point); hidden2 += prod(columns(net.layerMatrices()[1],2,5),hidden1); hidden2 = sigmoid(hidden2); RealVector output = prod(columns(net.layerMatrices()[2],0,2),point); output += prod(columns(net.layerMatrices()[2],2,5),hidden1); output += prod(columns(net.layerMatrices()[2],5,9),hidden2); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); BOOST_CHECK_SMALL(netResult(3)-output(3),1.e-12); BOOST_CHECK_SMALL(netResult(4)-output(4),1.e-12); } //now also test batches RealMatrix inputs(100,2); for(std::size_t i = 0; i != 
100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); } testBatchEval(net,inputs); } } BOOST_AUTO_TEST_CASE( FFNET_WeightedDerivatives) { //2 layers, Normal, no Bias { FFNet net; net.setStructure(2,5,3,FFNetStructures::Normal,false); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } //2 layers, Normal, bias { FFNet net; net.setStructure(2,5,3,FFNetStructures::Normal,true); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } //2 layers, Shortcut, bias { FFNet net; net.setStructure(2,5,3,FFNetStructures::InputOutputShortcut,true); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } //2 layers, Full, bias { FFNet net; net.setStructure(2,5,3,FFNetStructures::Full,true); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } //3 layers, Normal, no Bias { FFNet net; net.setStructure(2,5,3,3,FFNetStructures::Normal,false); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } //3 layers, Shortcut, Bias { FFNet net; net.setStructure(2,5,3,3,FFNetStructures::InputOutputShortcut,true); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } //3 layers, Full, Bias { FFNet net; net.setStructure(2,5,3,3,FFNetStructures::Full,true); testWeightedInputDerivative(net,100,5.e-6,1.e-7); testWeightedDerivative(net,100,5.e-6,1.e-7); testWeightedDerivativesSame(net,100); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Kernels/000077500000000000000000000000001314655040000163325ustar00rootroot00000000000000Shark-3.1.4/Test/Models/Kernels/ArdKernel.cpp000066400000000000000000000117351314655040000207140ustar00rootroot00000000000000#define BOOST_TEST_MODULE Models_ArdKernel #include #include #include "KernelDerivativeTestHelper.h" #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_Kernels_ArdKernel) BOOST_AUTO_TEST_CASE( DenseARDKernel_Parameters ) { const double gamma_init = 2.0; const unsigned int cur_dims = 3; DenseARDKernel kernel(cur_dims, gamma_init); // test gamma getter BOOST_CHECK_SMALL( gamma_init - kernel.gammaVector()(0), 1E-10); // test gamma setter RealVector new_gamma( cur_dims ); new_gamma(0) = 9.0; new_gamma(1) = 16.0; new_gamma(2) = 25.0; kernel.setGammaVector( new_gamma ); RealVector check_gamma = kernel.gammaVector(); for ( std::size_t i=0; i
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE MODELS_C_SVM_DERIVATIVE #include #include #include #include #include #include #include #include #include using namespace shark; // very straight-forward dataset with conceptual, hand-picked testing scenario. tests the deriv wrt C only. BOOST_AUTO_TEST_SUITE (Models_Kernels_CSvmDerivative) BOOST_AUTO_TEST_CASE( KERNEL_EXPANSION_CSVM_DERIVATIVE_TRIVIAL_DATASET ) { // set up dataset std::size_t NUM_DATA_POINTS = 6; std::vector input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; Cs.push_back(100); Cs.push_back(10); Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); Cs.push_back(0.001); Cs.push_back(0.0001); double C_eps; double NUMERICAL_C_INCREASE_FACTOR = 1.0001; // loop through Cs: repeat tests for every different value of C for ( unsigned int i=0; i kernel; KernelClassifier kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-10; trainer.train(kc, dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm for numerical comparsion-C LinearKernel<> cmp_kernel; KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer(&cmp_kernel, C_eps,true); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-10; cmp_trainer.train( cmp_kc, dataset ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; Cs.push_back(100); Cs.push_back(10); Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); //the accuracies are suffering too much in the log-encoded case for the last two values... 
Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); //Cs.push_back(0.001); Cs.push_back(0.0001); double C_eps; double NUMERICAL_C_INCREASE_FACTOR = 1.00001; bool UNCONSTRAINED = true; // loop through Cs: repeat tests for every different value of C for ( unsigned int i=0; i kernel; KernelClassifier kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i], true,UNCONSTRAINED ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-14; trainer.train(kc, dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm for numerical comparsion-C LinearKernel<> cmp_kernel; KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer(&cmp_kernel, C_eps, true, UNCONSTRAINED ); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-15; cmp_trainer.train( cmp_kc, dataset ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; Cs.push_back(100); Cs.push_back(10); Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); Cs.push_back(0.001); Cs.push_back(0.0001); double C_eps; double NUMERICAL_C_INCREASE_FACTOR = 1.0001; // set up different values for the kernel parameters std::vector< double > RbfParams; RbfParams.push_back(10); RbfParams.push_back(2); RbfParams.push_back(1); RbfParams.push_back(0.5); RbfParams.push_back(0.2); RbfParams.push_back(0.1); RbfParams.push_back(0.05); RbfParams.push_back(0.01); RbfParams.push_back(0.001); RbfParams.push_back(0.0001); // loop through RbfParams: repeat tests for every different value of Gamma for ( unsigned int h=0; h kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-15; trainer.train(kc, dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm for numerical comparsion-C DenseRbfKernel cmp_kernel( RbfParams[h] ); KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer(&cmp_kernel, C_eps,true); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-15; cmp_trainer.train( cmp_kc, dataset ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; Cs.push_back(100); Cs.push_back(10); Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); Cs.push_back(0.001); Cs.push_back(0.0001); double C_eps; double NUMERICAL_C_INCREASE_FACTOR = 1.0001; // loop through Cs: repeat tests for every different value of C for ( unsigned int i=0; i kernel; KernelClassifier kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-10; trainer.train(kc, 
dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm for numerical comparsion-C LinearKernel<> cmp_kernel; KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer(&cmp_kernel, C_eps,true); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-10; cmp_trainer.train( cmp_kc, dataset ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; // Cs.push_back(100); Cs.push_back(10); //responsible for the highest of the in-accuracies encountered Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); Cs.push_back(0.001); Cs.push_back(0.0001); double RbfParam_eps; // set up different values for the kernel parameters std::vector< double > RbfParams; RbfParams.push_back(10); RbfParams.push_back(2); RbfParams.push_back(1); RbfParams.push_back(0.5); RbfParams.push_back(0.2); RbfParams.push_back(0.1); RbfParams.push_back(0.05); RbfParams.push_back(0.01); RbfParams.push_back(0.001); RbfParams.push_back(0.0001); double NUMERICAL_KERNEL_PARAMETER_INCREASE_FACTOR = 1.0001; // loop through RbfParams: repeat test for different values of gamma for ( unsigned int h=0; h kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-15; trainer.train(kc, dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm with epsiloned-kernel-parameters for numerical comparsion DenseRbfKernel cmp_kernel( RbfParam_eps ); KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer( &cmp_kernel, Cs[i],true ); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-15; cmp_trainer.train( cmp_kc, dataset ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j Cs; // Cs.push_back(1); Cs.push_back(0.67); double RbfParam_eps; std::vector< double > RbfParams; // RbfParams.push_back(0.5); RbfParams.push_back(0.82); double NUMERICAL_KERNEL_PARAMETER_INCREASE_FACTOR = 1.00001; // loop through RbfParams: repeat test for different values of gamma for ( unsigned int h=0; h kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-15; trainer.train(kc, d1); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm with epsiloned-kernel-parameters for numerical comparsion DenseRbfKernel cmp_kernel( RbfParam_eps ); KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer( &cmp_kernel, Cs[i], true ); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-15; cmp_trainer.train( cmp_kc, d1 ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves 
for ( unsigned int j=0; j Cs; // Cs.push_back(1); Cs.push_back(0.67); double RbfParam_eps; std::vector< double > RbfParams; // RbfParams.push_back(0.5); RbfParams.push_back(0.82); double NUMERICAL_KERNEL_PARAMETER_INCREASE_FACTOR = 1.00001; // loop through RbfParams: repeat test for different values of gamma for ( unsigned int h=0; h kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-15; trainer.train(kc, d2); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm with epsiloned-kernel-parameters for numerical comparsion DenseRbfKernel cmp_kernel( RbfParam_eps ); KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer( &cmp_kernel, Cs[i],true ); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-15; cmp_trainer.train( cmp_kc, d2 ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; // Cs.push_back(100); Cs.push_back(10); //responsible for the highest of the in-accuracies encountered Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); Cs.push_back(0.001); Cs.push_back(0.0001); double RbfParam_eps; // set up different values for the kernel parameters std::vector< double > RbfParams; RbfParams.push_back(10); RbfParams.push_back(2); RbfParams.push_back(1); RbfParams.push_back(0.5); RbfParams.push_back(0.2); RbfParams.push_back(0.1); RbfParams.push_back(0.05); RbfParams.push_back(0.01); RbfParams.push_back(0.001); RbfParams.push_back(0.0001); double NUMERICAL_KERNEL_PARAMETER_INCREASE_FACTOR = 1.0001; // loop through RbfParams: repeat test for different values of gamma for ( unsigned int h=0; h kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-15; trainer.train(kc, dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm with epsiloned-kernel-parameters for numerical comparsion DenseRbfKernel cmp_kernel( RbfParam_eps, true ); KernelClassifier cmp_kc; KernelExpansion& cmp_svm =cmp_kc.decisionFunction(); CSvmTrainer cmp_trainer( &cmp_kernel, Cs[i],true ); cmp_trainer.sparsify() = false; cmp_trainer.stoppingCondition().minAccuracy = 1e-15; cmp_trainer.train( cmp_kc, dataset ); RealVector cmp_param = cmp_svm.parameterVector(); // first test derivatives of dataset-points themselves for ( unsigned int j=0; j input(NUM_DATA_POINTS); std::vector target(NUM_DATA_POINTS); for (size_t i=0; i quiz(NUM_QUIZ_POINTS); for (size_t i=0; i Cs; // Cs.push_back(100); Cs.push_back(10); //responsible for the highest of the in-accuracies encountered Cs.push_back(1); Cs.push_back(0.5); Cs.push_back(0.48); Cs.push_back(0.4); Cs.push_back(0.2); Cs.push_back(0.1); Cs.push_back(0.01); Cs.push_back(0.001); Cs.push_back(0.0001); double ArdParam_eps, C_eps; // set up different values for the kernel parameters std::vector< double > ArdParams; ArdParams.push_back(10); ArdParams.push_back(2); 
ArdParams.push_back(1); ArdParams.push_back(0.5); ArdParams.push_back(0.2); ArdParams.push_back(0.1); ArdParams.push_back(0.05); ArdParams.push_back(0.01); ArdParams.push_back(0.001); ArdParams.push_back(0.0001); double NUMERICAL_PARAMETER_INCREASE_FACTOR = 1.0001; RealVector tmp_param_manipulator; // loop through ArdParams: repeat test for different values of gamma for ( unsigned int h=0; h kc; KernelExpansion& svm = kc.decisionFunction(); CSvmTrainer trainer( &kernel, Cs[i],true ); trainer.sparsify() = false; trainer.stoppingCondition().minAccuracy = 1e-15; trainer.train(kc, dataset); RealVector param = svm.parameterVector(); CSvmDerivative svm_deriv( &kc, &trainer ); // set up helper variables double diff, deriv; RealVector computed_derivative; // set up svm with epsiloned-c_parameter for numerical comparsion DenseARDKernel c_cmp_kernel( 2, ArdParams[h]*ArdParams[h] ); tmp_param_manipulator = c_cmp_kernel.parameterVector(); tmp_param_manipulator(1) *= 0.5; c_cmp_kernel.setParameterVector( tmp_param_manipulator ); KernelClassifier c_cmp_kc; KernelExpansion& c_cmp_svm = c_cmp_kc.decisionFunction(); CSvmTrainer c_cmp_trainer( &c_cmp_kernel, C_eps, true ); c_cmp_trainer.sparsify() = false; c_cmp_trainer.stoppingCondition().minAccuracy = 1e-15; c_cmp_trainer.train( c_cmp_kc, dataset ); RealVector c_cmp_param = c_cmp_svm.parameterVector(); // set up svm with epsiloned-kernel-parameters_1 for numerical comparsion DenseARDKernel g1_cmp_kernel( 2, ArdParams[h]*ArdParams[h] ); tmp_param_manipulator = g1_cmp_kernel.parameterVector(); tmp_param_manipulator(0) = ArdParam_eps; tmp_param_manipulator(1) *= 0.5; g1_cmp_kernel.setParameterVector( tmp_param_manipulator ); KernelClassifier g1_cmp_kc; KernelExpansion& g1_cmp_svm = g1_cmp_kc.decisionFunction(); CSvmTrainer g1_cmp_trainer( &g1_cmp_kernel, Cs[i], true ); g1_cmp_trainer.sparsify() = false; g1_cmp_trainer.stoppingCondition().minAccuracy = 1e-15; g1_cmp_trainer.train( g1_cmp_kc, dataset ); RealVector g1_cmp_param = g1_cmp_svm.parameterVector(); // set up svm with epsiloned-kernel-parameters_2 for numerical comparsion DenseARDKernel g2_cmp_kernel( 2, ArdParams[h]*ArdParams[h] ); tmp_param_manipulator = g2_cmp_kernel.parameterVector(); tmp_param_manipulator(1) *= 0.5; tmp_param_manipulator(1) *= NUMERICAL_PARAMETER_INCREASE_FACTOR; g2_cmp_kernel.setParameterVector( tmp_param_manipulator ); KernelClassifier g2_cmp_kc; KernelExpansion& g2_cmp_svm = g2_cmp_kc.decisionFunction(); CSvmTrainer g2_cmp_trainer( &g2_cmp_kernel, Cs[i], true ); g2_cmp_trainer.sparsify() = false; g2_cmp_trainer.stoppingCondition().minAccuracy = 1e-15; g2_cmp_trainer.train( g2_cmp_kc, dataset ); RealVector g2_cmp_param = g2_cmp_svm.parameterVector(); //////////////////////////////////////////////////////////////////////////////// // TEST ALL THREE DERIVATIVES //////////////////////////////////////////////////////////////////////////////// // FIRST, TEST DERIVS WRT C // first, test derivatives of dataset-points themselves for ( unsigned int j=0; j #include #include #include #include using namespace shark; // This unit test checks correctness of the // DiscreteKernel class. 
BOOST_AUTO_TEST_SUITE (Models_Kernels_DiscreteKernel) BOOST_AUTO_TEST_CASE( DiscreteKernel_Test ) { // define a positive definite, symmetric matrix RealMatrix mat(3, 3); mat(0, 0) = 2.0; mat(0, 1) = 1.0; mat(0, 2) = 0.5; mat(1, 0) = 1.0; mat(1, 1) = 2.0; mat(1, 2) = 1.5; mat(2, 0) = 0.5; mat(2, 1) = 1.5; mat(2, 2) = 1.25; // define the kernel DiscreteKernel k(mat); // target accuracy const double tolerance = 1e-14; // test kernel values and symmetry for (std::size_t i=0; i #include namespace shark { BOOST_AUTO_TEST_SUITE (Models_Kernels_EvalSkipMissingFeaturesTests) BOOST_AUTO_TEST_CASE(TestEvalSkipMissingFeatures) { // This test case tests evaluating a kernel function while skipping missing features const double tolerancePct = 0.001; // Test that the calculation will take all features into consideration when missingness is set to boost::none LinearKernel<> kernel; RealVector inputA(5); inputA(0) = 1.0; inputA(1) = std::numeric_limits::quiet_NaN(); inputA(2) = 1.0; inputA(3) = 1.0; inputA(4) = 1.0; RealVector inputB(5); inputB(0) = 1.0; inputB(1) = 2.0; inputB(2) = std::numeric_limits::quiet_NaN(); inputB(3) = 4.0; inputB(4) = 5.0; // an empty missingness indicator will be ignored completely { const double actual = evalSkipMissingFeatures(kernel, inputA, inputB); BOOST_CHECK_CLOSE(actual, 10.0, tolerancePct); } { // With no missing features, the kernel should be evaluated correctly RealVector missingness(5); missingness(0) = 1.0; missingness(1) = 1.0; missingness(2) = 1.0; missingness(3) = 1.0; missingness(4) = 1.0; const double actual1 = evalSkipMissingFeatures(kernel, inputA, inputB, missingness); BOOST_CHECK_CLOSE(actual1, 10.0, tolerancePct); // A missing feature should affect the evaluation accordingly missingness(4) = std::numeric_limits::quiet_NaN(); const double actual2 = evalSkipMissingFeatures(kernel, inputA, inputB, missingness); BOOST_CHECK_CLOSE(actual2, 5.0, tolerancePct); } } // TODO: An interesting measurement: // a safety check comparing 1000 "normal" kernel evaluations to 1000 kernel evaluations where just the number of // dimensions was doubled and the added dimensions filled with NaNs, and then checking the time penalty (a rough sketch follows below). // The current implementation would not be a fast solution, given the memory copies between vectors.
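// A rough sketch of the measurement proposed in the TODO above; this is an
// added illustration, not part of the original test suite. It assumes that
// shark::Timer (shark/Core/Timer.h) and <iostream> are available and reuses
// evalSkipMissingFeatures from above. The helper name, the trial count and
// the dimensions are arbitrary choices for illustration only.
inline void sketchMissingFeatureTimePenalty(){
	const std::size_t trials = 1000;
	const std::size_t dim = 10;
	LinearKernel<> kernel;
	RealVector a(dim), b(dim), aPadded(2 * dim), bPadded(2 * dim);
	for(std::size_t i = 0; i != dim; ++i){
		a(i) = aPadded(i) = 1.0;
		b(i) = bPadded(i) = 2.0;
	}
	for(std::size_t i = dim; i != 2 * dim; ++i){
		// the added dimensions hold NaNs and must be skipped by the evaluation
		aPadded(i) = bPadded(i) = std::numeric_limits<double>::quiet_NaN();
	}
	double start = Timer::now();
	for(std::size_t i = 0; i != trials; ++i) evalSkipMissingFeatures(kernel, a, b);
	double mid = Timer::now();
	for(std::size_t i = 0; i != trials; ++i) evalSkipMissingFeatures(kernel, aPadded, bPadded);
	double stop = Timer::now();
	// both runs compute the same kernel value (20.0); the ratio is the time penalty
	std::cout << "missing-feature time penalty factor: " << (stop - mid) / (mid - start) << std::endl;
}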
} // namespace shark BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Kernels/GaussianRbfKernel.cpp000066400000000000000000000157131314655040000224120ustar00rootroot00000000000000#define BOOST_TEST_MODULE Kernels_GaussianRbfKernel #include #include #include "KernelDerivativeTestHelper.h" #include #include #include using namespace shark; template void testBatch(Kernel& kernel,std::size_t batchSize1,std::size_t batchSize2,std::size_t dim){ //generate two sets of points and check that the batched results equal the single results Matrix batch1(batchSize1,dim); Matrix batch2(batchSize2,dim); for(std::size_t i = 0; i != batchSize1;++i){ for(std::size_t j = 0; j != dim; ++j) batch1(i,j)=Rng::uni(-1,1); } for(std::size_t i = 0; i != batchSize2;++i){ for(std::size_t j = 0; j != dim; ++j) batch2(i,j)=Rng::uni(-1,1); } testEval(kernel,batch1,batch2); } BOOST_AUTO_TEST_SUITE (Models_Kernels_GaussianRbfKernel) BOOST_AUTO_TEST_CASE( DenseRbfKernel_Test ) { const double gamma1=0.1; const double gamma2=0.2; DenseRbfKernel kernel(gamma1); //first evaluate a single test point //testpoints RealVector x1(2); x1(0)=1; x1(1)=0; RealVector x2(2); x2(0)=2; x2(1)=0; double result=std::exp(-gamma1); //evaluate point double test=kernel.eval(x1,x2); BOOST_CHECK_SMALL(result-test,1.e-15); //now the same with parameterVector to ensure that gamma is set correctly RealVector testParameter(1); testParameter(0)=gamma2; kernel.setParameterVector(testParameter); BOOST_CHECK_SMALL(gamma2-kernel.gamma(),1.e-15); //now test whether the parametervector is created correctly RealVector parameter=kernel.parameterVector(); BOOST_CHECK_SMALL(norm_sqr(parameter-testParameter),1.e-15); //evaluate point for the new gamma value test=kernel.eval(x1,x2); result=std::exp(-gamma2); BOOST_CHECK_SMALL(result-test,1.e-15); //test batches kernel.setGamma(gamma1); testBatch(kernel,100,200,2); testBatch(kernel,100,1,2); //test derivatives testKernelDerivative(kernel,2,1.e-7); testKernelInputDerivative(kernel,2); } BOOST_AUTO_TEST_CASE( DenseRbfKernelUnconstrained_Test ) { const double gamma1=0.1; const double gamma2=0.2; DenseRbfKernel kernel( gamma1, true ); //first evaluate a single test point //testpoints RealVector x1(2); x1(0)=1; x1(1)=0; RealVector x2(2); x2(0)=2; x2(1)=0; double result=std::exp(-gamma1); //evaluate point double test=kernel.eval(x1,x2); BOOST_CHECK_SMALL(result-test,1.e-15); //now the same with parameterVector to ensure that gamma is set correctly RealVector testParameter(1); testParameter(0)=gamma2; kernel.setParameterVector(testParameter); BOOST_CHECK_SMALL( gamma2 - std::log(kernel.gamma()) , 1.e-15 ); //now test whether the parametervector is created correctly RealVector parameter=kernel.parameterVector(); BOOST_CHECK_SMALL(norm_sqr(parameter-testParameter),1.e-15); //evaluate point for the new gamma value test=kernel.eval(x1,x2); result=std::exp( -std::exp(gamma2) ); BOOST_CHECK_SMALL(result-test,1.e-15); //test batches kernel.setGamma(gamma1); testBatch(kernel,100,200,2); testBatch(kernel,100,1,2); //test derivatives testKernelDerivative(kernel,2,1.e-7); testKernelInputDerivative(kernel,2); } BOOST_AUTO_TEST_CASE( CompressedRbfKernel_Test ) { const double gamma=0.1; CompressedRbfKernel kernel(gamma); //testpoints CompressedRealVector x1(17000); x1(3745)=1; x1(14885)=0; CompressedRealVector x2(17000); x2(3745)=1; x2(14885)=-1; double result=std::exp(-gamma); //evaluate point double test=kernel.eval(x1,x2); BOOST_CHECK_SMALL(result-test,1.e-15); //now the same with parameterVector to ensure that gamma is set correctly
RealVector testParameter(1); testParameter(0)=gamma; kernel.setParameterVector(testParameter); //evaluate point test=kernel.eval(x1,x2); BOOST_CHECK_SMALL(result-test,1.e-15); //now test whether the parametervector is created correctly RealVector parameter=kernel.parameterVector(); BOOST_CHECK_SMALL(norm_sqr(parameter-testParameter),1.e-15); //test first derivative testKernelDerivative(kernel,2); //doesn't make sense for sparse (also does not work) // testKernelInputDerivative(kernel,2); } BOOST_AUTO_TEST_CASE( CompressedRbfKernelUnconstrained_Test ) { const double gamma=0.1; CompressedRbfKernel kernel( gamma, true ); //testpoints CompressedRealVector x1(17000); x1(3745)=1; x1(14885)=0; CompressedRealVector x2(17000); x2(3745)=1; x2(14885)=-1; double result=std::exp(-gamma); //evaluate point double test=kernel.eval(x1,x2); BOOST_CHECK_SMALL(result-test,1.e-15); //now the same with parameterVector to ensure that gamma is set correctly RealVector testParameter(1); testParameter(0)=gamma; kernel.setParameterVector(testParameter); //evaluate point test=kernel.eval(x1,x2); result=std::exp( -std::exp(gamma) ); BOOST_CHECK_SMALL(result-test,1.e-15); //now test whether the parametervector is created correctly RealVector parameter=kernel.parameterVector(); BOOST_CHECK_SMALL(norm_sqr(parameter-testParameter),1.e-15); //test first derivative testKernelDerivative(kernel,2); //doesn't make sense for sparse (also does not work) // testKernelInputDerivative(kernel,2); } #ifdef NDEBUG void benchmark(std::size_t batchSize1,std::size_t batchSize2, std::size_t dim, std::size_t iterations){ const double gamma1=0.1; DenseRbfKernel kernel(gamma1); RealMatrix batch1(batchSize1,dim); RealMatrix batch2(batchSize2,dim); std::vector batchVec1(batchSize1,RealVector(dim)); std::vector batchVec2(batchSize2,RealVector(dim)); for(std::size_t i = 0; i != batchSize1;++i){ for(std::size_t j = 0; j != dim; ++j) batchVec1[i](j)=batch1(i,j)=FastRng::uni(-1,1); } for(std::size_t i = 0; i != batchSize2;++i){ for(std::size_t j = 0; j != dim; ++j) batchVec2[i](j)=batch2(i,j)=Rng::uni(-1,1); } RealMatrix result(batchSize1,batchSize2); result.clear(); double start1=Timer::now(); for(std::size_t i = 0; i != iterations; ++i){ kernel.eval(batch1,batch2,result); } double end1=Timer::now(); for(std::size_t i = 0; i != iterations; ++i){ for(std::size_t i = 0; i != batchSize1; ++i){ for(std::size_t j = 0; j != batchSize2; ++j){ result(i,j)+=kernel.eval(batchVec1[i],batchVec2[j]); } } } double end2=Timer::now(); double speedupFactor = (end2-end1)/(end1-start1); std::cout< #include #include #include #include #include namespace shark{ template RealVector estimateDerivative(Kernel& kernel, const Point& point1, const Point& point2, double epsilon = 1.e-10) { RealVector parameters = kernel.parameterVector(); RealVector gradient = parameters; for (size_t parameter=0; parameter != parameters.size(); ++parameter) { RealVector testPoint1 = parameters; testPoint1(parameter) += epsilon; kernel.setParameterVector(testPoint1); double result1 = kernel.eval(point1, point2); RealVector testPoint2 = parameters; testPoint2(parameter) -= epsilon; kernel.setParameterVector(testPoint2); double result2 = kernel.eval(point1, point2); double estimatedDerivative = (result1 - result2) / (2 * epsilon); gradient(parameter) = estimatedDerivative; } kernel.setParameterVector(parameters); return gradient; } template RealVector estimateInputDerivative(Kernel& kernel,const Point& point1,const Point& point2,double epsilon=1.e-10){ size_t size = point1.size(); RealVector
gradient(size); for(size_t dim=0;dim!=size;++dim){ Point testPoint1=point1; testPoint1(dim)+=epsilon; double result1=kernel(testPoint1,point2); Point testPoint2=point1; testPoint2(dim)-=epsilon; double result2=kernel(testPoint2,point2); double estimatedDerivative=(result1-result2)/(2*epsilon); gradient(dim)=estimatedDerivative; } return gradient; } /// Convenience function for automatic sampling of both points /// of a kernel and evaluation and test of the parameter derivative. /// Samples are taken from the interval [-3,3] (for each component). /// It is assumed that the input points are vectors and batches are matrices. template void testKernelDerivative(AbstractKernelFunction& kernel,std::size_t inputSize, double epsilon=1.e-8, double testEpsilon= 1.e-6, unsigned int numberOfTests = 5, std::size_t batchSize = 20) { BOOST_REQUIRE_EQUAL(kernel.hasFirstParameterDerivative(),true); for(unsigned int test = 0; test != numberOfTests; ++test) { //create data typename Batch::type batch1(batchSize,inputSize); typename Batch::type batch2(2*batchSize,inputSize); for(std::size_t i = 0; i != batchSize; ++i){ for(std::size_t j = 0; j != inputSize;++j) { batch1(i,j) = Rng::uni(-3,3); batch2(i,j) = Rng::uni(-3,3); batch2(i+batchSize,j) = Rng::uni(-3,3); } } //evaluate batched derivative boost::shared_ptr state = kernel.createState(); RealMatrix kernelBatchRes; kernel.eval(batch1,batch2,kernelBatchRes,*state); //check that single derivatives fit the estimated derivative //also calculate the batch derivative but set only 1 weight to 1. for(std::size_t i = 0; i != batchSize; ++i){ T x1=row(batch1,i); for(std::size_t j = 0; j != 2*batchSize;++j) { T x2=row(batch2,j); //calculate batch derivative but set only the current derivative to one. RealMatrix singleCoeff(batchSize,2*batchSize); singleCoeff.clear(); singleCoeff(i,j) = 1.0; RealVector singleBatchGradient; kernel.weightedParameterDerivative(batch1,batch2,singleCoeff,*state,singleBatchGradient); //compare with estimated derivative RealVector estimatedDerivative = estimateDerivative(kernel, x1, x2, epsilon); BOOST_REQUIRE_EQUAL(estimatedDerivative.size(), singleBatchGradient.size()); BOOST_CHECK_SMALL(norm_2(estimatedDerivative - singleBatchGradient), testEpsilon); for(std::size_t k = 0; k != estimatedDerivative.size();++k){ if(singleBatchGradient(k)-estimatedDerivative(k)> epsilon) BOOST_CHECK_CLOSE(singleBatchGradient(k),estimatedDerivative(k), 0.01); } //std::cout< void testKernelInputDerivative(AbstractKernelFunction& kernel,std::size_t inputSize, double epsilon=1.e-8, double testEpsilon= 1.e-6, unsigned int numberOfTests = 5, std::size_t batchSize = 20) { BOOST_REQUIRE_EQUAL(kernel.hasFirstInputDerivative(),true); for(unsigned int test = 0; test != numberOfTests; ++test) { //create data typename Batch::type batch1(batchSize,inputSize); typename Batch::type batch2(batchSize+1,inputSize); for(std::size_t i = 0; i != batchSize; ++i){ for(std::size_t j = 0; j != inputSize;++j) { batch1(i,j) = Rng::uni(-3,3); batch2(i,j) = Rng::uni(-3,3); } } for(std::size_t j = 0; j != inputSize;++j) { batch2(batchSize,j) = Rng::uni(-3,3); } //evaluate batched derivative boost::shared_ptr state = kernel.createState(); RealMatrix kernelBatchRes; kernel.eval(batch1,batch2,kernelBatchRes,*state); //check that single derivatives fit the estimated derivative RealMatrix singleCoeff(batchSize,batchSize+1,0.0); for(std::size_t j = 0; j != batchSize+1; ++j){ T x2=row(batch2,j); for(std::size_t i = 0; i != batchSize; ++i){ //calculate batch derivative but set only the coefficient
of the i-th sample in Batch2 to one singleCoeff(i,j) = 1.0; //calculate the input derivative for all elements of Batch1 typename Batch::type derivativeBatch; kernel.weightedInputDerivative(batch1,batch2,singleCoeff,*state,derivativeBatch); singleCoeff(i,j) = 0; //check whether the previously calculated //is the same as the estimated input derivative T x1=row(batch1,i); //estimate the derivative RealVector estimatedDerivative = estimateInputDerivative(kernel, x1, x2, epsilon); BOOST_CHECK_SMALL(norm_2(estimatedDerivative - row(derivativeBatch,i)), testEpsilon); } } } } template void testEval(AbstractKernelFunction& kernel, typename Batch::type const& sampleBatch1,typename Batch::type const& sampleBatch2){ std::size_t batchSize1 = size(sampleBatch1); std::size_t batchSize2 = size(sampleBatch2); //evaluate batch on the kernels boost::shared_ptr state = kernel.createState(); RealMatrix kernelResultsIntermediate; RealMatrix kernelResults = kernel(sampleBatch1,sampleBatch2); kernel.eval(sampleBatch1,sampleBatch2,kernelResultsIntermediate,*state); //eval every single combination and compare with kernel results, //also check that the single eval version returns the same results BOOST_REQUIRE_EQUAL(kernelResults.size1(),batchSize1); BOOST_REQUIRE_EQUAL(kernelResults.size2(),batchSize2); BOOST_REQUIRE_EQUAL(kernelResultsIntermediate.size1(),batchSize1); BOOST_REQUIRE_EQUAL(kernelResultsIntermediate.size2(),batchSize2); for(std::size_t i = 0; i != batchSize1; ++i){ T x1 = get(sampleBatch1,i); for(std::size_t j = 0; j != batchSize2; ++j){ double result = kernel.eval(x1,get(sampleBatch2,j)); BOOST_CHECK_SMALL(result-kernelResults(i,j), 1.e-13); BOOST_CHECK_SMALL(result-kernelResultsIntermediate(i,j), 1.e-13); } } } } #endif Shark-3.1.4/Test/Models/Kernels/KernelExpansion.cpp000066400000000000000000000123101314655040000221400ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for the kernel expansion model * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE MODELS_KERNEL_EXPANSION #include #include #include #include #include #include #include "../derivativeTestHelper.h" //for evalTest using namespace shark; BOOST_AUTO_TEST_SUITE (Models_Kernels_KernelExpansion) BOOST_AUTO_TEST_CASE( KERNEL_EXPANSION_SIMPLE_VALUE ) { double k1 = exp(-0.5); RealVector test(2); test(0) = 3.0; test(1) = 4.0; std::stringstream ss; // define a set of two basis points std::vector points(2, RealVector(2)); points[0](0) = 0.0; points[0](1) = 0.0; points[1](0) = 6.0; points[1](1) = 0.0; Data basis = createDataFromRange(points); // build a kernel expansion object DenseRbfKernel kernel(0.02); // sigma = 5 KernelExpansion ex(&kernel, basis, true, 2); // offset, two outputs RealVector param(6); param(0) = 1.0; param(1) = 1.0; param(2) = 0.0; param(3) = 1.0; param(4) = 2.0; param(5) = 3.0; ex.setParameterVector(param); // check its prediction RealVector prediction = ex(test); BOOST_CHECK_SMALL(prediction(0) - (1.0 * k1 + 2.0), 1e-10); BOOST_CHECK_SMALL(prediction(1) - (2.0 * k1 + 3.0), 1e-10); } BOOST_AUTO_TEST_CASE( KERNEL_EXPANSION_BATCH_VALUE ) { std::vector data(100,RealVector(3)); for(std::size_t i = 0; i != 100; ++i){ data[i](0) = Rng::uni(-1,1); data[i](1) = Rng::uni(-1,1); data[i](2) = Rng::uni(-1,1); } //BatchSize 10 to check whether complex structured bases also work Data basis = createDataFromRange(data,10); //BatchSize 100 results in a simple structured base Data simpleBasis = createDataFromRange(data,100); RealMatrix inputBatch(100,3); for(std::size_t i = 0; i != 100; ++i){ inputBatch(i,0) = Rng::uni(-1,1); inputBatch(i,1) = Rng::uni(-1,1); inputBatch(i,2) = Rng::uni(-1,1); } //create expansions DenseRbfKernel kernel(0.02); KernelExpansion simpleExpansion(&kernel, simpleBasis, true, 2); KernelExpansion expansion(&kernel, basis, true, 2); RealVector parameters(simpleExpansion.numberOfParameters()); for(std::size_t i = 0; i != parameters.size(); ++i){ parameters(i) = Rng::uni(-1,1); } simpleExpansion.setParameterVector(parameters); expansion.setParameterVector(parameters); //test whether the choice of basis type changes the result //if this is the case, there is again something really wrong RealVector simpleOutput = simpleExpansion(row(inputBatch,0)); RealVector output = expansion(row(inputBatch,0)); double error = norm_sqr(simpleOutput-output); BOOST_REQUIRE_SMALL(error, 1.e-10); //if that worked, the next test checks, whether batch evaluation works at all testBatchEval(simpleExpansion,inputBatch); testBatchEval(expansion,inputBatch); } BOOST_AUTO_TEST_CASE( KERNEL_EXPANSION_SERIALIZATION ) { std::stringstream ss; { // define a set of two basis points std::vector points(2, RealVector(2)); points[0](0) = 0.0; points[0](1) = 0.0; points[1](0) = 6.0; points[1](1) = 0.0; Data basis = createDataFromRange(points); // build a kernel 
expansion object DenseRbfKernel kernel(0.02); // sigma = 5 KernelExpansion ex(&kernel, basis, true, 2); // offset, two outputs RealVector param(6); param(0) = 1.0; param(1) = 1.0; param(2) = 0.0; param(3) = 1.0; param(4) = 2.0; param(5) = 3.0; ex.setParameterVector(param); // serialize the model TextOutArchive oa(ss); oa << const_cast const&>(ex);//prevent compilation warning } { // recover from the stream DenseRbfKernel kernel(1.0); KernelExpansion ex2(&kernel); TextInArchive ia(ss); ia >> ex2; // check whether the prediction still works double k1 = exp(-0.5); RealVector test(2); test(0) = 3.0; test(1) = 4.0; RealVector prediction2 = ex2(test); BOOST_CHECK_SMALL(prediction2(0) - (1.0 * k1 + 2.0), 1e-10); BOOST_CHECK_SMALL(prediction2(1) - (2.0 * k1 + 3.0), 1e-10); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Kernels/KernelHelpers.cpp000066400000000000000000000072211314655040000216030ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test cases for the functions defined in Models/Kernels/KernelHelpers.cpp * * * * \author Oswin Krause * \date 2012 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ #define BOOST_TEST_MODULE Kernels_KernelHelpers #include #include #include #include using namespace shark; using namespace std; struct KernelHelpersFixture { KernelHelpersFixture() : datasetSize(100),dimensions(5),batchSize(8),kernel(0.5){ std::vector points(datasetSize, RealVector(dimensions)); for(std::size_t i = 0; i != datasetSize; ++i){ for(std::size_t j = 0; j != dimensions; ++j){ points[i](j)=Rng::uni(-1,1); } } data = createDataFromRange(points,batchSize); } std::size_t datasetSize; std::size_t dimensions; std::size_t batchSize; DenseRbfKernel kernel; Data data; }; BOOST_FIXTURE_TEST_SUITE (Models_Kernels_KernelHelpers, KernelHelpersFixture ) BOOST_AUTO_TEST_CASE( KernelHelpers_calculateRegularizedKernelMatrix ){ //calculate kernelMatrix to test RealMatrix kernelMatrix = calculateRegularizedKernelMatrix(kernel,data,1.0); BOOST_REQUIRE_EQUAL(kernelMatrix.size1(),datasetSize); BOOST_REQUIRE_EQUAL(kernelMatrix.size2(),datasetSize); //now check the results for(std::size_t i = 0; i != datasetSize; ++i){ for(std::size_t j = 0; j != datasetSize; ++j){ double result = kernel(data.element(i),data.element(j))+double(i==j); BOOST_CHECK_SMALL(kernelMatrix(i,j)-result,1.e-12); } } } BOOST_AUTO_TEST_CASE( KernelHelpers_calculateKernelMatrixParameterDerivative ){ for(std::size_t test = 0; test != 100; ++test){ RealMatrix weights(datasetSize,datasetSize); for(std::size_t i = 0; i != datasetSize; ++i){ for(std::size_t j = 0; j <= i; ++j){ weights(j,i) = weights(i,j)=Rng::uni(1,2); } }; //calculate kernelMatrix to test RealVector kernelGradient = calculateKernelMatrixParameterDerivative(kernel,data,weights); BOOST_REQUIRE_EQUAL(kernelGradient.size(),1u); //now check the results y computing the kernel matrix from scratch boost::shared_ptr state = kernel.createState(); RealMatrix derivativeWeight(1,1); RealVector result(1); result(0) = 0; RealVector gradient(1); RealMatrix block; for(std::size_t i = 0; i != datasetSize; ++i){ RealMatrix x1(1,dimensions); row(x1,0)=data.element(i); for(std::size_t j = 0; j != datasetSize; ++j){ RealMatrix x2(1,dimensions); row(x2,0)=data.element(j); derivativeWeight(0,0) = weights(i,j); kernel.eval(x1,x2,block,*state); kernel.weightedParameterDerivative(x1,x2,derivativeWeight,*state,gradient); result +=gradient; } } BOOST_CHECK_CLOSE(kernelGradient(0),result(0),1.e-12); } } BOOST_AUTO_TEST_SUITE_END()Shark-3.1.4/Test/Models/Kernels/KernelNearestNeighborClassifier.cpp000066400000000000000000000226031314655040000252660ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for kernel nearest neighbor classifier * * * * * \author T. Glasmachers * \date 2010-2011 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE MODELS_KERNEL_NEAREST_NEIGHBOR_CLASSIFIER #include #include #include #include #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_Kernels_KernelNearestNeighborClassifier) BOOST_AUTO_TEST_CASE( KERNEL_NEAREST_NEIGHBOR_CLASSIFIER ) { std::vector input(6, RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; std::vector target(6); target[0]=0; target[1]=1; target[2]=0; target[3]=1; target[4]=0; target[5]=1; ClassificationDataset dataset = createLabeledDataFromRange(input, target); DataView > view(dataset.inputs()); DenseRbfKernel kernel(0.5); KHCTree > > tree(view,&kernel); TreeNearestNeighbors algorithm(dataset, &tree); NearestNeighborClassifier model(&algorithm, 3); for (size_t i = 0; i<6; ++i) { unsigned int label = model(input[i]); BOOST_CHECK_EQUAL(target[i], label); } } BOOST_AUTO_TEST_CASE( SIMPLE_NEAREST_NEIGHBOR_CLASSIFIER ) { std::vector input(6, RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; std::vector target(6); target[0]=0; target[1]=1; target[2]=0; target[3]=1; target[4]=0; target[5]=1; ClassificationDataset dataset = createLabeledDataFromRange(input, target,2); DenseRbfKernel kernel(0.5); SimpleNearestNeighbors algorithm(dataset, &kernel); NearestNeighborClassifier model(&algorithm, 3); Data labels = model(dataset.inputs()); for (size_t i = 0; i<6; ++i) { unsigned int label = model(input[i]); BOOST_CHECK_EQUAL(target[i], label); BOOST_CHECK_EQUAL(target[i], labels.element(i)); } } BOOST_AUTO_TEST_CASE( SIMPLE_NEAREST_NEIGHBOR_CLASSIFIER_BRUTE_FORCE ) { std::size_t Dimension = 5; std::size_t Points = 100; std::size_t TestPoints = 100; std::vector input(Points, RealVector(Dimension)); std::vector target(Points); for(std::size_t i = 0; i != Points; ++i){ target[i] = i%2; for(std::size_t d = 0; d != Dimension; ++d){ input[i][d]=Rng::gauss(3*target[i],1); } } std::vector testInput(Points, RealVector(Dimension)); std::vector testTarget(Points); for(std::size_t i = 0; i != TestPoints; ++i){ testTarget[i] = i%2; for(std::size_t d = 0; d != Dimension; ++d){ testInput[i][d]=Rng::gauss(3*testTarget[i],1); } } ClassificationDataset dataset = createLabeledDataFromRange(input, target,10); ClassificationDataset testDataset = createLabeledDataFromRange(testInput, testTarget,10); //test using the brute force algorithm, whether the test points //are classified correctly in 1-NN { DenseRbfKernel kernel(0.5); SimpleNearestNeighbors algorithm(dataset, &kernel); NearestNeighborClassifier model(&algorithm, 1); 
Data labels = model(dataset.inputs()); for (size_t i = 0; i testLabels = model(testDataset.inputs()); for (size_t i = 0; i::max(); for(std::size_t j = 0; j != Points; ++j){ double distance=distanceSqr(testInput[i],input[j]); if(distance < minDistance){ minDistance = distance; bruteforceLabel=target[j]; } } unsigned int label = model(testInput[i]); BOOST_CHECK_EQUAL(bruteforceLabel, label); BOOST_CHECK_EQUAL(bruteforceLabel, testLabels.element(i)); } } //test using the brute force algorithm, whether the test points //are classified correctly in 3-NN { DenseRbfKernel kernel(0.5); SimpleNearestNeighbors algorithm(dataset, &kernel); NearestNeighborClassifier model(&algorithm, 3); Data testLabels = model(testDataset.inputs()); for (size_t i = 0; i Pair; std::priority_queue,std::greater > queue; for(std::size_t j = 0; j != Points; ++j){ double distance=norm_2(testInput[i]-input[j]); queue.push(Pair(distance,target[j])); } unsigned int res = 0; for(std::size_t j = 0; j != 3; ++j){ res+=queue.top().value; queue.pop(); } unsigned int bruteforceLabel = res > 1; unsigned int label = model(testInput[i]); BOOST_CHECK_EQUAL(bruteforceLabel, label); BOOST_CHECK_EQUAL(bruteforceLabel, testLabels.element(i)); } } } BOOST_AUTO_TEST_CASE( NEAREST_NEIGHBOR_CLASSIFIER_KDTREE_BRUTE_FORCE ) { std::size_t Dimension = 5; std::size_t Points = 100; std::size_t TestPoints = 100; std::vector input(Points, RealVector(Dimension)); std::vector target(Points); for(std::size_t i = 0; i != Points; ++i){ target[i] = i%2; for(std::size_t d = 0; d != Dimension; ++d){ input[i][d]=Rng::gauss(3*target[i],1); } } std::vector testInput(Points, RealVector(Dimension)); std::vector testTarget(Points); for(std::size_t i = 0; i != TestPoints; ++i){ testTarget[i] = i%2; for(std::size_t d = 0; d != Dimension; ++d){ testInput[i][d]=Rng::gauss(3*testTarget[i],1); } } ClassificationDataset dataset = createLabeledDataFromRange(input, target,10); ClassificationDataset testDataset = createLabeledDataFromRange(testInput, testTarget,10); { KDTree tree(dataset.inputs()); TreeNearestNeighbors algorithm(dataset, &tree); NearestNeighborClassifier model(&algorithm, 1); //test whether 1-NN classifies it's training set correctly. 
Data labels = model(dataset.inputs()); for (size_t i = 0; i testLabels = model(testDataset.inputs()); for (size_t i = 0; i::max(); for(std::size_t j = 0; j != Points; ++j){ double distance=distanceSqr(testInput[i],input[j]); if(distance < minDistance){ minDistance = distance; bruteforceLabel=target[j]; } } unsigned int label = model(testInput[i]); BOOST_CHECK_EQUAL(bruteforceLabel, label); BOOST_CHECK_EQUAL(bruteforceLabel, testLabels.element(i)); } } //test using the brute force algorithm, whether the test points //are classified correctly in 3-NN { KDTree tree(dataset.inputs()); TreeNearestNeighbors algorithm(dataset, &tree); NearestNeighborClassifier model(&algorithm, 3); Data testLabels = model(testDataset.inputs()); for (size_t i = 0; i Pair; std::priority_queue,std::greater > queue; for(std::size_t j = 0; j != Points; ++j){ double distance=norm_2(testInput[i]-input[j]); queue.push(Pair(distance,target[j])); } unsigned int res = 0; for(std::size_t j = 0; j != 3; ++j){ res+=queue.top().value; queue.pop(); } unsigned int bruteforceLabel = res > 1; unsigned int label = model(testInput[i]); BOOST_CHECK_EQUAL(bruteforceLabel, label); BOOST_CHECK_EQUAL(bruteforceLabel, testLabels.element(i)); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Kernels/KernelNearestNeighborRegression.cpp000066400000000000000000000046461314655040000253310ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for kernel nearest neighbor regression * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE MODELS_KERNEL_NEAREST_NEIGHBOR_REGRESSION #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_Kernels_KernelNearestNeighborRegression) BOOST_AUTO_TEST_CASE( KERNEL_NEAREST_NEIGHBOR_REGRESSION ) { std::vector input(6, RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; std::vector target(6, RealVector(1)); target[0](0)= -1.0; target[1](0)= +1.0; target[2](0)= -1.0; target[3](0)= +1.0; target[4](0)= -1.0; target[5](0)= +1.0; RegressionDataset dataset = createLabeledDataFromRange(input, target); DenseRbfKernel kernel(0.5); SimpleNearestNeighbors algorithm(dataset, &kernel); NearestNeighborRegression model(&algorithm, 3); for (std::size_t i = 0; i<6; ++i) { RealVector prediction = model(input[i]); BOOST_CHECK_SMALL(target[i](0) - 3.0 * prediction(0), 1e-12); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Kernels/LinearKernel.cpp000066400000000000000000000020771314655040000214170ustar00rootroot00000000000000#define BOOST_TEST_MODULE Kernels_LinearKernel #include #include #include #include "KernelDerivativeTestHelper.h" #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_Kernels_LinearKernel) BOOST_AUTO_TEST_CASE( DenseLinearKernel_Test ) { double result=-2.0; DenseLinearKernel kernel; //testpoints RealVector x1(2); x1(0)=1; x1(1)=0; RealVector x2(2); x2(0)=-2; x2(1)=0; //evaluate single point double test=kernel(x1,x2); BOOST_CHECK_SMALL(result-test,1.e-15); //evaluate a batch of points RealMatrix batch1(50,2); RealMatrix batch2(100,2); for(std::size_t i = 0; i != 50;++i){ for(std::size_t j = 0; j != 2; ++j) batch1(i,j)=Rng::uni(-1,1); } for(std::size_t i = 0; i != 100;++i){ for(std::size_t j = 0; j != 2; ++j) batch2(i,j)=Rng::uni(-1,1); } testEval(kernel,batch1,batch2); //test first derivative testKernelDerivative(kernel,2); testKernelInputDerivative(kernel,2); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Kernels/MissingFeaturesKernelExpansionTests.cpp000066400000000000000000000044611314655040000262240ustar00rootroot00000000000000#define BOOST_TEST_MODULE MissingFeaturesKernelExpansionTestModule #include "shark/Models/Kernels/LinearKernel.h" #include "shark/Models/Kernels/MissingFeaturesKernelExpansion.h" #include "../../Utils.h" #include #include #include namespace shark { BOOST_AUTO_TEST_SUITE (Models_Kernels_MissingFeaturesKernelExpansionTests) BOOST_AUTO_TEST_CASE(TestMissingFeaturesKernelExpansion) { // This test case is testing evaluating kernel function while skipping missing features const unsigned int featureSize = 4u; const unsigned int sampleSize = 5u; LinearKernel<> kernel; // Dataset std::vector 
	std::vector<RealVector> input(sampleSize,RealVector(featureSize));
	std::vector<unsigned int> target(sampleSize);
	input[0](0) = 0.0;  input[0](1) = 0.0;  input[0](2) = 1.0; input[0](3) = 5.0; target[0] = 0;
	input[1](0) = 2.0;  input[1](1) = 2.0;  input[1](2) = 2.0; input[1](3) = 4.0; target[1] = 1;
	input[2](0) = -1.0; input[2](1) = -8.0; input[2](2) = 3.0; input[2](3) = 3.0; target[2] = 0;
	input[3](0) = -1.0; input[3](1) = -1.0; input[3](2) = 4.0; input[3](3) = 2.0; target[3] = 0;
	input[4](0) = 3.0;  input[4](1) = 3.0;  input[4](2) = 5.0; input[4](3) = 1.0; target[4] = 1;
	Data<RealVector> basis = createDataFromRange(input);

	// The class under test
	MissingFeaturesKernelExpansion<RealVector> ke(&kernel, basis, false);

	// Scaling coefficients
	RealVector scalingCoefficients(sampleSize);
	scalingCoefficients(0) = 1.0;
	scalingCoefficients(1) = 0.2;
	scalingCoefficients(2) = 0.3;
	scalingCoefficients(3) = 0.4;
	scalingCoefficients(4) = 0.5;
	ke.setScalingCoefficients(scalingCoefficients);

	// Alphas
	RealVector alpha(sampleSize);
	alpha(0) = 1.0;
	alpha(1) = 0.2;
	alpha(2) = 0.3;
	alpha(3) = 0.4;
	alpha(4) = 0.5;
	ke.setParameterVector(alpha);
	ke.setClassifierNorm(10.0);

	// Do an evaluation and then verify
	RealVector pattern(featureSize);
	pattern(0) = 1.0;
	pattern(1) = 2.0;
	pattern(2) = std::numeric_limits<double>::quiet_NaN(); // missing feature
	pattern(3) = 4.0;
	RealVector output = ke(pattern);
	std::cout << "d" << output << std::endl;
	std::vector<double> expected;
	expected.push_back(34.7851);
	BOOST_CHECK(test::verifyVectors(output, expected, 1e-2));
}

} // namespace shark

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/MklKernel.cpp
#include <shark/Models/Kernels/MklKernel.h>
#include <shark/Models/Kernels/GaussianRbfKernel.h>
#include <shark/Models/Kernels/LinearKernel.h>
#include <shark/Models/Kernels/DiscreteKernel.h>
#include <shark/Data/Dataset.h>

struct TestStruct1{
	shark::RealVector v1;
	std::size_t v2; //used as input for the discrete kernel
	shark::RealVector v3;
};

//~ struct TestStruct2{ //differentiable.
	//~ RealVector v1;
	//~ RealVector v2;
//~ };

//adapt the struct to make it boost::fusion compatible; this is needed for eval() with single elements
BOOST_FUSION_ADAPT_STRUCT(
	TestStruct1,
	(shark::RealVector, v1)(std::size_t, v2)(shark::RealVector, v3)
)
//~ BOOST_FUSION_ADAPT_STRUCT(
	//~ TestStruct2,
	//~ (RealVector, v1)(RealVector, v2)
//~ )

//Now adapt the struct to the batch interface.
//todo: make this less cumbersome, more like the adaptation above.
namespace shark{
template<>
struct Batch< TestStruct1 >{
	SHARK_CREATE_BATCH_INTERFACE_NO_TPL(
		TestStruct1,
		(shark::RealVector, v1)(std::size_t, v2)(shark::RealVector, v3)
	)
};
}

//not sure whether the definitions above can also go below...
#define BOOST_TEST_MODULE Kernels_MklKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>

using namespace shark;

// Since the MKLKernel is based on the weighted sum kernel (which is properly
// tested), we do not need to do numerical testing here.
// However, we instantiate every function to check for compile errors and that
// eval() matches the hand-evaluated kernels.
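// For reference, the combination rule the checks below rely on (a sketch of the
// weighting convention as exercised by this test, not the library implementation):
// the first kernel has fixed weight 1, every further kernel gets the weight
// exp(t_i) of one parameter t_i, and the sum is normalized by the total weight:
//   k(x,z) = (k1 + exp(t2)*k2 + exp(t3)*k3) / (1 + exp(t2) + exp(t3)).
// A minimal hand-rolled version for three precomputed kernel values
// (assumes std::exp from <cmath> is available via the headers above):
inline double weightedSumOfThree(double k1, double k2, double k3, double t2, double t3){
	double w2 = std::exp(t2); //positive weight of the second kernel
	double w3 = std::exp(t3); //positive weight of the third kernel
	return (k1 + w2*k2 + w3*k3) / (1.0 + w2 + w3);
}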
BOOST_AUTO_TEST_SUITE (Models_Kernels_MklKernel)

BOOST_AUTO_TEST_CASE( DenseMklKernel_Test_Eval )
{
	//create data
	std::size_t const examples = 1;
	std::size_t const dim1 = 10;
	std::size_t const dim2 = 12;
	std::size_t maxElem = 5;
	std::vector<TestStruct1> data(examples);
	std::vector<RealVector> dataV1(examples,RealVector(dim1));
	std::vector<std::size_t> dataV2(examples);
	std::vector<RealVector> dataV3(examples,RealVector(dim2));
	for(std::size_t i = 0; i != examples; ++i){
		data[i].v1.resize(dim1);
		data[i].v3.resize(dim2);
		for(std::size_t j = 0; j != dim1; ++j){
			dataV1[i](j)=data[i].v1(j)=Rng::uni(-1,1);
		}
		dataV2[i]=data[i].v2=Rng::discrete(0,5);
		for(std::size_t j = 0; j != dim2; ++j){
			dataV3[i](j)=data[i].v3(j)=Rng::uni(-1,1);
		}
	}

	//wouldn't it be nice if we could split these, too?
	Data<TestStruct1> dataset = createDataFromRange( data, 10 );
	Data<RealVector> datasetV1 = createDataFromRange( dataV1, 10 );
	Data<std::size_t> datasetV2 = createDataFromRange( dataV2, 10 );
	Data<RealVector> datasetV3 = createDataFromRange( dataV3, 10 );

	//create the MKL kernel
	//create state matrix for the discrete kernel
	RealMatrix matK(maxElem+1,maxElem+1);
	for(std::size_t i = 0; i != matK.size1();++i){
		for(std::size_t j = 0; j <= i;++j){
			matK(i,j) = Rng::uni(-1,1);
			matK(j,i) = matK(i,j);
		}
	}
	DenseRbfKernel baseKernelV1(0.1);
	DiscreteKernel baseKernelV2(matK);
	DenseLinearKernel baseKernelV3;

	MklKernel<TestStruct1> kernel( boost::fusion::make_vector(&baseKernelV1,&baseKernelV2,&baseKernelV3) );

	//check for the correct number of parameters
	const unsigned int numParameters = 3;
	kernel.setAdaptiveAll(true);
	BOOST_REQUIRE_EQUAL(kernel.numberOfParameters(),numParameters);

	//test kernel evals. first set weighting factors
	RealVector parameters(3);
	init(parameters)<<0.3,0.1,0.3;
	//the weights become 1, exp(0.3) and exp(0.1); the gauss kernel parameter is set to 0.3
	kernel.setParameterVector(parameters);

	//process kernel matrices for each element separately and weight the results to get ground-truth data
	RealMatrix matV1 = calculateRegularizedKernelMatrix(baseKernelV1,datasetV1);
	RealMatrix matV2 = calculateRegularizedKernelMatrix(baseKernelV2,datasetV2);
	RealMatrix matV3 = calculateRegularizedKernelMatrix(baseKernelV3,datasetV3);
	RealMatrix kernelMatTest = matV1+std::exp(0.3)*matV2+std::exp(0.1)*matV3;
	kernelMatTest /= 1+std::exp(0.3)+std::exp(0.1);

	//now calculate the kernel matrix of the MKL kernel; it should be the same.
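	//(calculateRegularizedKernelMatrix(k, data) assembles the Gram matrix
	// K(i,j) = k(x_i, x_j) of a dataset; the regularizer argument is left at
	// its default of zero here, so the diagonal is unmodified)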
	RealMatrix kernelMat = calculateRegularizedKernelMatrix(kernel,dataset);

	//test
	for(std::size_t i = 0; i != examples; ++i){
		for(std::size_t j = 0; j != examples;++j){
			BOOST_CHECK_CLOSE(kernelMatTest(i,j),kernelMat(i,j),1.e-5);
		}
	}
}

//~ // Test the MKL kernel with all sub-ranges exactly the full range
//~ BOOST_AUTO_TEST_CASE( DenseMklKernel_Test_FullRange )
//~ {
	//~ const double gamma1 = 0.1;
	//~ const double gamma2 = 0.01;
	//~ RealVector testParams(2);
	//~ testParams(0) = 1;
	//~ testParams(1) = gamma2;
	//~ GaussianRbfKernel<> baseKernelV1(gamma1);
	//~ GaussianRbfKernel<> baseKernel2(2*gamma2);
	//~ std::vector<AbstractKernelFunction<RealVector>* > kernels;
	//~ kernels.push_back(&baseKernelV1);
	//~ kernels.push_back(&baseKernel2);
	//~ std::vector< std::pair< std::size_t, std::size_t > > frs;
	//~ frs.push_back( std::make_pair( 0,2 ) );
	//~ frs.push_back( std::make_pair( 0,2 ) );
	//~ DenseMklKernel kernel( kernels, frs );
	//~ kernel.setAdaptive(1, true);
	//~ //now test whether the parameter vector is created correctly
	//~ kernel.setParameterVector(testParams);
	//~ RealVector parameter=kernel.parameterVector();
	//~ BOOST_CHECK_SMALL(norm_sqr(parameter-testParams), 1.e-15);
	//~ //and check whether all gamma values are correct
	//~ BOOST_CHECK_SMALL(baseKernelV1.gamma() - gamma1, 1e-13);
	//~ BOOST_CHECK_SMALL(baseKernel2.gamma() - gamma2, 1e-13);
	//~ //testpoints
	//~ RealVector x1(2); x1(0)= 2; x1(1)=1;
	//~ RealVector x2(2); x2(0)=-2; x2(1)=1;
	//~ ConstRealVectorRange sub1 = subrange((const RealVector&)(x1),0,2);
	//~ ConstRealVectorRange sub2 = subrange((const RealVector&)(x2),0,2);
	//~ Intermediate intermediateK1;
	//~ Intermediate intermediateK2;
	//~ double k1 = baseKernelV1.eval( sub1, sub2, intermediateK1 );
	//~ double k2 = baseKernel2.eval( sub1, sub2, intermediateK2 );
	//~ double numeratorResult = k1 + boost::math::constants::e<double>() * k2;
	//~ double result = numeratorResult / ( 1+boost::math::constants::e<double>() );
	//~ //evaluate point
	//~ double test = kernel.eval(x1,x2);
	//~ Intermediate intermediate;
	//~ double test2 = kernel.eval(x1,x2,intermediate);
	//~ BOOST_REQUIRE_SMALL( result-test, 1.e-15 );
	//~ BOOST_REQUIRE_SMALL( result-test2, 1.e-15 );
	//~ //test intermediate values.
everything is required to be correct because testing //~ //the derivative does not make sense anymore, if this is wrong //~ BOOST_REQUIRE_EQUAL(intermediate.size() , 7); //~ BOOST_REQUIRE_SMALL(intermediate[0] - numeratorResult,1.e-15); //~ BOOST_REQUIRE_SMALL(intermediate[1] - k1,1.e-15); //~ BOOST_REQUIRE_SMALL(intermediate[2] - intermediateK1[0],1.e-15); //~ BOOST_REQUIRE_SMALL(intermediate[3] - intermediateK1[1],1.e-15); //~ BOOST_REQUIRE_SMALL(intermediate[4] - k2,1.e-15); //~ BOOST_REQUIRE_SMALL(intermediate[5] - intermediateK2[0],1.e-15); //~ BOOST_REQUIRE_SMALL(intermediate[6] - intermediateK2[1],1.e-15); //~ //test first derivative //~ testKernelDerivative(kernel, 2, 1.e-6); //~ testKernelInputDerivative(kernel, 2, 1.e-6); //~ } //~ BOOST_AUTO_TEST_CASE( DenseMklKernel_Test_Detailed ) //~ { //~ unsigned int numdim = 3; //~ unsigned int numker = 6; //~ DenseRbfMklKernel baseKernelV1(0.1); //~ DenseRbfMklKernel basekernel2(0.4, true); //~ DenseLinearMklKernel basekernel3; //~ DenseMonomialMklKernel basekernel4(3); //~ DensePolynomialMklKernel basekernel5(2, 1.0); //~ BOOST_CHECK( !basekernel5.hasFirstParameterDerivative() ); //~ DenseARDMklKernel basekernel6(numdim,0.2); //~ std::vector< DenseMklKernelFunction * > kernels; //~ kernels.push_back(&baseKernelV1); //~ kernels.push_back(&basekernel2); //~ kernels.push_back(&basekernel3); //~ kernels.push_back(&basekernel4); //~ kernels.push_back(&basekernel5); //~ kernels.push_back(&basekernel6); //~ std::vector< std::pair< std::size_t, std::size_t > > frs; //~ for ( std::size_t i=0; i<6; i++ ) //~ frs.push_back( std::make_pair( 0,3 ) ); //~ DenseMklKernel kernel( kernels, frs ); //~ BOOST_CHECK_SMALL( (double)kernel.numberOfIntermediateValues(RealVector(), RealVector()) - 16.0, 1e-15 ); //~ // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios //~ unsigned int num_bools = 5000; //~ unsigned int num_trials = 50; //~ std::vector< bool > cur_bools(numker); //~ for ( unsigned int i=0; i kernels; //~ kernels.push_back(&baseKernelV1); //~ kernels.push_back(&basekernel2); //~ kernels.push_back(&basekernel3); //~ kernels.push_back(&basekernel4); //~ kernels.push_back(&basekernel5); //~ kernels.push_back(&basekernel6); //~ std::vector< std::pair< std::size_t, std::size_t > > frs; //~ for ( std::size_t i=0; i<6; i++ ) //~ frs.push_back( std::make_pair( 0,3 ) ); //~ DenseMklKernel kernel( kernels, frs ); //~ BOOST_CHECK_SMALL( (double)kernel.numberOfIntermediateValues(RealVector(), RealVector()) - 16.0, 1e-15 ); //~ // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios //~ unsigned int num_bools = 5000; //~ unsigned int num_trials = 50; //~ std::vector< bool > cur_bools(numker); //~ for ( unsigned int i=0; i kernels; //~ kernels.push_back(&baseKernelV1); //~ kernels.push_back(&basekernel2); //~ kernels.push_back(&basekernel3); //~ kernels.push_back(&basekernel4); //~ kernels.push_back(&basekernel5); //~ kernels.push_back(&basekernel6); //~ std::vector< std::pair< std::size_t, std::size_t > > frs; //~ for ( std::size_t i=0; i<6; i++ ) //~ frs.push_back( std::make_pair( 0,3 ) ); //~ DenseMklKernel kernel( kernels, frs ); //~ BOOST_CHECK_SMALL( (double)kernel.numberOfIntermediateValues(RealVector(), RealVector()) - 16.0, 1e-15 ); //~ // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios //~ unsigned int num_bools = 5000; //~ unsigned int num_trials = 50; //~ std::vector< bool > cur_bools(numker); //~ for ( unsigned int i=0; i kernels; //~ 
//~ kernels.push_back(&baseKernelV1);
//~ kernels.push_back(&basekernel2);
//~ kernels.push_back(&basekernel3);
//~ kernels.push_back(&basekernel4);
//~ kernels.push_back(&basekernel5);
//~ kernels.push_back(&basekernel6);
//~ std::vector< std::pair< std::size_t, std::size_t > > frs;
//~ for ( std::size_t i=0; i<6; i++ )
	//~ frs.push_back( std::make_pair( 0,3 ) );
//~ DenseMklKernel kernel( kernels, frs );
//~ BOOST_CHECK_SMALL( (double)kernel.numberOfIntermediateValues(RealVector(), RealVector()) - 16.0, 1e-15 );
//~ // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios
//~ unsigned int num_bools = 5000;
//~ unsigned int num_trials = 50;
//~ std::vector< bool > cur_bools(numker);
//~ for ( unsigned int i=0; i<num_bools; i++ )

Shark-3.1.4/Test/Models/Kernels/ModelKernel.cpp
#define BOOST_TEST_MODULE Kernels_ModelKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/Kernels/ModelKernel.h>
#include <shark/Models/Kernels/GaussianRbfKernel.h>
#include <shark/Models/LinearModel.h>
#include <shark/Data/Dataset.h>
#include "KernelDerivativeTestHelper.h"
#include <shark/Rng/GlobalRng.h>

using namespace shark;

BOOST_AUTO_TEST_CASE( ModelKernel_Parameters )
{
	double gamma = 0.2;
	GaussianRbfKernel<> kernel(gamma);
	LinearModel<> model(2,3);
	initRandomUniform(model,-1,1);
	RealVector modelParameters = model.parameterVector();
	std::size_t kernelParams = model.numberOfParameters()+1;

	//check whether the right parameters are returned
	{
		ModelKernel<RealVector> modelKernel(&kernel,&model);
		BOOST_REQUIRE_EQUAL(modelKernel.numberOfParameters(), kernelParams);
		RealVector kernelParameters = modelKernel.parameterVector();
		BOOST_REQUIRE_EQUAL(kernelParameters.size(), kernelParams);
		BOOST_CHECK_CLOSE(kernelParameters(0),gamma,1.e-14);
		for(std::size_t i = 0; i != modelParameters.size(); ++i){
			BOOST_CHECK_CLOSE(modelParameters(i),kernelParameters(i+1),1.e-14);
		}
	}

	//set the parameters of kernel and model to 0.1 and check whether this also works
	{
		ModelKernel<RealVector> modelKernel(&kernel,&model);
		kernel.setGamma(0.1);
		model.setParameterVector(RealVector(model.numberOfParameters(),0.1));
		BOOST_REQUIRE_EQUAL(modelKernel.numberOfParameters(), kernelParams);
		RealVector kernelParameters = modelKernel.parameterVector();
		BOOST_REQUIRE_EQUAL(kernelParameters.size(), kernelParams);
		for(std::size_t i = 0; i != kernelParameters.size(); ++i){
			BOOST_CHECK_SMALL(kernelParameters(i)-0.1,1.e-14);
		}
	}

	//check whether setting parameters works
	{
		RealVector params(kernelParams);
		params(0) = gamma;
		subrange(params,1,kernelParams)=modelParameters;
		ModelKernel<RealVector> modelKernel(&kernel,&model);
		BOOST_REQUIRE_EQUAL(modelKernel.numberOfParameters(), kernelParams);
		modelKernel.setParameterVector(params);
		BOOST_CHECK_CLOSE(kernel.gamma(),gamma,1.e-14);
		RealVector modelParamsNew = model.parameterVector();
		for(std::size_t i = 0; i != modelParameters.size(); ++i){
			BOOST_CHECK_CLOSE(modelParameters(i),modelParamsNew(i),1.e-14);
		}
	}
}

BOOST_AUTO_TEST_CASE( ModelKernel_Eval_EvalDerivative )
{
	double gamma = 0.2;
	GaussianRbfKernel<> kernel(gamma);
	LinearModel<> model(2,3);
	initRandomUniform(model,-1,1);
	std::size_t kernelParams = model.numberOfParameters()+1;
	ModelKernel<RealVector> modelKernel(&kernel,&model);
	BOOST_REQUIRE_EQUAL(modelKernel.numberOfParameters(), kernelParams);

	//testpoints
	RealVector x1(2); x1(0)=1;  x1(1)=1;
	RealVector x2(2); x2(0)=-2; x2(1)=0.0;

	//evaluate single point: the model kernel is the base kernel applied to the model outputs
	double test = modelKernel(x1,x2);
	double result = kernel(model(x1),model(x2));
	BOOST_CHECK_SMALL(result-test,1.e-15);

	//evaluate a batch of points
	RealMatrix batch1(10,2);
	RealMatrix batch2(20,2);
	for(std::size_t i = 0; i != 10;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch1(i,j)=Rng::uni(-1,1);
	}
	for(std::size_t i = 0; i != 20;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch2(i,j)=Rng::uni(-1,1);
	}
	testEval(modelKernel,batch1,batch2);

	//test first derivative
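	//(convention used by the KernelDerivativeTestHelper routines throughout
	// these tests: the analytic derivative is compared against a central
	// finite-difference estimate; the trailing arguments are the input
	// dimension, the finite-difference step width and the allowed deviation)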
	testKernelDerivative(modelKernel,2,1.e-7,1.e-5);
}

Shark-3.1.4/Test/Models/Kernels/MonomialKernel.cpp
#define BOOST_TEST_MODULE Kernels_MonomialKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/Kernels/MonomialKernel.h>
#include "KernelDerivativeTestHelper.h"
#include <shark/Rng/GlobalRng.h>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_Kernels_MonomialKernel)

BOOST_AUTO_TEST_CASE( DenseMonomialKernel_Test )
{
	double result=4;
	DenseMonomialKernel kernel(2);

	//testpoints; (x1 . x2)^2 = (-2)^2 = 4
	RealVector x1(2); x1(0)=1;  x1(1)=0;
	RealVector x2(2); x2(0)=-2; x2(1)=0;

	//evaluate single point
	double test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result-test,1.e-15);

	//evaluate a batch of points
	RealMatrix batch1(10,2);
	RealMatrix batch2(20,2);
	for(std::size_t i = 0; i != 10;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch1(i,j)=Rng::uni(-1,1);
	}
	for(std::size_t i = 0; i != 20;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch2(i,j)=Rng::uni(-1,1);
	}
	testEval(kernel,batch1,batch2);

	//test first derivative
	testKernelDerivative(kernel,2,1.e-7,1.e-5);
	testKernelInputDerivative(kernel,2,1.e-7,1.e-5);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/MultiTaskKernel.cpp
#define BOOST_TEST_MODULE ML_MultiTaskKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/Kernels/GaussianRbfKernel.h>
#include <shark/Models/Kernels/MultiTaskKernel.h>
#include <shark/Data/Dataset.h>

namespace shark {

// This unit test checks correctness of the
// GaussianTaskKernel class by comparing to
// a manually computed kernel value, and of
// the MultiTaskKernel wrapper class by
// checking that the product is correct.

BOOST_AUTO_TEST_SUITE (Models_Kernels_MultiTaskKernel)

BOOST_AUTO_TEST_CASE( MultiTaskKernel_Test )
{
	// define dummy data composed of an input and a task component
	RealVector x1(3); x1(0) = 1.0; x1(1) = 2.0; x1(2) = 3.0;
	RealVector x2(3); x2(0) = 0.0; x2(1) = 3.14159; x2(2) = 10.0 / 3.0;
	RealVector x3(3); x3(0) = 1.5; x3(1) = 2.5; x3(2) = 2.5;
	RealVector x4(3); x4(0) = 0.5; x4(1) = 3.0; x4(2) = 3.5;

	// composition of vector and task index
	MultiTaskSample<RealVector> v1(x1, 0);
	MultiTaskSample<RealVector> v2(x2, 1);
	MultiTaskSample<RealVector> v3(x3, 0);
	MultiTaskSample<RealVector> v4(x4, 1);

	std::vector<MultiTaskSample<RealVector> > vec;
	vec.push_back(v1);
	vec.push_back(v2);
	vec.push_back(v3);
	vec.push_back(v4);

	// build a data set object
	Data<MultiTaskSample<RealVector> > data = createDataFromRange(vec);

	// define a Gaussian kernel on inputs and a special task kernel on tasks
	const double gamma = 3.0;
	GaussianRbfKernel<RealVector> gauss(1.0);
	GaussianTaskKernel<RealVector> taskkernel(data, 2, gauss, gamma);
	MultiTaskKernel<RealVector> multitaskkernel(&gauss, &taskkernel);

	// compute kernel on the inputs
	RealMatrix g(data.numberOfElements(), data.numberOfElements());
	for (std::size_t i=0; i<data.numberOfElements(); i++)
		for (std::size_t j=0; j<data.numberOfElements(); j++)
			g(i, j) = gauss(vec[i].input, vec[j].input);

	//check that the joint kernel equals the product of input kernel
	//and task kernel, as stated in the header comment above
	for (std::size_t i=0; i<data.numberOfElements(); i++){
		for (std::size_t j=0; j<data.numberOfElements(); j++){
			double k = multitaskkernel(vec[i], vec[j]);
			double t = taskkernel(vec[i].task, vec[j].task);
			BOOST_CHECK_SMALL(k - g(i, j) * t, 1e-13);
		}
	}
}

BOOST_AUTO_TEST_SUITE_END()

} // namespace shark

Shark-3.1.4/Test/Models/Kernels/NormalizedKernel.cpp
#define BOOST_TEST_MODULE Kernels_NormalizedKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "KernelDerivativeTestHelper.h"
#include <shark/Models/Kernels/NormalizedKernel.h>
#include <shark/Models/Kernels/PolynomialKernel.h>
#include <shark/Models/Kernels/GaussianRbfKernel.h>
#include <shark/Rng/GlobalRng.h>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_Kernels_NormalizedKernel)

BOOST_AUTO_TEST_CASE( NormalizedKernel_Polynomial_Test )
{
	PolynomialKernel<> baseKernel(2,1);
	DenseNormalizedKernel kernel(&baseKernel);

	// test points
	RealVector x1(2); x1(0)=1;  x1(1)=1;
	RealVector x2(2); x2(0)=-2; x2(1)=1;

	double result=baseKernel(x1,x2)/sqrt(baseKernel(x1,x1)*baseKernel(x2,x2));

	//evaluate point
	double test=kernel.eval(x1,x2);
	BOOST_CHECK_SMALL(result-test,1.e-15);

	testKernelInputDerivative(kernel, 2, 1e-7, 1e-2);
}

BOOST_AUTO_TEST_CASE( NormalizedKernel_GaussianRbf_Test )
{
	GaussianRbfKernel<> baseKernel(0.5);
	DenseNormalizedKernel kernel(&baseKernel);

	// test points
	RealVector x1(2); x1(0)=1;  x1(1)=1;
	RealVector x2(2); x2(0)=-2; x2(1)=1;
	double result=baseKernel(x1,x2)/sqrt(baseKernel(x1,x1)*baseKernel(x2,x2));

	//evaluate point
	double test=kernel.eval(x1,x2);
	BOOST_CHECK_SMALL(result-test,1.e-15);

	//test first derivative
	testKernelDerivative(kernel, 2, 1e-7, 1e-4);
	testKernelInputDerivative(kernel, 2, 1e-7, 1e-2);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/PolynomialKernel.cpp
#define BOOST_TEST_MODULE Kernels_PolynomialKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/Kernels/PolynomialKernel.h>
#include "KernelDerivativeTestHelper.h"
#include <shark/Rng/GlobalRng.h>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_Kernels_PolynomialKernel)

BOOST_AUTO_TEST_CASE( DensePolynomialKernel_Value ){
	double result3 = 64.0;
	double result2 = 16.0;
	double result1 = 4.0;
	DensePolynomialKernel kernel(3, 1.0, false);

	// test points; x1 . x2 + offset = 3 + 1 = 4, so the results are 4^3, 4^2 and 4^1
	RealVector x1(2); x1(0) = 1.5; x1(1) = 0.0;
	RealVector x2(2); x2(0) = 2.0; x2(1) = 1.0;

	//evaluate single point
	double test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result3-test,1.e-15);

	kernel.setDegree(2);
	test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result2-test,1.e-15);

	kernel.setDegree(1);
	test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result1-test,1.e-15);

	//evaluate a batch of points
	RealMatrix batch1(10,2);
	RealMatrix batch2(20,2);
	for(std::size_t i = 0; i != 10;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch1(i,j)=Rng::uni(-1,1);
	}
	for(std::size_t i = 0; i != 20;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch2(i,j)=Rng::uni(-1,1);
	}
	kernel.setDegree(3);
	testEval(kernel,batch1,batch2);
	kernel.setDegree(2);
	testEval(kernel,batch1,batch2);
	kernel.setDegree(1);
	testEval(kernel,batch1,batch2);

	//test first derivative
	kernel.setDegree(1);
	testKernelDerivative(kernel,2,1.e-7,1.e-5);
	testKernelInputDerivative(kernel,2,1.e-7,1.e-5);
	kernel.setDegree(2);
	testKernelDerivative(kernel,2,1.e-7,1.e-5);
	testKernelInputDerivative(kernel,2,1.e-7,1.e-5);
}

BOOST_AUTO_TEST_CASE( SparsePolynomialKernel_Test ){
	double result3 = 64.0;
	double result2 = 16.0;
	double result1 = 4.0;
	CompressedPolynomialKernel kernel(3, 1.0, false);

	// test points
	CompressedRealVector x1(200); x1(30) = 1.5;
	CompressedRealVector x2(200); x2(30) = 2.0; x2(51) = 1.0;

	//evaluate single point
	double test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result3-test,1.e-15);

	kernel.setDegree(2);
	test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result2-test,1.e-15);

	kernel.setDegree(1);
	test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result1-test,1.e-15);

	//evaluate a batch of points
	CompressedRealMatrix batch1(10,200);
	CompressedRealMatrix batch2(20,200);
	for(std::size_t i = 0; i != 10;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch1(i,j*100)=Rng::uni(-1,1);
	}
	for(std::size_t i = 0; i != 20;++i){
		for(std::size_t j = 0; j != 4; ++j)
			batch2(i,j*50)=Rng::uni(-1,1);
	}
	kernel.setDegree(3);
	testEval(kernel,batch1,batch2);
	kernel.setDegree(2);
	testEval(kernel,batch1,batch2);
	kernel.setDegree(1);
	testEval(kernel,batch1,batch2);

	//test first derivative; the input derivative for sparse inputs is not so useful
	kernel.setDegree(1);
	testKernelDerivative(kernel,2,1.e-7,1.e-5);
	kernel.setDegree(2);
	testKernelDerivative(kernel,2,1.e-7,1.e-5);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/ProductKernel.cpp
#define BOOST_TEST_MODULE ML_ProductKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "KernelDerivativeTestHelper.h"
#include <shark/Models/Kernels/ProductKernel.h>
#include <shark/Models/Kernels/LinearKernel.h>
#include <shark/Models/Kernels/MonomialKernel.h>
#include <shark/Models/Kernels/GaussianRbfKernel.h>

using namespace shark;

// This unit test checks whether the product
// kernel class correctly computes the product
// of its component kernels.

BOOST_AUTO_TEST_SUITE (Models_Kernels_ProductKernel)

BOOST_AUTO_TEST_CASE( ProductKernel_Test )
{
	// define three basis kernels
	LinearKernel<RealVector> k1;
	MonomialKernel<RealVector> k2(3);
	GaussianRbfKernel<RealVector> k3(1.0);

	// define a product kernel
	std::vector<AbstractKernelFunction<RealVector>* > kernels;
	kernels.push_back(&k1);
	kernels.push_back(&k2);
	kernels.push_back(&k3);
	ProductKernel<RealVector> kp(kernels);

	RealMatrix batchX1(10,3);
	RealMatrix batchX2(12,3);
	for(std::size_t i = 0; i != 10; ++i){
		batchX1(i,0) = Rng::uni(-1,1);
		batchX1(i,1) = Rng::uni(-1,1);
		batchX1(i,2) = Rng::uni(-1,1);
	}
	for(std::size_t i = 0; i != 12; ++i){
		batchX2(i,0) = Rng::uni(-1,1);
		batchX2(i,1) = Rng::uni(-1,1);
		batchX2(i,2) = Rng::uni(-1,1);
	}

	//evaluate the kernel matrices by hand
	RealMatrix matK1 = k1(batchX1, batchX2);
	RealMatrix matK2 = k2(batchX1, batchX2);
	RealMatrix matK3 = k3(batchX1, batchX2);
	RealMatrix matTest = element_prod(matK1,element_prod(matK2,matK3));

	// test the product kernel
	RealMatrix matKp = kp(batchX1,batchX2);
	BOOST_REQUIRE_EQUAL(matKp.size1(),10);
	BOOST_REQUIRE_EQUAL(matKp.size2(),12);
	for(std::size_t i = 0; i != 10; ++i){
		for(std::size_t j = 0; j != 12; ++j)
			BOOST_CHECK_SMALL(matTest(i,j)-matKp(i,j),1.e-13);
	}
	testEval(kp,batchX1,batchX2);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/ScaledKernel.cpp
#define BOOST_TEST_MODULE Kernels_ScaledKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/Kernels/ScaledKernel.h>
#include <shark/Models/Kernels/LinearKernel.h>
#include "KernelDerivativeTestHelper.h"
#include <shark/Rng/GlobalRng.h>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_Kernels_ScaledKernel)

BOOST_AUTO_TEST_CASE( ScaledKernel_Test ){
	double result=-0.2;
	DenseLinearKernel base;
	ScaledKernel<> kernel(&base,0.1);

	// test points; the linear kernel gives -2, scaled by 0.1 this is -0.2
	RealVector x1(2); x1(0)=1;  x1(1)=0;
	RealVector x2(2); x2(0)=-2; x2(1)=0;

	//evaluate single point
	double test = kernel(x1,x2);
	BOOST_CHECK_SMALL(result-test,1.e-15);

	//evaluate a batch of points
	RealMatrix batch1(10,2);
	RealMatrix batch2(20,2);
	for(std::size_t i = 0; i != 10;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch1(i,j)=Rng::uni(-1,1);
	}
	for(std::size_t i = 0; i != 20;++i){
		for(std::size_t j = 0; j != 2; ++j)
			batch2(i,j)=Rng::uni(-1,1);
	}
	testEval(kernel,batch1,batch2);

	//test first derivative
	testKernelDerivative(kernel,2,1.e-7,1.e-5);
	testKernelInputDerivative(kernel,2,1.e-7,1.e-5);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/SubrangeKernel.cpp
#include <shark/Models/Kernels/SubrangeKernel.h>
#include <shark/Models/Kernels/GaussianRbfKernel.h>
#include <shark/Models/Kernels/LinearKernel.h>
#include <shark/Data/Dataset.h>

#define BOOST_TEST_MODULE Kernels_SubrangeKernel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "KernelDerivativeTestHelper.h"

using namespace shark;

//since SubrangeKernel is based on the weighted sum kernel (which is properly
//tested) we don't need to do numerical testing here.
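//What "subrange" means here, sketched under the conventions of this test (not
//a quote of the library documentation): each base kernel only sees its slice
//[start,end) of the concatenated input, and the slices are combined like a
//weighted sum kernel, i.e. for two slices of lengths d1 and d2
//  k(x,z) = ( k1(x[0:d1],z[0:d1]) + w * k2(x[d1:d1+d2],z[d1:d1+d2]) ) / (1 + w),
//so evaluating the base kernels on the split-out parts of the data must
//reproduce the combined kernel matrix, which is exactly what the test checks.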
BOOST_AUTO_TEST_SUITE (Models_Kernels_SubrangeKernel)

BOOST_AUTO_TEST_CASE( DenseSubrangeKernel_Test)
{
	//create data
	std::size_t const examples = 35;
	std::size_t const dim1 = 10;
	std::size_t const dim2 = 12;
	std::vector<RealVector> data(examples,RealVector(dim1+dim2));
	std::vector<RealVector> dataV1(examples,RealVector(dim1));
	std::vector<RealVector> dataV2(examples,RealVector(dim2));
	for(std::size_t i = 0; i != examples; ++i){
		for(std::size_t j = 0; j != dim1; ++j){
			dataV1[i](j)=data[i](j)=Rng::uni(-1,1);
		}
		for(std::size_t j = 0; j != dim2; ++j){
			dataV2[i](j)=data[i](j+dim1)=Rng::uni(-1,1);
		}
	}
	Data<RealVector> dataset = createDataFromRange(data,10);
	Data<RealVector> datasetV1 = createDataFromRange(dataV1,10);
	Data<RealVector> datasetV2 = createDataFromRange(dataV2,10);

	//create the subrange kernel
	std::vector<std::pair<std::size_t,std::size_t> > ranges(2);
	ranges[0].first=0;
	ranges[0].second=dim1;
	ranges[1].first=dim1;
	ranges[1].second=dim1+dim2;

	DenseRbfKernel baseKernelV1(0.2);
	DenseLinearKernel baseKernelV2;
	std::vector<AbstractKernelFunction<RealVector>*> kernels;
	kernels.push_back(&baseKernelV1);
	kernels.push_back(&baseKernelV2);
	SubrangeKernel<RealVector> kernel(kernels,ranges);

	//check for the correct number of parameters
	const unsigned int numParameters = 2;
	kernel.setAdaptiveAll(true);
	BOOST_REQUIRE_EQUAL(kernel.numberOfParameters(),numParameters);

	//test kernel evals. first set weighting factors
	RealVector parameters(2);
	init(parameters)<<0.3,0.5;
	//the weights become 1 and exp(0.3); the gauss kernel parameter is set to 0.5
	kernel.setParameterVector(parameters);

	//process kernel matrices for each element separately and weight the results to get ground-truth data
	RealMatrix matV1 = calculateRegularizedKernelMatrix(baseKernelV1,datasetV1);
	RealMatrix matV2 = calculateRegularizedKernelMatrix(baseKernelV2,datasetV2);
	RealMatrix kernelMatTest = matV1+std::exp(0.3)*matV2;
	kernelMatTest /= 1+std::exp(0.3);

	//now calculate the kernel matrix of the subrange kernel; it should be the same.
	RealMatrix kernelMat = calculateRegularizedKernelMatrix(kernel,dataset);

	//test
	for(std::size_t i = 0; i != examples; ++i){
		for(std::size_t j = 0; j != examples;++j){
			BOOST_CHECK_CLOSE(kernelMatTest(i,j),kernelMat(i,j),1.e-5);
		}
	}
	testEval(kernel,dataset.batch(0),dataset.batch(3));
	testKernelDerivative(kernel,22,1.e-7);
	testKernelInputDerivative(kernel,22);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/Kernels/WeightedSumKernel.cpp
#define BOOST_TEST_MODULE ML_KernelFunction
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "KernelDerivativeTestHelper.h"
#include <shark/Models/Kernels/WeightedSumKernel.h>
#include <shark/Models/Kernels/GaussianRbfKernel.h>
#include <shark/Models/Kernels/LinearKernel.h>
#include <shark/Models/Kernels/MonomialKernel.h>
#include <shark/Models/Kernels/PolynomialKernel.h>
#include <shark/Models/Kernels/ArdKernel.h>
#include <shark/Rng/GlobalRng.h>
#include <boost/math/constants/constants.hpp>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_Kernels_WeightedSumKernel)

BOOST_AUTO_TEST_CASE( DenseWeightedSumKernel_Test )
{
	const double gamma1 = 0.1;
	const double gamma2 = 0.01;
	RealVector testParams(2);
	testParams(0) = 1;
	testParams(1) = gamma2;
	DenseRbfKernel baseKernel1(gamma1);
	DenseRbfKernel baseKernel2(2*gamma2);
	std::vector<AbstractKernelFunction<RealVector>* > kernels;
	kernels.push_back(&baseKernel1);
	kernels.push_back(&baseKernel2);
	DenseWeightedSumKernel kernel(kernels);
	kernel.setAdaptive(1, true);

	//now test whether the parameter vector is created correctly
	kernel.setParameterVector(testParams);
	RealVector parameter=kernel.parameterVector();
	BOOST_CHECK_SMALL(norm_sqr(parameter-testParams), 1.e-15);
	//and check whether all gamma values are correct
	BOOST_CHECK_SMALL(baseKernel1.gamma() - gamma1, 1e-13);
	BOOST_CHECK_SMALL(baseKernel2.gamma() - gamma2, 1e-13);

	//testpoints
	RealVector x1(2); x1(0)=2;  x1(1)=1;
	RealVector x2(2); x2(0)=-2; x2(1)=1;

	double k1 = baseKernel1.eval(x1,x2);
	double k2 = baseKernel2.eval(x1,x2);
	double numeratorResult = k1 + boost::math::constants::e<double>() * k2;
	double result = numeratorResult / (1 + boost::math::constants::e<double>());

	//testbatches
	RealMatrix batchX1(2,2);
	batchX1(0,0)=2; batchX1(0,1)=1;
	batchX1(1,0)=1; batchX1(1,1)=3;
	RealMatrix batchX2(2,2);
	batchX2(0,0)=-2; batchX2(0,1)=1;
	batchX2(1,0)=3;  batchX2(1,1)=3;
	boost::shared_ptr<State> stateK1Batch = baseKernel1.createState();
	boost::shared_ptr<State> stateK2Batch = baseKernel2.createState();
	RealMatrix k1Batch,k2Batch;
	baseKernel1.eval(batchX1,batchX2,k1Batch,*stateK1Batch);
	baseKernel2.eval(batchX1,batchX2,k2Batch,*stateK2Batch);
	RealMatrix numeratorResultBatch = k1Batch + boost::math::constants::e<double>() * k2Batch;
	RealMatrix resultBatch = numeratorResultBatch / (1 + boost::math::constants::e<double>());

	//evaluate point
	double test = kernel.eval(x1,x2);
	BOOST_REQUIRE_SMALL(result - test, 1.e-15);

	//evaluate batch
	RealMatrix testBatch,testBatch2;
	kernel.eval(batchX1,batchX2,testBatch);
	boost::shared_ptr<State> stateBatch = kernel.createState();
	kernel.eval(batchX1,batchX2,testBatch2,*stateBatch);
	BOOST_REQUIRE_SMALL(resultBatch(0,0) - testBatch(0,0), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(0,0) - testBatch2(0,0), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(0,1) - testBatch(0,1), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(0,1) - testBatch2(0,1), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(1,0) - testBatch(1,0), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(1,0) - testBatch2(1,0), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(1,1) - testBatch(1,1), 1.e-15);
	BOOST_REQUIRE_SMALL(resultBatch(1,1) - testBatch2(1,1), 1.e-15);

	//test first derivative
	testKernelDerivative(kernel, 2, 1.e-8,1.e-6,1,1);
	testKernelInputDerivative(kernel, 2, 1.e-8);
}

BOOST_AUTO_TEST_CASE( DenseWeightedSumKernel_Test_Detailed )
{
	unsigned int numdim = 3;
	unsigned int numker = 6;
	DenseRbfKernel basekernel1(0.1);
DenseRbfKernel basekernel2(0.4, true); DenseLinearKernel basekernel3; DenseMonomialKernel basekernel4(3); DensePolynomialKernel basekernel5(2, 1.0); BOOST_CHECK( !basekernel5.hasFirstParameterDerivative() ); DenseARDKernel basekernel6(numdim,0.2); std::vector< AbstractKernelFunction * > kernels; kernels.push_back(&basekernel1); kernels.push_back(&basekernel2); kernels.push_back(&basekernel3); kernels.push_back(&basekernel4); kernels.push_back(&basekernel5); kernels.push_back(&basekernel6); DenseWeightedSumKernel kernel(kernels); // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios unsigned int num_bools = 5000; unsigned int num_trials = 50; std::vector< bool > cur_bools(numker); for ( unsigned int i=0; i * > kernels; kernels.push_back(&basekernel1); kernels.push_back(&basekernel2); kernels.push_back(&basekernel3); kernels.push_back(&basekernel4); kernels.push_back(&basekernel5); kernels.push_back(&basekernel6); DenseWeightedSumKernel kernel(kernels); // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios unsigned int num_bools = 5000; unsigned int num_trials = 50; std::vector< bool > cur_bools(numker); for ( unsigned int i=0; i * > kernels; kernels.push_back(&basekernel1); kernels.push_back(&basekernel2); kernels.push_back(&basekernel3); kernels.push_back(&basekernel4); kernels.push_back(&basekernel5); kernels.push_back(&basekernel6); DenseWeightedSumKernel kernel(kernels); // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios unsigned int num_bools = 5000; unsigned int num_trials = 50; std::vector< bool > cur_bools(numker); for ( unsigned int i=0; i * > kernels; kernels.push_back(&basekernel1); kernels.push_back(&basekernel2); kernels.push_back(&basekernel3); kernels.push_back(&basekernel4); kernels.push_back(&basekernel5); kernels.push_back(&basekernel6); DenseWeightedSumKernel kernel(kernels); // test setting and getting the parameter vector for all kinds of adaptive-ness scenarios unsigned int num_bools = 5000; unsigned int num_trials = 50; std::vector< bool > cur_bools(numker); for ( unsigned int i=0; i* > kernels; // kernels.push_back(&baseKernel1); // kernels.push_back(&baseKernel2); // DenseFullyWeightedSumKernel kernel(kernels); // kernel.setAdaptive(1, true); // //now test whether the parametervector is created correctly // kernel.setParameterVector(testParams); // RealVector parameter=kernel.parameterVector(); // BOOST_CHECK_SMALL(norm_sqr(parameter-testParams), 1.e-15); // //and check whether all gamma values are correct // BOOST_CHECK_SMALL(baseKernel1.gamma() - gamma1, 1e-13); // BOOST_CHECK_SMALL(baseKernel2.gamma() - gamma2, 1e-13); // //testpoints // RealVector x1(2); // x1(0)=2; // x1(1)=1; // RealVector x2(2); // x2(0)=-2; // x2(1)=1; // Intermediate intermediateK1; // Intermediate intermediateK2; // double k1 = baseKernel1.eval(x1,x2,intermediateK1); // double k2 = baseKernel2.eval(x1,x2,intermediateK2); // double numeratorResult = k1 + boost::math::constants::e() * k2; // double result = numeratorResult / (1 + boost::math::constants::e()); // // //evaluate point // double test = kernel.eval(x1,x2); // Intermediate intermediate; // double test2 = kernel.eval(x1,x2,intermediate); // BOOST_REQUIRE_SMALL(result - test, 1.e-15); // BOOST_REQUIRE_SMALL(result - test2, 1.e-15); // // //test intermediate values everything is required to be correct because testing // //the derivative does not make sense anymore, if this is wrong // BOOST_REQUIRE_EQUAL(intermediate.size() 
// , 7);
// BOOST_REQUIRE_SMALL(intermediate[0] - numeratorResult,1.e-15);
// BOOST_REQUIRE_SMALL(intermediate[1] - k1,1.e-15);
// BOOST_REQUIRE_SMALL(intermediate[2] - intermediateK1[0],1.e-15);
// BOOST_REQUIRE_SMALL(intermediate[3] - intermediateK1[1],1.e-15);
// BOOST_REQUIRE_SMALL(intermediate[4] - k2,1.e-15);
// BOOST_REQUIRE_SMALL(intermediate[5] - intermediateK2[0],1.e-15);
// BOOST_REQUIRE_SMALL(intermediate[6] - intermediateK2[1],1.e-15);
// //test first derivative
// testKernelDerivative(kernel, 2, 1.e-6);
// testKernelInputDerivative(kernel, 2, 1.e-8);
//}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/LinearModel.cpp
#include <shark/Models/LinearModel.h>
#define BOOST_TEST_MODULE Models_LinearModel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "derivativeTestHelper.h"
#include <shark/Rng/GlobalRng.h>

using namespace std;
using namespace boost::archive;
using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_LinearModel)

BOOST_AUTO_TEST_CASE( Models_LinearModel )
{
	// 2 inputs, 2 outputs, no offset -> 4 parameters
	LinearModel<> model(2, 2, false);
	BOOST_CHECK_EQUAL(model.numberOfParameters(), 4u);

	// The matrix should have the form
	// [2 1]
	// [3 5]
	RealVector testParameters(4);
	testParameters(0) = 2;
	testParameters(1) = 1;
	testParameters(2) = 3;
	testParameters(3) = 5;

	// Test whether setting and getting of parameters works
	model.setParameterVector(testParameters);
	RealVector retrievedParameters = model.parameterVector();
	for (size_t i=0; i!=4; ++i)
		BOOST_CHECK_EQUAL(retrievedParameters(i), testParameters(i));

	// Test the evaluation function
	RealMatrix testInput(2,2);
	testInput(0,0) = 1;
	testInput(0,1) = 2;
	testInput(1,0) = 3;
	testInput(1,1) = 4;
	RealMatrix testResults(2,2);
	testResults(0,0) = 4;
	testResults(0,1) = 13;
	testResults(1,0) = 10;
	testResults(1,1) = 29;
	RealMatrix output=model(testInput);
	for (size_t i=0; i!=2; ++i)
		for (size_t j=0; j!=2; ++j)
			BOOST_CHECK_SMALL(output(i,j) - testResults(i,j), 10e-15);

	testWeightedDerivative(model,1000);
	testWeightedInputDerivative(model,1000);
}

BOOST_AUTO_TEST_CASE( Models_AffineLinearModel )
{
	// 2 inputs, 2 outputs, with offset -> 6 parameters
	LinearModel<> model(2, 2, true);
	BOOST_CHECK_EQUAL(model.numberOfParameters(), 6u);

	// matrix should have the form
	// [2 1]
	// [3 5]
	// offset should have the form
	// [1 -1]
	RealVector testParameters(6);
	testParameters(0) = 2;
	testParameters(1) = 1;
	testParameters(2) = 3;
	testParameters(3) = 5;
	testParameters(4) = 1;
	testParameters(5) = -1;

	// Test whether setting and getting of parameters works
	model.setParameterVector(testParameters);
	RealVector retrievedParameters=model.parameterVector();
	for(size_t i=0;i!=6;++i)
		BOOST_CHECK_EQUAL(retrievedParameters(i),testParameters(i));

	// Test the evaluation function
	RealMatrix testInput(2,2);
	testInput(0,0) = 1;
	testInput(0,1) = 2;
	testInput(1,0) = 3;
	testInput(1,1) = 4;
	RealMatrix testResults(2,2);
	testResults(0,0) = 5;
	testResults(0,1) = 12;
	testResults(1,0) = 11;
	testResults(1,1) = 28;
	RealMatrix output=model(testInput);
	for (size_t i=0; i!=2; ++i)
		for (size_t j=0; j!=2; ++j)
			BOOST_CHECK_SMALL(output(i,j) - testResults(i,j), 1.e-15);

	// Test the weighted derivatives
	testWeightedDerivative(model,1000);
	testWeightedInputDerivative(model,1000);
	//testWeightedSecondDerivative(model,testInput,coefficients,coeffHessians);
}

BOOST_AUTO_TEST_CASE( LinearModel_SERIALIZE )
{
	//the target model
	LinearModel<> model(2, 2, true);

	//create random parameters
	RealVector testParameters(model.numberOfParameters());
	for(size_t
param=0;param!=model.numberOfParameters();++param)
	{
		testParameters(param)=Rng::gauss(0,1);
	}
	model.setParameterVector( testParameters);

	//the test is that after deserialization the results must be identical,
	//so we generate some data first
	std::vector<RealVector> data;
	std::vector<RealVector> target;
	RealVector input(model.inputSize());
	RealVector output(model.outputSize());

	for (size_t i=0; i<1000; i++)
	{
		for(size_t j=0;j!=model.inputSize();++j)
		{
			input(j)=Rng::uni(-1,1);
		}
		data.push_back(input);
		target.push_back(model(input));
	}
	RegressionDataset dataset = createLabeledDataFromRange(data,target);

	//now we serialize the model
	ostringstream outputStream;
	TextOutArchive oa(outputStream);
	oa << model;

	//and create a new model from the serialization
	LinearModel<> modelDeserialized;
	istringstream inputStream(outputStream.str());
	TextInArchive ia(inputStream);
	ia >> modelDeserialized;

	//test whether serialization works
	//first a simple parameter and topology check
	BOOST_REQUIRE_EQUAL(modelDeserialized.numberOfParameters(),model.numberOfParameters());
	BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-50);
	BOOST_REQUIRE_EQUAL(modelDeserialized.inputSize(),model.inputSize());
	BOOST_REQUIRE_EQUAL(modelDeserialized.outputSize(),model.outputSize());
	for (size_t i=0; i<1000; i++)
	{
		RealVector output = modelDeserialized(dataset.element(i).input);
		BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50);
	}
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/LinearNorm.cpp
#define BOOST_TEST_MODULE ML_LinearNorm
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/LinearNorm.h>
#include "derivativeTestHelper.h"
#include <sstream>
#include <shark/Rng/GlobalRng.h>

using namespace std;
using namespace boost::archive;
using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_LinearNorm)

BOOST_AUTO_TEST_CASE( LinearNorm_Value )
{
	LinearNorm model(2);

	//the testpoint; LinearNorm divides by the sum of the inputs: (1,3) -> (1/4, 3/4)
	RealVector point(2);
	point(0)=1;
	point(1)=3;
	RealVector testResult(2);
	testResult(0)=0.25;
	testResult(1)=0.75;

	//evaluate point
	RealVector result=model(point);
	double difference=norm_sqr(testResult-result);
	BOOST_CHECK_SMALL(difference,1.e-15);
}

BOOST_AUTO_TEST_CASE( LinearNorm_weightedInputDerivative )
{
	LinearNorm model(2);

	//the testpoint
	RealVector point(2);
	point(0)=1;
	point(1)=3;
	RealVector coefficients(2);
	coefficients(0)=2;
	coefficients(1)=-1;

	testWeightedInputDerivative(model,point,coefficients);
}

BOOST_AUTO_TEST_CASE( LinearNorm_SERIALIZE )
{
	//the target model
	LinearNorm model(10);

	//now we serialize the model
	ostringstream outputStream;
	TextOutArchive oa(outputStream);
	oa << const_cast<LinearNorm const&>(model);

	//and create a new model from the serialization
	LinearNorm modelDeserialized;
	istringstream inputStream(outputStream.str());
	TextInArchive ia(inputStream);
	ia >> modelDeserialized;

	//test whether serialization works
	//topology check
	BOOST_REQUIRE_EQUAL(modelDeserialized.inputSize(),model.inputSize());
	BOOST_REQUIRE_EQUAL(modelDeserialized.outputSize(),model.outputSize());
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/MeanModel.cpp
#define BOOST_TEST_MODULE Models_MeanModel
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "derivativeTestHelper.h"
#include <shark/Models/MeanModel.h>
#include <shark/Models/LinearModel.h>
#include <shark/Rng/GlobalRng.h>
#include <sstream>

using namespace std;
using namespace boost::archive;
using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_MeanModel)

BOOST_AUTO_TEST_CASE( MeanModel_Test )
{
	MeanModel<LinearModel<> > model;
	RealMatrix weights(2,2,0.0);
	RealVector bias(2,0.0);
	double alphaSum = 0.0;
	for(std::size_t i = 0; i
!= 5; ++i){
		RealMatrix curWeights(2,2,0.0);
		RealVector curBias(2,0.0);
		curWeights(0,0) = Rng::gauss(0,1);
		curWeights(0,1) = Rng::gauss(0,1);
		curWeights(1,0) = Rng::gauss(0,1);
		curWeights(1,1) = Rng::gauss(0,1);
		curBias(0) = Rng::gauss(0,1);
		curBias(1) = Rng::gauss(0,1);
		double curAlpha = Rng::uni(0.1,1);
		alphaSum +=curAlpha;
		weights += curAlpha*curWeights;
		bias +=curAlpha*curBias;
		model.addModel(LinearModel<>(curWeights,curBias),curAlpha);
		BOOST_CHECK_EQUAL(model.weight(i), curAlpha);
		BOOST_CHECK_EQUAL(model.numberOfModels(), i+1);
	}
	//the weighted mean of linear models is again a linear model
	weights/=alphaSum;
	bias/=alphaSum;
	LinearModel<> linear(weights,bias);

	for(std::size_t i = 0; i != 100; ++i){
		//the testpoints
		RealMatrix point(2,2);
		point(0,0)=Rng::uni(-5,5);
		point(0,1)= Rng::uni(-5,5);
		point(1,0)=Rng::uni(-5,5);
		point(1,1)= Rng::uni(-5,5);

		//evaluate ground truth result
		RealMatrix truth = linear(point);
		RealMatrix test = model(point);
		RealMatrix dist=distanceSqr(truth,test);
		BOOST_CHECK_SMALL(dist(0,0),1.e-10);
		BOOST_CHECK_SMALL(dist(1,1),1.e-10);
	}
}

BOOST_AUTO_TEST_CASE( MeanModel_Serialize )
{
	//the target model
	MeanModel<LinearModel<> > model;
	RealMatrix weights(2,2,0.0);
	RealVector bias(2,0.0);
	double alphaSum = 0.0;
	for(std::size_t i = 0; i != 5; ++i){
		RealMatrix curWeights(2,2,0.0);
		RealVector curBias(2,0.0);
		curWeights(0,0) = Rng::gauss(0,1);
		curWeights(0,1) = Rng::gauss(0,1);
		curWeights(1,0) = Rng::gauss(0,1);
		curWeights(1,1) = Rng::gauss(0,1);
		curBias(0) = Rng::gauss(0,1);
		curBias(1) = Rng::gauss(0,1);
		double curAlpha = Rng::uni(0.1,1);
		alphaSum +=curAlpha;
		weights += curAlpha*curWeights;
		bias +=curAlpha*curBias;
		model.addModel(LinearModel<>(curWeights,curBias),curAlpha);
	}

	//the test is that after deserialization the results must be identical,
	//so we generate some data first
	std::vector<RealVector> data;
	std::vector<RealVector> target;
	RealVector input(2);
	for (size_t i=0; i<1000; i++)
	{
		for(size_t j=0;j!=2;++j)
		{
			input(j)=Rng::uni(-1,1);
		}
		data.push_back(input);
		target.push_back(model(input));
	}
	RegressionDataset dataset = createLabeledDataFromRange(data,target);

	//now we serialize the model
	ostringstream outputStream;
	{
		TextOutArchive oa(outputStream);
		oa << model;
	}

	//and create a new model from the serialization
	MeanModel<LinearModel<> > modelDeserialized;
	istringstream inputStream(outputStream.str());
	TextInArchive ia(inputStream);
	ia >> modelDeserialized;
	for (size_t i=0; i<1000; i++)
	{
		RealVector output = modelDeserialized(dataset.element(i).input);
		BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-2);
	}
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/NBClassifierTests.cpp
//===========================================================================
/*!
 *
 * \brief Test cases for the Naive Bayes classifier
 *
 * \author B. Li
 * \date 2012
 *
 * \par Copyright 1995-2015 Shark Development Team
 *

 * This file is part of Shark.
 * <http://image.diku.dk/shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================
#define BOOST_TEST_MODULE NaiveBayesClassifierTestModule
#include "shark/LinAlg/Base.h"
#include "shark/Models/NBClassifier.h"
#include "shark/Rng/AbstractDistribution.h"
#include "shark/Rng/Normal.h"
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <boost/tuple/tuple.hpp>
#include <vector>

namespace shark {

/// Fixture for testing the naive Bayes classifier
class NBClassifierFixture
{
public:
	typedef NBClassifier<>::AbstractDistPtr AbstractDistPtr;

	/// Create a normal distribution from given @a mean and @a variance
	AbstractDistPtr createNormalDist(double mean, double variance) const
	{
		return AbstractDistPtr(new Normal<>(Rng::globalRng, mean, variance));
	}
};

BOOST_FIXTURE_TEST_SUITE (Models_NBClassifierTests, NBClassifierFixture)

BOOST_AUTO_TEST_CASE(Test)
{
	NBClassifier<>::FeatureDistributionsType featureDists;

	// class 0
	std::vector<AbstractDistPtr> class0;
	class0.push_back(createNormalDist(5.855, 3.5033e-02));   // feature 0
	class0.push_back(createNormalDist(176.25, 1.2292e+02));  // feature 1
	class0.push_back(createNormalDist(11.25, 9.1667e-01));   // feature 2
	featureDists.push_back(class0);

	// class 1
	std::vector<AbstractDistPtr> class1;
	class1.push_back(createNormalDist(5.4175, 9.7225e-02));  // feature 0
	class1.push_back(createNormalDist(132.5, 5.5833e+02));   // feature 1
	class1.push_back(createNormalDist(7.5, 1.6667e+00));     // feature 2
	featureDists.push_back(class1);

	NBClassifier<> m_NBClassifier(featureDists);

	// add prior class distribution
	m_NBClassifier.setClassPrior(0, 0.5);
	m_NBClassifier.setClassPrior(1, 0.5);

	// Test that the feature/class counts can be read back from the classifier
	std::size_t classSize;
	std::size_t featureSize;
	boost::tie(classSize, featureSize) = m_NBClassifier.getDistSize();
	BOOST_CHECK_EQUAL(classSize, 2u);
	BOOST_CHECK_EQUAL(featureSize, 3u);

	// Test that an individual distribution can be fetched from the classifier
	const double tolerancePercentage = 0.01;
	AbstractDistribution& dist = m_NBClassifier.getFeatureDist(0u, 0u);
	Normal<> normal = dynamic_cast<Normal<>&>(dist); // this should not throw an exception
	BOOST_CHECK_CLOSE(normal.mean(), 5.855, tolerancePercentage);
	BOOST_CHECK_CLOSE(normal.variance(), 3.5033e-02, tolerancePercentage);

	// Finally, let us try to predict a sample
	RealVector sample(3);
	sample[0] = 6.0;
	sample[1] = 130.0;
	sample[2] = 8.0;
	BOOST_CHECK_EQUAL(m_NBClassifier(sample), 1u);
}

BOOST_AUTO_TEST_SUITE_END()

} // namespace shark

Shark-3.1.4/Test/Models/NearestNeighborRegression.cpp
//===========================================================================
/*!
 *
 * \brief Unit test for nearest neighbor regression.
 *
 * \author T. Glasmachers
 * \date 2012
 *
 * \par Copyright 1995-2015 Shark Development Team
 *

 * This file is part of Shark.
 * <http://image.diku.dk/shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================
#define BOOST_TEST_MODULE Models_NearestNeighborRegression
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/NearestNeighborRegression.h>
#include <shark/Algorithms/NearestNeighbors/SimpleNearestNeighbors.h>
#include <shark/Models/Kernels/LinearKernel.h>

using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_NearestNeighborRegression)

BOOST_AUTO_TEST_CASE( Models_NearestNeighborRegression )
{
	// simple data set with paired points
	std::vector<RealVector> input(6, RealVector(2));
	input[0](0)=1;  input[0](1)=3;
	input[1](0)=-1; input[1](1)=3;
	input[2](0)=1;  input[2](1)=0;
	input[3](0)=-1; input[3](1)=0;
	input[4](0)=1;  input[4](1)=-3;
	input[5](0)=-1; input[5](1)=-3;
	std::vector<RealVector> target(6, RealVector(1));
	target[0](0)=-5.0;
	target[1](0)=-3.0;
	target[2](0)=-1.0;
	target[3](0)=+1.0;
	target[4](0)=+3.0;
	target[5](0)=+5.0;
	RegressionDataset dataset = createLabeledDataFromRange(input, target);

	// model
	DenseLinearKernel kernel;
	SimpleNearestNeighbors<RealVector,RealVector> algorithm(dataset, &kernel);
	NearestNeighborRegression<RealVector> model(&algorithm, 2);

	// predictions must be pair averages
	Data<RealVector> prediction = model(dataset.inputs());
	for (int i = 0; i<6; ++i)
	{
		BOOST_CHECK_SMALL(prediction.element(i)(0) - 4.0 * (i/2 - 1), 1e-14);
	}
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/OneVersusOneClassifier.cpp
//===========================================================================
/*!
 *
 * \brief Unit test for the generic one-versus-one classifier
 *
 * \author T. Glasmachers
 * \date 2012
 *
 * \par Copyright 1995-2015 Shark Development Team
 *

 * This file is part of Shark.
 * <http://image.diku.dk/shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================
#include <shark/Models/OneVersusOneClassifier.h>
#define BOOST_TEST_MODULE Models_OneVersusOneClassifier
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>

using namespace shark;

// simple classifier for testing only
class ThresholdClassifier : public AbstractModel<double, unsigned int>
{
public:
	typedef AbstractModel<double, unsigned int> base_type;

	ThresholdClassifier(double threshold)
	: m_threshold(threshold)
	{ }

	std::string name() const
	{ return "ThresholdClassifier"; }

	RealVector parameterVector() const
	{
		RealVector p(1);
		p(0) = m_threshold;
		return p;
	}

	void setParameterVector(RealVector const& newParameters)
	{ m_threshold = newParameters(0); }

	std::size_t numberOfParameters() const
	{ return 1; }

	boost::shared_ptr<State> createState()const{
		return boost::shared_ptr<State>(new EmptyState());
	}

	using base_type::eval;
	void eval(BatchInputType const& x, BatchOutputType& y, State& state)const
	{
		y.resize(shark::size(x));
		for(std::size_t i = 0; i != shark::size(x); ++i){
			y(i) = (x(i) < m_threshold) ? 0 : 1;
		}
	}

protected:
	double m_threshold;
};

BOOST_AUTO_TEST_SUITE (Models_OneVersusOneClassifier)

BOOST_AUTO_TEST_CASE( Models_OneVersusOneClassifier )
{
	// Create a one-versus-one classifier for four classes.
	// It consists of 6 binary classifiers.
	ThresholdClassifier c10(0.5);
	ThresholdClassifier c20(1.0);
	ThresholdClassifier c21(1.5);
	ThresholdClassifier c30(1.5);
	ThresholdClassifier c31(2.0);
	ThresholdClassifier c32(2.5);

	std::vector<OneVersusOneClassifier<double>::binary_classifier_type*> l1(1);
	l1[0] = &c10;
	std::vector<OneVersusOneClassifier<double>::binary_classifier_type*> l2(2);
	l2[0] = &c20;
	l2[1] = &c21;
	std::vector<OneVersusOneClassifier<double>::binary_classifier_type*> l3(3);
	l3[0] = &c30;
	l3[1] = &c31;
	l3[2] = &c32;

	OneVersusOneClassifier<double> ovo;
	BOOST_CHECK_EQUAL(ovo.numberOfClasses(), 1);
	ovo.addClass(l1);
	BOOST_CHECK_EQUAL(ovo.numberOfClasses(), 2);
	ovo.addClass(l2);
	BOOST_CHECK_EQUAL(ovo.numberOfClasses(), 3);
	ovo.addClass(l3);
	BOOST_CHECK_EQUAL(ovo.numberOfClasses(), 4);

	// check parameters
	RealVector p = ovo.parameterVector();
	BOOST_CHECK_EQUAL(ovo.numberOfParameters(), 6);
	BOOST_CHECK_EQUAL(p.size(), 6);
	BOOST_CHECK_SMALL(std::fabs(p(0) - 0.5), 1e-14);
	BOOST_CHECK_SMALL(std::fabs(p(1) - 1.0), 1e-14);
	BOOST_CHECK_SMALL(std::fabs(p(2) - 1.5), 1e-14);
	BOOST_CHECK_SMALL(std::fabs(p(3) - 1.5), 1e-14);
	BOOST_CHECK_SMALL(std::fabs(p(4) - 2.0), 1e-14);
	BOOST_CHECK_SMALL(std::fabs(p(5) - 2.5), 1e-14);

	// create a simple test data set
	std::vector<double> inputs(4);
	inputs[0] = 0.0;
	inputs[1] = 1.0;
	inputs[2] = 2.0;
	inputs[3] = 3.0;
	std::vector<unsigned int> targets(4);
	targets[0] = 0;
	targets[1] = 1;
	targets[2] = 2;
	targets[3] = 3;
	LabeledData<double, unsigned int> dataset = createLabeledDataFromRange(inputs, targets);

	// check correctness of predictions
	for (std::size_t i=0; i<dataset.numberOfElements(); i++)
		BOOST_CHECK_EQUAL(ovo(dataset.element(i).input), dataset.element(i).label);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/OnlineRNNet.cpp
#define BOOST_TEST_MODULE ML_ONLINERNNET
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <shark/Models/OnlineRNNet.h>
#include <shark/Rng/GlobalRng.h>
#include <iostream>

using namespace shark;

//this test compares the network to the MSEFFNET of Shark 2.4;
//since the topology of the net changed, this is not that easy...
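//A note on the connection matrix used below (a reading of this test, not a
//quote of the library documentation): for setStructure(2,2,conn) the network
//has four non-input neurons, one per row of conn; the seven columns are
//assumed to enumerate the two external inputs, the bias unit and the four
//neurons themselves, so conn(i,j) == 1 enables the corresponding weight and
//conn(i,j) == 0 fixes it to zero.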
BOOST_AUTO_TEST_SUITE (Models_OnlineRNNet)

BOOST_AUTO_TEST_CASE( ONLINERNNET_VALUE_TEST ){
	//We simulate a Shark 2.4 network which would be created using
	//setStructure(2,2) and setting all feed forward connections to 0.
	//Since the input units in the old network were sigmoids, we have
	//to use a hidden layer to emulate them.
	//Since this uses every feature of the topology possible, this
	//test should catch every possible error during eval.
	int regConnections[4][7]=
	{
		{1,0,0,0,0,0,0}, //shark 2.4 input 1
		{0,1,0,0,0,0,0}, //shark 2.4 input 2
		{0,0,0,1,1,1,1}, //output 1
		{0,0,0,1,1,1,1}  //output 2
	};
	IntMatrix conn(4,7);
	for (size_t i = 0; i < 4; i++){
		for (size_t j = 0; j < 7; j++){
			conn(i,j)=regConnections[i][j];
		}
	}
	RecurrentStructure netStruct;
	netStruct.setStructure(2,2,conn);
	OnlineRNNet net(&netStruct);
	BOOST_REQUIRE_EQUAL(net.numberOfParameters(),10);

	//initialize parameters
	RealVector parameters(10);
	//our simulated input neurons need strength 1
	parameters(0)=1;
	parameters(1)=1;
	for(size_t i=2;i!=10;++i){
		parameters(i)=0.1*i-0.5;
	}
	net.setParameterVector(parameters);
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/RBFLayer.cpp
#include <shark/Models/RBFLayer.h>
#define BOOST_TEST_MODULE ML_RBFLayer
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include "derivativeTestHelper.h"
#include <sstream>

using namespace std;
using namespace boost::archive;
using namespace shark;

BOOST_AUTO_TEST_SUITE (Models_RBFLayer)

BOOST_AUTO_TEST_CASE( RBFLayer_Parameters )
{
	//2 inputs, 3 outputs
	RBFLayer net(2,3);
	{
		size_t numParams=net.numberOfParameters();
		BOOST_REQUIRE_EQUAL(numParams,9);
		RealVector parameters(numParams);
		for(size_t i=0;i!=numParams;i++)
			parameters(i) = double(i);
		net.setParameterVector(parameters);
		RealVector params = net.parameterVector();
		BOOST_REQUIRE_EQUAL(params.size(),numParams);
		double paramError = norm_inf(params-parameters);
		BOOST_CHECK_SMALL(paramError,1.e-14);
	}
	{
		net.setTrainingParameters(true,false);
		size_t numParams=net.numberOfParameters();
		BOOST_REQUIRE_EQUAL(numParams,6);
		RealVector parameters(numParams);
		for(size_t i=0;i!=numParams;i++)
			parameters(i)= double(i);
		net.setParameterVector(parameters);
		RealVector params = net.parameterVector();
		BOOST_REQUIRE_EQUAL(params.size(),numParams);
		double paramError = norm_inf(params-parameters);
		BOOST_CHECK_SMALL(paramError,1.e-14);
	}
	{
		net.setTrainingParameters(false,true);
		size_t numParams=net.numberOfParameters();
		BOOST_REQUIRE_EQUAL(numParams,3);
		RealVector parameters(numParams);
		for(size_t i=0;i!=numParams;i++)
			parameters(i) = double(i);
		net.setParameterVector(parameters);
		RealVector params = net.parameterVector();
		BOOST_REQUIRE_EQUAL(params.size(),numParams);
		double paramError = norm_inf(params-parameters);
		BOOST_CHECK_SMALL(paramError,1.e-14);
	}
	{
		net.setTrainingParameters(false,false);
		size_t numParams=net.numberOfParameters();
		BOOST_REQUIRE_EQUAL(numParams,0);
	}
	{
		net.setTrainingParameters(true,true);
		size_t numParams=net.numberOfParameters();
		BOOST_REQUIRE_EQUAL(numParams,9);
		RealVector parameters(numParams);
		for(size_t i=0;i!=numParams;i++)
			parameters(i) = double(i);
		net.setParameterVector(parameters);
		RealVector params = net.parameterVector();
		BOOST_REQUIRE_EQUAL(params.size(),numParams);
		double paramError = norm_inf(params-parameters);
		BOOST_CHECK_SMALL(paramError,1.e-14);
	}
}

BOOST_AUTO_TEST_CASE( RBFLayer_Value )
{
	//2 inputs, 3 outputs
	RBFLayer net(2,3);

	//initialize parameters
	size_t numParams=net.numberOfParameters();
	BOOST_REQUIRE_EQUAL(numParams,9);
	RealVector parameters(numParams);
	for(size_t i=0;i!=numParams-3;i++)
		parameters(i)=(i+1)*0.5;
	parameters(6)= 1;
	parameters(7)= 0;
	parameters(8)= -1;
	net.setParameterVector(parameters);
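	//(reading of the parameter layout in this test: the first six parameters
	// are the three 2d centers (0.5,1), (1.5,2) and (2.5,3); the last three
	// encode the bandwidths as var = 0.5*exp(-p), and each output unit is the
	// normalized Gaussian density exp(-dist/(2*var)) / (2*pi*var) around its
	// center, which is exactly what the loop below computes by hand)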
	RealVector var(3);
	var(0) = 0.5/std::exp(1.0);
	var(1) = 0.5/std::exp(0.0);
	var(2) = 0.5/std::exp(-1.0);
	double pi = boost::math::constants::pi<double>();
	for(std::size_t i = 0; i != 1000; ++i){
		RealVector point(2);
		point(0)=Rng::gauss(0,2);
		point(1)=Rng::gauss(0,2);
		double dist0 = sqr(point(0)-0.5)+sqr(point(1)-1);
		double dist1 = sqr(point(0)-1.5)+sqr(point(1)-2);
		double dist2 = sqr(point(0)-2.5)+sqr(point(1)-3);
		double p0 = std::exp(-dist0/(2*var(0)))/(2*pi*var(0));
		double p1 = std::exp(-dist1/(2*var(1)))/(2*pi*var(1));
		double p2 = std::exp(-dist2/(2*var(2)))/(2*pi*var(2));

		RealVector result=net(point);
		BOOST_CHECK_SMALL(std::abs(result(0)-p0),1.e-13);
		BOOST_CHECK_SMALL(std::abs(result(1)-p1),1.e-13);
		BOOST_CHECK_SMALL(std::abs(result(2)-p2),1.e-13);
	}

	//batch eval
	RealMatrix inputBatch(100,2);
	for(std::size_t i = 0; i != 100; ++i){
		inputBatch(i,0) = Rng::uni(-1,1);
		inputBatch(i,1) = Rng::uni(-1,1);
	}
	testBatchEval(net,inputBatch);
}

BOOST_AUTO_TEST_CASE( RBFLayer_WeightedDerivative )
{
	//3 inputs, 5 outputs
	RBFLayer net(3,5);
	testWeightedDerivative(net,1000);
}

BOOST_AUTO_TEST_CASE( RBFLayer_SERIALIZE )
{
	//the target model
	RBFLayer model(2,3);

	//create random parameters
	RealVector testParameters(model.numberOfParameters());
	for(size_t param=0;param!=model.numberOfParameters();++param)
	{
		testParameters(param)=Rng::gauss(0,1);
	}
	model.setParameterVector(testParameters);
	//restrict the trained parameter set before serializing, to ensure that
	//the full state and not only the active parameters is saved
	model.setTrainingParameters(true,false);

	//the test is that after deserialization the results must be identical,
	//so we generate some data first
	std::vector<RealVector> data;
	std::vector<RealVector> target;
	RealVector input(model.inputSize());
	RealVector output(model.outputSize());

	for (size_t i=0; i<1000; i++)
	{
		for(size_t j=0;j!=model.inputSize();++j)
		{
			input(j)=Rng::uni(-1,1);
		}
		data.push_back(input);
		target.push_back(model(input));
	}
	RegressionDataset dataset = createLabeledDataFromRange(data,target);

	//now we serialize the model
	ostringstream outputStream;
	TextOutArchive oa(outputStream);
	oa << model;

	//and create a new model from the serialization
	RBFLayer modelDeserialized;
	istringstream inputStream(outputStream.str());
	TextInArchive ia(inputStream);
	ia >> modelDeserialized;

	//test whether serialization works
	//first a simple parameter and topology check,
	//done on the restricted set to ensure it is saved
	BOOST_REQUIRE_EQUAL(modelDeserialized.numberOfParameters(), model.numberOfParameters());
	BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - model.parameterVector()),1.e-15);
	modelDeserialized.setTrainingParameters(true,true);
	//full parameter check
	BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-15);
	BOOST_REQUIRE_EQUAL(modelDeserialized.inputSize(),model.inputSize());
	BOOST_REQUIRE_EQUAL(modelDeserialized.outputSize(),model.outputSize());
	for (size_t i=0; i < 1000; i++)
	{
		RealVector output = modelDeserialized(dataset.element(i).input);
		BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50);
	}
}

BOOST_AUTO_TEST_SUITE_END()

Shark-3.1.4/Test/Models/RFClassifier.cpp
//===========================================================================
/*!
 *
 * \brief unit test for the Random Forest classifier
 *
 * \author K. Hansen
 * \date 2012
 *
 * \par Copyright 1995-2015 Shark Development Team
 *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE Models_RFClassifier #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_RFClassifier) BOOST_AUTO_TEST_CASE( RF_Classifier ) { //Test data std::vector input(10, RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; input[6](0)=-4; input[6](1)=-3; input[7](0)=-2; input[7](1)=-1; input[8](0)=-6; input[8](1)=-8; input[9](0)=-2; input[9](1)=-2; std::vector target(10); target[0]=0; target[1]=0; target[2]=1; target[3]=1; target[4]=2; target[5]=2; target[6]=3; target[7]=3; target[8]=4; target[9]=4; ClassificationDataset dataset = createLabeledDataFromRange(input, target); RFTrainer trainer; RFClassifier model; trainer.train(model, dataset); Data prediction = model(dataset.inputs()); ZeroOneLoss loss; double error = loss.eval(dataset.labels(), prediction); std::cout << model.countAttributes() << std::endl; BOOST_CHECK(error == 0.0); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/RNNet.cpp000066400000000000000000000142361314655040000164270ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_RNNET #include #include #include #include #include using namespace shark; int connections[6][9]= { {1,1,1,1,1,1,1,1,1}, {1,1,1,1,1,1,1,1,1}, {1,1,1,1,1,1,1,1,1}, {1,1,1,1,1,1,1,1,1}, {1,1,1,1,1,1,1,1,1}, {1,1,1,1,1,1,1,1,1} }; const size_t numberOfParameters=54; BOOST_AUTO_TEST_SUITE (Models_RNNet) BOOST_AUTO_TEST_CASE( RNNET_SIMPLE_SET_STRUCTURE_TEST) { RecurrentStructure netStruct; netStruct.setStructure(2,4,2); for (size_t i = 0; i < 6; i++){ for (size_t j = 0; j < 9; j++){ BOOST_CHECK_EQUAL(netStruct.connections()(i,j),connections[i][j]); } } } BOOST_AUTO_TEST_CASE( RNNET_SET_PARAMETER_TEST) { RecurrentStructure netStruct; netStruct.setStructure(2,4,2); RNNet net(&netStruct); BOOST_REQUIRE_EQUAL(numberOfParameters, net.numberOfParameters()); //create test parameters RealVector testParameters(numberOfParameters); for(size_t i=0;i!=numberOfParameters;++i){ testParameters(i)=0.1*i; } //set parameters net.setParameterVector(testParameters); //test wether parameterVector works RealVector resultParameters = net.parameterVector(); BOOST_CHECK_SMALL(norm_sqr(testParameters-resultParameters),1.e-30); //test wether the weight matrices are correct for (size_t i = 0; i < 6; i++){ for (size_t j = 0; j < 9; j++){ BOOST_CHECK_SMALL(netStruct.weights()(i,j)-(i*9+j)*0.1,1.e-10); } } } //this test compares the network to the MSEFFNET of Shark 2.4 //since the topology of the net changed, this is not that easy... BOOST_AUTO_TEST_CASE( RNNET_BATCH_VALUE_REGRESSION_TEST ){ //We simulate a Shark 2.4 network which would be created using //setStructure(2,2) and setting all feed forward connections to 0. 
//since the input units in the old network //were sigmoids, we have to use a hidden layer to emulate them //since this uses every feature of the topology possible, this //test should catch every possible error during eval. int regConnections[4][7]= { {1,0,0,0,0,0,0},//shark 2.4 input1 {0,1,0,0,0,0,0},//shark 2.4 input 2 {0,0,0,1,1,1,1},//output 1 {0,0,0,1,1,1,1} //output 2 }; IntMatrix conn(4,7); for (size_t i = 0; i < 4; i++){ for (size_t j = 0; j < 7; j++){ conn(i,j)=regConnections[i][j]; } } RecurrentStructure netStruct; netStruct.setStructure(2,2,conn); RNNet net(&netStruct); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),10); //initialize parameters RealVector parameters(10); //our simulated input neurons need strength 1 parameters(0)=1; parameters(1)=1; //the rest is 0.1*i-0.5 for(size_t i=2;i!=10;++i){ parameters(i)=0.1*i-0.5; } net.setParameterVector(parameters); Sequence warmUp(1,RealVector(2));//just 1 element warmUp[0](0)=0; warmUp[0](1)=1; net.setWarmUpSequence(warmUp); //input and output data from a test of an earlier implementation Sequence testInputs(4,RealVector(2)); for (size_t i = 0; i < 4; i++){ for(size_t j=0;j!=2;++j){ testInputs[i](j) = double(i + j + 1); } } Sequence testOutputs(4,RealVector(2)); testOutputs[0](0)=0.414301; testOutputs[0](1)=0.633256; testOutputs[1](0)=0.392478; testOutputs[1](1)=0.651777; testOutputs[2](0)=0.378951; testOutputs[2](1)=0.658597; testOutputs[3](0)=0.372836; testOutputs[3](1)=0.661231; //eval network output and test whether it's the same or not Sequence outputs=net(testInputs); for(size_t i=0;i!=4;++i){ BOOST_CHECK_SMALL(norm_2(outputs[i]-testOutputs[i]),1.e-5); } //in batch mode, a network should reset itself when a new batch is generated //check whether this happens - just repeat it outputs=net(testInputs); for(size_t i=0;i!=4;++i){ BOOST_CHECK_SMALL(norm_2(outputs[i]-testOutputs[i]),1.e-5); } } BOOST_AUTO_TEST_CASE( RNNET_WEIGHTED_PARAMETER_DERIVATIVE ){ const size_t T=5; RecurrentStructure netStruct; netStruct.setStructure(2,4,2); RNNet net(&netStruct); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),numberOfParameters); //initialize parameters RealVector parameters(numberOfParameters); for(size_t i=0;i!=numberOfParameters;++i){ parameters(i)= Rng::gauss(0,1); } net.setParameterVector(parameters); //define sequence Sequence testInputs(T,RealVector(2)); for (size_t t = 0; t < T; t++){ for(size_t j=0;j!=2;++j){ testInputs[t](j) = double(t + j); } } std::vector testInputBatch(1,testInputs); std::vector testOutputBatch; //we choose the same sequence as warm-up sequence to test the net net.setWarmUpSequence(testInputs); //evaluate network boost::shared_ptr state = net.createState(); net.eval(testInputBatch,testOutputBatch,*state); //define coefficients Sequence coefficients(T,RealVector(2)); for (size_t t = 0; t < T; t++){ for(size_t j=0;j!=2;++j){ coefficients[t](j) = 1; } } std::vector coefficientsBatch(1,coefficients); //now calculate the derivative RealVector derivative; net.weightedParameterDerivative(testInputBatch,coefficientsBatch,*state,derivative); BOOST_REQUIRE_EQUAL(derivative.size(),numberOfParameters); //estimate derivative.
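//Note: the block below validates the analytic gradient by a central finite
//difference, df/dw ~ (f(w+eps)-f(w-eps))/(2*eps); summing the inner products
//with the coefficient vectors over the sequence reproduces the definition of
//the weighted parameter derivative computed above.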
double epsilon=1.e-5; RealVector testDerivative(numberOfParameters); testDerivative.clear(); for(size_t w=0; w != numberOfParameters; ++w){ //create points with an change of +-epsilon in the wth component RealVector point1(parameters); RealVector point2(parameters); point1(w)+=epsilon; point2(w)-=epsilon; //calculate result net.setParameterVector(point1); Sequence result1=net(testInputs); net.setParameterVector(point2); Sequence result2=net(testInputs); //now estimate the derivative for the changed parameter for(size_t t=0;t!=T;++t){ testDerivative(w)+=inner_prod(coefficients[t],(result1[t]-result2[t])/(2*epsilon)); } } //check wether the derivatives are identical BOOST_CHECK_SMALL(blas::distance(derivative,testDerivative),epsilon); //~ for(size_t w=0; w != numberOfParameters; ++w){ //~ std::cout< #include #include "derivativeTestHelper.h" #include #include #include using namespace std; using namespace boost::archive; using namespace shark; double sigmoid(double a){ return 1.0/(1.0+std::exp(-a)); } BOOST_AUTO_TEST_SUITE (Models_SigmoidModel) BOOST_AUTO_TEST_CASE( SigmoidModel_Value ) { SigmoidModel model( false ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); //the testpoint RealVector point(1); point(0)=10; double testResult=sigmoid(5-1); //evaluate point RealVector result=model(point); BOOST_CHECK_CLOSE(testResult,result[0],1.e-13); } BOOST_AUTO_TEST_CASE( SigmoidModel_Value_Unconstrained ) { SigmoidModel model( true ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); //the testpoint RealVector point(1); point(0)=10; double testResult=sigmoid(16.48721270-1); //evaluate point RealVector result=model(point); BOOST_CHECK_CLOSE(testResult,result[0],1.e-12); } BOOST_AUTO_TEST_CASE( SigmoidModel_Value_NoOffset ) { SigmoidModel model( false ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); model.setOffsetActivity(false); //the testpoint RealVector point(1); point(0)=10; double testResult=sigmoid(5); //evaluate point RealVector result=model(point); BOOST_CHECK_CLOSE(testResult,result[0],1.e-13); } BOOST_AUTO_TEST_CASE( SigmoidModel_weightedParameterDerivative ) { SigmoidModel model( false ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); //the testpoint RealVector point(1); point(0)=10; model(point); RealVector coefficients(1); coefficients(0)=2; testWeightedDerivative(model,point,coefficients); } BOOST_AUTO_TEST_CASE( SigmoidModel_weightedParameterDerivativeUnconstrained ) { SigmoidModel model( true ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); //the testpoint RealVector point(1); point(0)=10; model(point); RealVector coefficients(1); coefficients(0)=2; testWeightedDerivative(model,point,coefficients); } BOOST_AUTO_TEST_CASE( SigmoidModel_weightedParameterDerivative_NoOffset ) { SigmoidModel model( false ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); model.setOffsetActivity(false); //the testpoint RealVector point(1); point(0)=10; model(point); RealVector coefficients(1); coefficients(0)=2; testWeightedDerivative(model,point,coefficients); } BOOST_AUTO_TEST_CASE( SigmoidModel_weightedInputDerivative ) { SigmoidModel model( false ); 
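//Note: judging from SigmoidModel_Value above, this model computes
//f(x) = sigmoid(w*x - b) with w = parameters(0) and b = parameters(1), so the
//input derivative checked numerically below is f'(x) = w*f(x)*(1-f(x)).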
//initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); //the testpoint RealVector point(1); point(0)=10; model(point); RealVector coefficients(1); coefficients(0)=2; testWeightedInputDerivative(model,point,coefficients); } BOOST_AUTO_TEST_CASE( SigmoidModel_weightedInputDerivativeUnconstrained ) { SigmoidModel model( true ); //initialize parameters RealVector parameters(2); parameters(0)=0.5; parameters(1)=1; model.setParameterVector(parameters); //the testpoint RealVector point(1); point(0)=10; model(point); RealVector coefficients(1); coefficients(0)=2; testWeightedInputDerivative(model,point,coefficients); } BOOST_AUTO_TEST_CASE( SigmoidModel_Serialize ) { //the target modelwork SigmoidModel model( false ); //create random parameters RealVector testParameters(model.numberOfParameters(),0.0); testParameters(0)=std::abs(Rng::gauss(0,1)); model.setParameterVector( testParameters ); //the test is, that after deserialization, the results must be identical //so we generate some data first std::vector data; std::vector target; RealVector input(1); RealVector output(1); for (size_t i=0; i<1000; i++) { input(0)=Rng::uni(-1,1); data.push_back(input); target.push_back(model(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the model ostringstream outputStream; { TextOutArchive oa(outputStream); oa << model; } //and create a new model from the serialization SigmoidModel modelDeserialized( false ); istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> modelDeserialized; //test whether serialization works BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-15); for (size_t i=0; i<1000; i++) { RealVector output = modelDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_CASE( SigmoidModel_Serialize_NoOffset ) { //the target modelwork SigmoidModel model( false ); //create random parameters RealVector testParameters(model.numberOfParameters()); for(size_t param=0;param!=model.numberOfParameters();++param) { testParameters(param)= std::abs(Rng::gauss(0,1)); } model.setParameterVector( testParameters ); model.setOffsetActivity(false); testParameters(1) = 0.0; //the test is, that after deserialization, the results must be identical //so we generate some data first std::vector data; std::vector target; RealVector input(1); RealVector output(1); for (size_t i=0; i<1000; i++) { input(0)=Rng::uni(-1,1); data.push_back(input); target.push_back(model(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the model ostringstream outputStream; TextOutArchive oa(outputStream); oa << model; //and create a new model from the serialization SigmoidModel modelDeserialized( false ); istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> modelDeserialized; //test whether serialization works BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-50); for (size_t i=0; i<1000; i++) { RealVector output = modelDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_CASE( SigmoidModel_Serialize_Unconstrained ) { //the target modelwork SigmoidModel model( true ); //create random parameters RealVector testParameters(model.numberOfParameters()); for(size_t param=0;param!=model.numberOfParameters();++param) { 
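//unconstrained variant: plain Gaussian samples, in contrast to the constrained
//serialization tests above which draw the slope via std::abs(Rng::gauss(0,1)),
//presumably because the constrained model only admits a non-negative slope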
testParameters(param)=Rng::gauss(0,1); } model.setParameterVector( testParameters ); //the test is, that after deserialization, the results must be identical //so we generate some data first std::vector data; std::vector target; RealVector input(1); RealVector output(1); for (size_t i=0; i<1000; i++) { input(0)=Rng::uni(-1,1); data.push_back(input); target.push_back(model(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the model ostringstream outputStream; TextOutArchive oa(outputStream); oa << model; //and create a new model from the serialization SigmoidModel modelDeserialized( true ); istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> modelDeserialized; //test whether serialization works BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-15); for (size_t i=0; i<1000; i++) { RealVector output = modelDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/SoftNearestNeighborClassifier.cpp000066400000000000000000000050641314655040000233600ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for soft nearest neighbor classifier * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #define BOOST_TEST_MODULE MODELS_SOFT_NEAREST_NEIGHBOR_CLASSIFIER #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Models_SoftNearestNeighborClassifier) BOOST_AUTO_TEST_CASE( SOFT_NEAREST_NEIGHBOR_CLASSIFIER ) { std::vector input(6, RealVector(2)); input[0](0)=1; input[0](1)=3; input[1](0)=-1; input[1](1)=3; input[2](0)=1; input[2](1)=0; input[3](0)=-1; input[3](1)=0; input[4](0)=1; input[4](1)=-3; input[5](0)=-1; input[5](1)=-3; std::vector target(6); target[0]=0; target[1]=0; target[2]=1; target[3]=1; target[4]=2; target[5]=2; ClassificationDataset dataset = createLabeledDataFromRange(input, target); DenseRbfKernel kernel(0.5); SimpleNearestNeighbors algorithm(dataset, &kernel); SoftNearestNeighborClassifier model(&algorithm, 3); Data prediction=model(dataset.inputs()); for (size_t i = 0; i<6; ++i) { BOOST_CHECK_SMALL(prediction.element(i)(target[i]) - 2.0/3.0, 1e-12); } ZeroOneLoss loss; double error = loss.eval(dataset.labels(), prediction); BOOST_CHECK(error == 0.0); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/Softmax.cpp000066400000000000000000000067121314655040000170620ustar00rootroot00000000000000#define BOOST_TEST_MODULE ML_Softmax #include #include #include #include "derivativeTestHelper.h" #include #include using namespace std; using namespace boost::archive; using namespace shark; BOOST_AUTO_TEST_SUITE (Models_Softmax) BOOST_AUTO_TEST_CASE( Softmax_Value ) { Softmax model(2); BOOST_CHECK_EQUAL(model.numberOfParameters(),0u); BOOST_CHECK_EQUAL(model.inputSize(),2u); BOOST_CHECK_EQUAL(model.outputSize(),2u); //the testpoint RealVector point(2); point(0)=1; point(1)=3; RealVector testResult(2); testResult(0)=exp(1.0); testResult(1)=exp(3.0); double sum=testResult(0)+testResult(1); testResult/=sum; //evaluate point RealVector result=model(point); double difference=norm_sqr(testResult-result); BOOST_CHECK_SMALL(difference,1.e-15); } //test whether the special case of single input is the same as dualinput with inputs (x,-x) BOOST_AUTO_TEST_CASE( Softmax_Value_Single ) { Softmax model(1); Softmax modelTest(2); BOOST_CHECK_EQUAL(model.numberOfParameters(),0u); BOOST_CHECK_EQUAL(model.inputSize(),1u); BOOST_CHECK_EQUAL(model.outputSize(),2u); //the testpoint RealVector point(1); point(0)=1; RealVector pointTest(2); pointTest(0)=1; pointTest(1)=-1; //evaluate point RealVector result=model(point); RealVector resultTest=modelTest(pointTest); std::cout< data; std::vector target; RealVector input(model.inputSize()); RealVector output(model.outputSize()); for (size_t i=0; i<1000; i++) { for(size_t j=0;j!=model.inputSize();++j) { input(j)=Rng::uni(-1,1); } data.push_back(input); target.push_back(model(input)); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //now we serialize the FFmodel 
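//Note: the round trip below follows the usual Boost.Serialization pattern:
//write the model into an in-memory text archive, stream it back into a fresh
//instance, then require matching parameters and identical outputs on the data
//generated above.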
ostringstream outputStream; TextOutArchive oa(outputStream); oa << const_cast(model); //and create a new model from the serialization Softmax modelDeserialized; istringstream inputStream(outputStream.str()); TextInArchive ia(inputStream); ia >> modelDeserialized; //test whether serialization works //first simple parameter and topology check BOOST_CHECK_SMALL(norm_2(modelDeserialized.parameterVector() - testParameters),1.e-50); BOOST_REQUIRE_EQUAL(modelDeserialized.inputSize(),model.inputSize()); BOOST_REQUIRE_EQUAL(modelDeserialized.outputSize(),model.outputSize()); for (size_t i=0; i<1000; i++) { RealVector output = modelDeserialized(dataset.element(i).input); BOOST_CHECK_SMALL(norm_2(output -dataset.element(i).label),1.e-50); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/TiedAutoencoder.cpp000066400000000000000000000063371314655040000205220ustar00rootroot00000000000000#define BOOST_TEST_MODULE MODEL_TIEDAUTOENCODER #include #include #include "derivativeTestHelper.h" #include #include #include using namespace std; using namespace boost::archive; using namespace shark; //check that the structure is correct, i.e. matrice have the right form and setting parameters works BOOST_AUTO_TEST_SUITE (Models_TiedAutoencoder) BOOST_AUTO_TEST_CASE( TIED_AUTOENCODER_Structure) { std::size_t weightNum = 2*3+5; TiedAutoencoder net; net.setStructure(2,3); BOOST_REQUIRE_EQUAL(net.hiddenBias().size(),3u); BOOST_REQUIRE_EQUAL(net.outputBias().size(),2u); BOOST_CHECK_EQUAL(net.encoderMatrix().size1(), 3u); BOOST_CHECK_EQUAL(net.encoderMatrix().size2(), 2u); BOOST_REQUIRE_EQUAL(net.numberOfParameters(),weightNum); RealVector newParams(weightNum); for(std::size_t i = 0; i != weightNum; ++i){ newParams(i) = Rng::uni(0,1); } //check that setting and getting parameters works net.setParameterVector(newParams); RealVector params = net.parameterVector(); BOOST_REQUIRE_EQUAL(params.size(),newParams.size()); for(std::size_t i = 0; i != weightNum; ++i){ BOOST_CHECK_CLOSE(newParams(i),params(i), 1.e-10); } //check that the weight matrix has the right values std::size_t param = 0; for(std::size_t i = 0; i != net.encoderMatrix().size1(); ++i){ for(std::size_t j = 0; j != net.encoderMatrix().size2(); ++j,++param){ BOOST_CHECK_EQUAL(net.encoderMatrix()(i,j), newParams(param)); } } for(std::size_t i = 0; i != 3; ++i,++param){ BOOST_CHECK_EQUAL(net.hiddenBias()(i), newParams(param)); } BOOST_CHECK_EQUAL(net.outputBias()(0), newParams(param)); BOOST_CHECK_EQUAL(net.outputBias()(1), newParams(param+1)); } BOOST_AUTO_TEST_CASE( TIED_AUTOENCODER_Value ) { TiedAutoencoder net; net.setStructure(3,2); std::size_t numParams = 3*2+5; for(std::size_t i = 0; i != 100; ++i){ //initialize parameters RealVector parameters(numParams); for(size_t j=0; j != numParams;++j) parameters(j)=Rng::gauss(0,1); net.setParameterVector(parameters); //the testpoints RealVector point(3); point(0)=Rng::uni(-5,5); point(1)= Rng::uni(-5,5); point(2)= Rng::uni(-5,5); //evaluate ground truth result RealVector hidden = sigmoid(prod(net.encoderMatrix(),point)+net.hiddenBias()); RealVector output = tanh(prod(net.decoderMatrix(),hidden)+net.outputBias()); //check whether final result is correct RealVector netResult = net(point); BOOST_CHECK_SMALL(netResult(0)-output(0),1.e-12); BOOST_CHECK_SMALL(netResult(1)-output(1),1.e-12); BOOST_CHECK_SMALL(netResult(2)-output(2),1.e-12); } //now also test batches RealMatrix inputs(100,3); for(std::size_t i = 0; i != 100; ++i){ inputs(i,0)=Rng::uni(-5,5); inputs(i,1)= Rng::uni(-5,5); inputs(i,2)= Rng::uni(-5,5); } 
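//Note: testBatchEval (from derivativeTestHelper.h) evaluates the whole batch
//once and compares the result element-wise against single-point evaluations,
//both with and without an explicit State object.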
testBatchEval(net,inputs); } BOOST_AUTO_TEST_CASE( TIED_AUTOENCODER_WeightedDerivatives) { TiedAutoencoder net; net.setStructure(2,5); testWeightedInputDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivative(net,1000,5.e-6,1.e-7); testWeightedDerivativesSame(net,1000); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Models/derivativeTestHelper.h000066400000000000000000000304021314655040000212410ustar00rootroot00000000000000#ifndef SHARK_TEST_DERIVATIVETESTHELPER_H #define SHARK_TEST_DERIVATIVETESTHELPER_H #include #include #include #include namespace shark{ //estimates Derivative using the formula: //df(x)/dx~=(f(x+e)-f(x-e))/2e template std::vector estimateDerivative(Model& net,const Point& point,double epsilon=1.e-10){ std::size_t outputSize = net(point).size(); RealVector parameters=net.parameterVector(); std::vector gradients(parameters.size(),RealVector(outputSize)); for(size_t parameter=0;parameter!=parameters.size();++parameter){ RealVector testPoint1=parameters; testPoint1(parameter)+=epsilon; net.setParameterVector(testPoint1); RealVector result1=net(point); RealVector testPoint2=parameters; testPoint2(parameter)-=epsilon; net.setParameterVector(testPoint2); RealVector result2=net(point); gradients[parameter]=(result1-result2)/(2*epsilon); } return gradients; } template std::vector estimateSecondDerivative(Model& net,const Point& point,double epsilon=1.e-10){ std::size_t outputSize = net(point).size(); RealVector parameters=net.parameterVector(); std::vector hessians(parameters.size(),RealMatrix(parameters.size(),outputSize)); for(size_t parameter=0;parameter!=parameters.size();++parameter){ RealVector testPoint1=parameters; testPoint1(parameter)+=epsilon; net.setParameterVector(testPoint1); std::vector grad1 = estimateDerivative(net,point); RealVector testPoint2=parameters; testPoint2(parameter)-=epsilon; net.setParameterVector(testPoint2); std::vector grad2 = estimateDerivative(net,point); for(size_t param = 0; param != parameters.size(); ++param){ row(hessians[parameter],param)=(grad1[param]-grad2[param])/(2*epsilon); } } return hessians; } //input Derivative for Models template std::vector estimateInputDerivative(Model& net,const Point& point,double epsilon=1.e-10){ std::size_t outputSize = net(point).size(); std::vector gradients(point.size(),RealVector(outputSize)); for(size_t dim=0;dim!=point.size();++dim){ RealVector testPoint1=point; testPoint1(dim)+=epsilon; RealVector result1=net(testPoint1); RealVector testPoint2=point; testPoint2(dim)-=epsilon; RealVector result2=net(testPoint2); gradients[dim]=(result1-result2)/(2*epsilon); } return gradients; } inline void testDerivative(const std::vector& g1,const std::vector& g2,double epsilon=1.e-5){ BOOST_REQUIRE_EQUAL(g1.size(),g2.size()); for(size_t output=0;output!=g1.size();++output){ BOOST_REQUIRE_EQUAL(g1[output].size(),g2[output].size()); BOOST_CHECK_SMALL(norm_2(g1[output]-g2[output]),epsilon); } } //general functions to estimate and test derivatives template void testWeightedDerivative(Model& net,const Point& point,const RealVector& coefficients,double epsilon=1.e-5,double estimationEpsilon = 1.e-5){ RealMatrix pointBatch(1,point.size()); row(pointBatch,0)=point; boost::shared_ptr state = net.createState(); typename Model::BatchOutputType output; net.eval(pointBatch,output,*state); std::vector derivative=estimateDerivative(net,point, estimationEpsilon); //check every coefficient independent of the others for(std::size_t coeff = 0; coeff!= coefficients.size(); ++coeff){ RealMatrix coeffBatch(1,coefficients.size()); 
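//Note: the one-hot coefficient batch built below isolates a single output unit;
//after dividing by coefficients(coeff), the weighted derivative must match the
//plain finite-difference gradient of that output estimated above.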
coeffBatch.clear(); coeffBatch(0,coeff)=coefficients(coeff); RealVector testGradient; net.weightedParameterDerivative(pointBatch,coeffBatch,*state,testGradient); //this makes the result again independent of the coefficient //provided that the computation is correct testGradient/=coefficients(coeff); //calculate error between both BOOST_REQUIRE_EQUAL(testGradient.size(),derivative.size()); for(std::size_t i = 0; i != testGradient.size(); ++i){ double error=sqr(testGradient(i)-derivative[i](coeff)); BOOST_CHECK_SMALL(error,epsilon); } } // double error=norm_2(testGradient-resultGradient); // BOOST_CHECK_SMALL(error,epsilon); } template void testWeightedSecondDerivative(Model& net,const Point& point,const RealVector& coefficients, const RealMatrix& coeffHessian, double epsilon=1.e-5,double estimationEpsilon = 1.e-10){ boost::shared_ptr state = net.createState(); typename Model::BatchOutputType output; net.eval(point,output,*state); //now calculate the nets weighted gradient RealVector testGradient; RealMatrix testHessian; net.weightedParameterDerivative(point,coefficients,coeffHessian,*state,testGradient,testHessian); //estimate hessians and derivatives for every output std::vector hessians = estimateSecondDerivative(net,point); std::vector derivative = estimateDerivative(net,point, estimationEpsilon); //calculate testresult RealMatrix resultHessian(derivative.size(),derivative.size()); RealVector resultGradient(derivative.size()); //this is the weighted gradient calculated the naiive way for(size_t i=0;i!=derivative.size();++i) resultGradient(i)=inner_prod(derivative[i],coefficients); //this is the weighted hessian calculated the naiive way for(size_t wi=0;wi!=derivative.size();++wi){ for(size_t wj=0;wj!=derivative.size();++wj){ resultHessian(wi,wj) = inner_prod(row(hessians[wi],wj),coefficients); for(size_t output=0;output!=coefficients.size();++output){ for(size_t output2=0;output2!=coefficients.size();++output2){ resultHessian(wi,wj) += coeffHessian(output,output2)*derivative[wi](output)*derivative[wj](output2); } } } } //test the gradient double error=norm_2(testGradient-resultGradient); BOOST_REQUIRE_SMALL(error,epsilon); //test hessian for(size_t wi=0;wi!=derivative.size();++wi){ for(size_t wj=0;wj!=derivative.size();++wj){ BOOST_CHECK_SMALL(resultHessian(wi,wj)-testHessian(wi,wj),epsilon); } } } template void testWeightedInputDerivative(Model& net,const Point& point,const RealVector& coefficients,double epsilon=1.e-5, double estimationEpsilon=1.e-5){ //now calculate the nets weighted gradient RealMatrix coeffBatch(1,coefficients.size()); RealMatrix pointBatch(1,point.size()); row(coeffBatch,0)=coefficients; row(pointBatch,0)=point; boost::shared_ptr state = net.createState(); typename Model::BatchOutputType output; net.eval(pointBatch,output,*state); RealMatrix testGradient; net.weightedInputDerivative(pointBatch,coeffBatch,*state,testGradient); //calculate testresult //this is the weighted gradient calculated the naive way std::vector derivative=estimateInputDerivative(net,point,estimationEpsilon); RealVector resultGradient(derivative.size()); for(size_t i=0;i!=derivative.size();++i) resultGradient(i)=inner_prod(derivative[i],coefficients); //calculate error between both double error=norm_2(row(testGradient,0)-resultGradient); BOOST_CHECK_SMALL(error,epsilon); } ///convenience function which does automatic sampling of points,parameters and coefficients ///and evaluates and tests the parameter derivative. 
///it is assumed that the function has the methods inputSize() and outputSize() ///samples are taken from the interval -10,10 template void testWeightedDerivative(Model& net,unsigned int numberOfTests = 1000, double epsilon=1.e-5,double estimationEpsilon = 1.e-5) { BOOST_CHECK_EQUAL(net.hasFirstParameterDerivative(),true); RealVector parameters(net.numberOfParameters()); RealVector coefficients(net.outputSize()); RealVector point(net.inputSize()); for(unsigned int test = 0; test != numberOfTests; ++test){ for(size_t i = 0; i != net.numberOfParameters();++i){ parameters(i) = Rng::uni(-5,5); } for(size_t i = 0; i != net.outputSize();++i){ coefficients(i) = Rng::uni(-5,5); } for(size_t i = 0; i != net.inputSize();++i){ point(i) = Rng::uni(-5,5); } net.setParameterVector(parameters); testWeightedDerivative(net, point, coefficients, epsilon,estimationEpsilon); } } ///convenience function which does automatic sampling of points,parameters and coefficients ///and evaluates and tests the input derivative. ///it is assumed that the function has the methods inputSize() and outputSize() ///samples are taken from the interval -10,10 template void testWeightedInputDerivative(Model& net,unsigned int numberOfTests = 1000, double epsilon=1.e-5,double estimationEpsilon = 1.e-5) { BOOST_CHECK_EQUAL(net.hasFirstInputDerivative(),true); RealVector parameters(net.numberOfParameters()); RealVector coefficients(net.outputSize()); RealVector point(net.inputSize()); for(unsigned int test = 0; test != numberOfTests; ++test){ for(size_t i = 0; i != net.numberOfParameters();++i){ parameters(i) = Rng::uni(-10,10); } for(size_t i = 0; i != net.outputSize();++i){ coefficients(i) = Rng::uni(-10,10); } for(size_t i = 0; i != net.inputSize();++i){ point(i) = Rng::uni(-10,10); } net.setParameterVector(parameters); testWeightedInputDerivative(net, point, coefficients, epsilon,estimationEpsilon); } } ///tests whether the derivatives computed separately are the same as the result returned by /// weightedInputDerivative and weightedParameterDerivative template void testWeightedDerivativesSame(Model& net,unsigned int numberOfTests = 100, double epsilon = 1.e-10){ BOOST_CHECK_EQUAL(net.hasFirstInputDerivative(),true); RealVector parameters(net.numberOfParameters()); RealMatrix coeffBatch(10,net.outputSize()); RealMatrix pointBatch(10,net.inputSize()); for(unsigned int test = 0; test != numberOfTests; ++test){ for(size_t i = 0; i != net.numberOfParameters();++i){ parameters(i) = Rng::uni(-10,10); } for(std::size_t j = 0; j != 10; ++j){ for(size_t i = 0; i != net.outputSize();++i){ coeffBatch(j,i) = Rng::uni(-10,10); } for(size_t i = 0; i != net.inputSize();++i){ pointBatch(j,i) = Rng::uni(-10,10); } } net.setParameterVector(parameters); boost::shared_ptr state = net.createState(); typename Model::BatchOutputType output; net.eval(pointBatch,output,*state); RealMatrix inputDerivative; RealVector parameterDerivative; net.weightedInputDerivative(pointBatch,coeffBatch,*state,inputDerivative); net.weightedParameterDerivative(pointBatch,coeffBatch,*state, parameterDerivative); RealMatrix testInputDerivative; RealVector testParameterDerivative; net.weightedDerivatives(pointBatch,coeffBatch,*state, testParameterDerivative, testInputDerivative); double errorInput = max(inputDerivative-testInputDerivative); double errorParameter = max(parameterDerivative-testParameterDerivative); BOOST_CHECK_SMALL(errorInput,epsilon); BOOST_CHECK_SMALL(errorParameter,epsilon); } } namespace detail{ //small helper functions which are used in testEval() to get 
the error between two samples double elementEvalError(unsigned int a, unsigned int b){ return (double) (a > b? a-b: b-a); } template double elementEvalError(T a, U b){ return distance(a,b); } } template void testBatchEval(AbstractModel& model, typename Batch::type const& sampleBatch){ std::size_t batchSize = size(sampleBatch); //evaluate batch of inputs using a state and without stat. typename Batch::type resultBatch = model(sampleBatch); typename Batch::type resultBatch2; boost::shared_ptr state = model.createState(); model.eval(sampleBatch,resultBatch2,*state); //sanity check. if we don't get a result for every input something is seriously broken BOOST_REQUIRE_EQUAL(size(resultBatch),batchSize); BOOST_REQUIRE_EQUAL(size(resultBatch2),batchSize); //eval every element of the batch independently and compare the batch result with it for(std::size_t i = 0; i != batchSize; ++i){ R result = model(get(sampleBatch,i)); double error = detail::elementEvalError(result, get(resultBatch,i)); double error2 = detail::elementEvalError(result, get(resultBatch2,i)); BOOST_CHECK_SMALL(error, 1.e-7); BOOST_CHECK_SMALL(error2, 1.e-7); } } } #endif Shark-3.1.4/Test/Network/000077500000000000000000000000001314655040000151355ustar00rootroot00000000000000Shark-3.1.4/Test/Network/HttpClient.cpp000066400000000000000000000021351314655040000177200ustar00rootroot00000000000000#include #include namespace shark { template< unsigned int N > void request(const std::string & url) { for( unsigned int i = 0; i < N; i++ ) { try { boost::network::http::client::request request_( url ); request_ << boost::network::header("Connection", "close"); boost::network::http::client client_; boost::network::http::client::response response_ = client_.get(request_); std::cout << boost::network::http::body( response_ ) << std::endl; } catch( ... ) { } } } } int main( int argc, char ** argv ) { std::string url( "http://localhost:9090/About" ); if( argc > 1 ) url = argv[1]; boost::progress_timer pt; boost::thread t1( boost::bind( shark::request<100>, url ) ); boost::thread t2( boost::bind( shark::request<100>, url ) ); boost::thread t3( boost::bind( shark::request<100>, url ) ); t1.join(); t2.join(); t3.join(); std::cout << pt.elapsed() / 30000 << " seconds per request." 
<< std::endl; return( EXIT_SUCCESS ); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Network/HttpServer.cpp000077500000000000000000000060531314655040000177560ustar00rootroot00000000000000#define BOOST_TEST_MODULE Http_Server #include #include #include #include #include #include #include #include #include #include #include #include namespace shark { struct Producer : public ProbeProvider< Producer > { Producer() : ProbeProvider( "Producer" ) { for( unsigned int i = 0; i < 30; i++ ) m_probes.push_back( registerProbe( ( boost::format( "Probe_%1%" ) % i ).str(), "An example probe" ) ); } void start() { m_thread = boost::move( boost::thread( boost::bind( &Producer::run, this ) ) ); } void stop() { m_thread.join(); } void run() { while( true ) { m_probes[ shark::Rng::discrete( 0, m_probes.size()-1 ) ]->setValue( shark::Rng::gauss() ); boost::this_thread::sleep( boost::posix_time::milliseconds( 100 ) ); } } std::vector< boost::shared_ptr< shark::Probe > > m_probes; boost::thread m_thread; }; template< unsigned int N > void request(const std::string & url) { for( unsigned int i = 0; i < N; i++ ) { boost::network::http::client::request request_( url ); request_ << boost::network::header("Connection", "close"); boost::network::http::client client_; boost::network::http::client::response response_ = client_.get(request_); std::cout << boost::network::http::body( response_ ) << std::endl; } } } BOOST_AUTO_TEST_SUITE (Network_HttpServer) BOOST_AUTO_TEST_CASE( HttpServer ) { shark::Producer producer; producer.start(); shark::HttpServer httpServer( "0.0.0.0", "9090" ); httpServer.registerHandler( "/About", boost::shared_ptr< shark::HttpServer::AbstractRequestHandler >( new shark::SharkAboutHandler() ) ); httpServer.registerHandler( "/ProbeManager", boost::shared_ptr< shark::HttpServer::AbstractRequestHandler >( new shark::RestHandler() ) ); httpServer.setFallbackHandler( boost::shared_ptr< shark::HttpServer::AbstractRequestHandler >( new shark::FileHandler( boost::filesystem::initial_path() ) ) ); try { boost::thread serverThread( boost::bind( &shark::HttpServer::start, boost::ref( httpServer ) ) ); std::string url( "http://localhost:9090/About" ); boost::progress_timer pt; boost::thread t1( boost::bind( shark::request<100>, url ) ); boost::thread t2( boost::bind( shark::request<100>, url ) ); boost::thread t3( boost::bind( shark::request<100>, url ) ); t1.join(); t2.join(); t3.join(); std::cout << pt.elapsed() / 3000 << " seconds per request." << std::endl; } catch( ... ) { } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/000077500000000000000000000000001314655040000173075ustar00rootroot00000000000000Shark-3.1.4/Test/ObjectiveFunctions/AUC.cpp000066400000000000000000000042071314655040000204260ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test case for area under the (ROC) curve computation. * * * * \author Christian Igel * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ #include #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_AUC #include #include using namespace shark; using namespace std; /// Example taken from AUCCalculator website maintained by Jesse Davis /// and Mark Goadrich and verified using their java code. BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_AUC) BOOST_AUTO_TEST_CASE( AUC_EVAL ) { Data prediction(10,RealVector(1)); Data label(10,0); double values[10] = { .9, 8, .7, .6, .55, .54, .53, .52, .51, .505 }; unsigned int labels[10] = {1, 1, 0, 1, 1, 1, 0, 0, 1, 0}; for(std::size_t i=0; i<10; i++) { prediction.element(i)(0)= values[i]; label.element(i) = labels[i]; } //AUC auc; NegativeAUC auc; const double value = -0.75; // negative AUC double valueResult = auc.eval(label, prediction); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); //NegativeAUC uAuc; //valueResult = uAuc.eval(label, label); //BOOST_CHECK((valueResult == 1.)); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/AbsoluteLoss.cpp000066400000000000000000000042431314655040000224350ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Absolute loss test case * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *

* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #include #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_ABSOLUTELOSS #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_AbsoluteLoss) BOOST_AUTO_TEST_CASE( ABSOLUTELOSS_EVAL ) { AbsoluteLoss<> loss; unsigned int maxTests = 1000; unsigned int batchSize = 10; for (unsigned int test = 0; test != maxTests; ++test) { std::size_t dim = Rng::discrete(5, 100); RealMatrix target(batchSize,dim); RealMatrix output(batchSize,dim); double sum = 0; for(std::size_t b = 0; b != batchSize; ++b){ double dist2 = 0; for (std::size_t d=0; d != dim; d++){ target(b,d) = Rng::gauss(); output(b,d) = Rng::gauss(); double diff = target(b,d) - output(b,d); dist2 += diff * diff; } sum += std::sqrt(dist2); } double l = loss.eval(target, output); BOOST_CHECK_SMALL(l - sum, 1e-12); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/Benchmarks.cpp000066400000000000000000000032241314655040000220710ustar00rootroot00000000000000#define BOOST_TEST_MODULE ObjectiveFunctions_Benchmarks #include #include #include #include #include #include #include #include #include "TestObjectiveFunction.h" BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_Benchmarks) BOOST_AUTO_TEST_CASE( Himmelblau ) { shark::Himmelblau hb; shark::RealVector v( 2 ); v( 0 ) = -0.270844; v( 1 ) = -0.923038; BOOST_CHECK_SMALL( hb( v ) - 181.616, 1E-3 ); v( 0 ) = 3; v( 1 ) = 2; BOOST_CHECK_SMALL( hb( v ), 1E-10 ); v( 0 ) = -2.805118; v( 1 ) = 3.131312; BOOST_CHECK_SMALL( hb( v ), 1E-10 ); v( 0 ) = -3.779310; v( 1 ) = -3.283186; BOOST_CHECK_SMALL( hb( v ), 1E-10 ); v( 0 ) = 3.584428; v( 1 ) = -1.848126; BOOST_CHECK_SMALL( hb( v ), 1E-10 ); } BOOST_AUTO_TEST_CASE( Rosenbrock_Derivative ) { const std::size_t dimensions = 5; const unsigned int trials = 10000; shark::Rosenbrock rosenbrock(dimensions); for(unsigned int i = 0; i != trials; ++i) { shark::RealVector point = rosenbrock.proposeStartingPoint(); shark::testDerivative(rosenbrock, point,1.e-7,1.e-7,0.005); } } BOOST_AUTO_TEST_CASE( Ellipsoid_Derivative ) { const std::size_t dimensions = 5; const unsigned int trials = 10000; shark::Ellipsoid ellipsoid(dimensions); for(unsigned int i = 0; i != trials; ++i) { shark::RealVector point = ellipsoid.proposeStartingPoint(); shark::testDerivative(ellipsoid, point,1.e-5,1.e-9); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/BoxConstraintHandler.cpp000066400000000000000000000055351314655040000241160ustar00rootroot00000000000000#define BOOST_TEST_MODULE OBJECTIVEFUNCTION_BOXCONSTRAINTHANDLER #include #include #include #include using namespace shark; struct Fixture { Fixture():handlerSame(10,0,1){ RealVector lower(10); RealVector upper(10); for(std::size_t i = 0; i != 10; ++i){ lower(i) = i-10.0; upper(i) = lower(i)+2*i; } handler = BoxConstraintHandler(lower,upper); } 
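//fixture members: handler uses the per-dimension box [i-10, 3*i-10] constructed
//above, while handlerSame applies the scalar bounds [0,1] in all ten dimensions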
BoxConstraintHandler handler; BoxConstraintHandler handlerSame; }; BOOST_FIXTURE_TEST_SUITE (ObjectiveFunctions_BoxConstraintHandler, Fixture) //checks that the handler is constructed with the right bounds BOOST_AUTO_TEST_CASE( BoxConstraintHandler_Bounds ) { for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_CLOSE(handler.lower()(i),i-10.0,1.e-10); BOOST_CHECK_CLOSE(handler.upper()(i),3*i-10.0,1.e-10); BOOST_CHECK_CLOSE(handlerSame.lower()(i),0,1.e-10); BOOST_CHECK_CLOSE(handlerSame.upper()(i),1,1.e-10); } } //checks that the handler generates feasible points in the region //and that isFeasible and closestFeasible do the right thing (i.e. return true and do nothing) BOOST_AUTO_TEST_CASE( BoxConstraintHandler_Generate ) { for(std::size_t i = 0; i != 1000; ++i){ RealVector point; handler.generateRandomPoint(point); BOOST_REQUIRE_EQUAL(point.size(),10); //check that the point is feasible for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK(point(i) >= handler.lower()(i)); BOOST_CHECK(point(i) <= handler.upper()(i)); } //must be feasible BOOST_CHECK(handler.isFeasible(point)); //closest feasible must not change the point RealVector feasible=point; handler.closestFeasible(feasible); BOOST_CHECK_SMALL(norm_sqr(feasible-point),1.e-12); } } //given a random point, checks whether it is feasible and checks whether the correction is okay. BOOST_AUTO_TEST_CASE( BoxConstraintHandler_Infeasible ) { for(std::size_t i = 0; i != 1000; ++i){ RealVector point(10); RealVector corrected(10); bool feasible = true; for(std::size_t i = 0; i != 10; ++i){ point(i) = Rng::uni(-30,40); corrected(i) = point(i); if(point(i) < handler.lower()(i)){ corrected(i) = handler.lower()(i); feasible = false; } if(point(i) > handler.upper()(i)){ corrected(i) = handler.upper()(i); feasible = false; } } //check feasibility of both points BOOST_CHECK(handler.isFeasible(point) == feasible); BOOST_CHECK(handler.isFeasible(corrected)); //now check that the handler returns the same corrected point RealVector handlerCorrected = point; handler.closestFeasible(handlerCorrected); BOOST_REQUIRE_EQUAL(handlerCorrected.size(),10); for(std::size_t i = 0; i != 10; ++i){ BOOST_CHECK_EQUAL(handlerCorrected(i),corrected(i)); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/CrossEntropy.cpp000066400000000000000000000101101314655040000224600ustar00rootroot00000000000000#include #include #include #include #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_CROSSENTROPY #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_CrossEntropy) BOOST_AUTO_TEST_CASE( CROSSENTROPY_DERIVATIVES_TWO_CLASSES_SINGLE_INPUT ){ unsigned int maxTests = 1000; for(unsigned int test = 0; test != maxTests; ++test){ CrossEntropy loss; //sample point between -300,300 RealMatrix testPoint(1,1); testPoint(0,0) = Rng::uni(-300.0,300.0); //sample label unsigned int label = Rng::coinToss(); double calcLabel = label ?
1 : -1; UIntVector labelVec(1); labelVec(0) = label; //the test results double valueResult = std::log(1 + std::exp(- calcLabel * testPoint(0,0))); RealVector estimatedDerivative = estimateDerivative(loss, testPoint, labelVec); //test eval double value = loss.eval(labelVec,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(labelVec, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_CHECK_SMALL(norm_2(row(derivative,0) - estimatedDerivative), 1.e-8); } } BOOST_AUTO_TEST_CASE( CROSSENTROPY_DERIVATIVES_TWO_CLASSES_TWO_INPUT ){ unsigned int maxTests = 10000; for(unsigned int test = 0; test != maxTests; ++test){ CrossEntropy loss; //sample point between -10,10 RealMatrix testPoint(1,2); testPoint(0,0) = Rng::uni(-150.0,150.0); testPoint(0,1) = -testPoint(0,0); //sample label unsigned int label = Rng::coinToss(); UIntVector labelVec(1); labelVec(0) = label; //the test results double valueResult = std::log(1 + std::exp(-2* testPoint(0,label))); RealVector estimatedDerivative = estimateDerivative(loss, testPoint, labelVec); //test eval double value = loss.eval(labelVec,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(labelVec, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_CHECK_SMALL(norm_2(row(derivative,0) - estimatedDerivative), 1.e-8); } } BOOST_AUTO_TEST_CASE( CROSSENTROPY_DERIVATIVES_MULTI_CLASS ){ unsigned int maxTests = 1000; for(unsigned int test = 0; test != maxTests; ++test){ CrossEntropy loss; //sample point between -10,10 RealMatrix testPoint(1,5); double norm = 0; for(std::size_t i = 0; i !=5; ++i){ testPoint(0,i) = Rng::uni(-10.0,10.0); norm+=std::exp(testPoint(0,i)); } //sample label unsigned int label = Rng::discrete(0,4); UIntVector labelVec(1); labelVec(0) = label; //std::cout<
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_CrossValidation #include #include using namespace shark; // This test case judges a linear regression of data // *not* on a line by means of cross-validation. In // this very simple case the error can be computed // analytically, and thus can serve validation. BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_CrossValidation) BOOST_AUTO_TEST_CASE( ObjectiveFunctions_CrossValidation ) { // create a simple dataset with three point, // which also form three folds std::vector data(3); std::vector target(3); size_t i; for (i=0; i<3; i++) { data[i].resize(1); target[i].resize(1); } data[0](0) = 0.0; target[0](0) = 0.0; // regression of this point from the others gives 2 --> loss = 2 data[1](0) = 1.0; target[1](0) = 1.0; // regression of this point from the others gives 0 --> loss = 1 data[2](0) = 2.0; target[2](0) = 0.0; // regression of this point from the others gives 2 --> loss = 2 RegressionDataset dataset = createLabeledDataFromRange(data, target); CVFolds folds = createCVSameSize(dataset, 3); // there is only one way to form three folds out of three points // setup (regularized) linear regression for cross-validation LinearModel<> lin(1, 1, true); // affine linear function LinearRegression trainer; AbsoluteLoss<> loss; CrossValidationError > cvError( folds, &trainer, &lin, &trainer, &loss ); // set the regularization to zero and evaluation this setting RealVector param(1); param(0) = 0.0; double cve = cvError.eval(param); std::cout << "*** cross-validation test case ***" << std::endl; std::cout << "CV-error: " << cve << " (should be 5/3 = 1.66666...)" << std::endl; BOOST_CHECK_LT(std::abs(cve - 5.0 / 3.0), 1e-12); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/EpsilonHingeLoss.cpp000066400000000000000000000036541314655040000232500ustar00rootroot00000000000000#include #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_EPSILONHINGELOSS #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_EpsilonHingeLoss) BOOST_AUTO_TEST_CASE( EPSILONHINGELOSS_EVAL ) { unsigned int maxTests = 10000; unsigned int minDim = 3; unsigned int maxDim = 10; for (unsigned int test = 0; test != maxTests; ++test) { double epsilon = Rng::uni(0,5); EpsilonHingeLoss loss(epsilon); std::size_t dim = Rng::discrete(minDim,maxDim); //sample point between -10,10 RealMatrix testPoint(5,dim); RealMatrix testLabel(5,dim); RealMatrix valueResultM(5,dim,0); for(std::size_t i = 0; i != 5; ++i){ for(std::size_t j = 0; j != dim; ++j){ testPoint(i,j) = Rng::uni(-10.0,10.0); testLabel(i,j) = Rng::uni(-10.0,10.0); valueResultM(i,j) = std::max(0.0, std::abs(testPoint(i,j)-testLabel(i,j))-epsilon); } } double valueResult = sum(valueResultM); //test eval double 
value = loss.eval(testLabel,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-12); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-12); BOOST_REQUIRE_EQUAL(derivative.size1(), 5); BOOST_REQUIRE_EQUAL(derivative.size2(), dim); for(std::size_t i = 0; i != 5; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), RealMatrix(rows(testLabel,i,i+1))); for(std::size_t j = 0; j != dim; ++j){ if(valueResultM(i,j) > 1.e-5){ BOOST_CHECK_SMALL(derivative(i,j) - estimatedDerivative(j), 1.e-5); } else if(valueResultM(i,j)<=0.0){ BOOST_CHECK_SMALL(derivative(i,j), 1.e-10); } } } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/ErrorFunction.cpp000066400000000000000000000151151314655040000226150ustar00rootroot00000000000000#include #include #include #include #include #include #define BOOST_TEST_MODULE ObjFunct_ErrorFunction #include #include using namespace shark; class TestModel : public AbstractModel { private: double m_c; size_t m_dim; public: TestModel(int dim,double c):m_c(c),m_dim(dim){ m_features|=HAS_FIRST_PARAMETER_DERIVATIVE; } std::string name() const { return "TestModel"; } //this model doesn't use any parameters...it just pretends! virtual RealVector parameterVector()const{return RealVector(10);} virtual void setParameterVector(RealVector const& newParameters){} virtual size_t numberOfParameters()const{return 1;} virtual size_t inputSize()const { return m_dim; } virtual size_t outputSize()const { return m_dim; } boost::shared_ptr createState()const{ return boost::shared_ptr(new EmptyState()); } // adds just a value c on the input void eval(RealMatrix const& patterns, RealMatrix& output, State& state)const { output.resize(patterns.size1(),m_dim); for(std::size_t p = 0; p != patterns.size1();++p){ for (size_t i=0;i!=m_dim;++i) output(p,i)=patterns(p,i)+m_c; } } using AbstractModel::eval; virtual void weightedParameterDerivative( RealMatrix const& input, RealMatrix const& coefficients, State const& state, RealVector& derivative)const { derivative.resize(1); derivative(0)=0; for (size_t p = 0; p < coefficients.size1(); p++) { derivative(0) +=sum(row(coefficients,p)); } } }; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_ErrorFunction) BOOST_AUTO_TEST_CASE( ObjFunct_ErrorFunction_BASE ) { RealVector zero(10); zero.clear(); std::vector input(3,zero);//input just zeros std::vector target=input;//target same as input RegressionDataset dataset = createLabeledDataFromRange(input,target); RealVector parameters(1);//parameters also zero parameters.clear(); //every value of input gets 2 added. so the square error of each example is 10*4=40 TestModel model(10,2); SquaredLoss<> loss; ErrorFunction mse(dataset, &model,&loss); double error=mse.eval(parameters); BOOST_CHECK_SMALL(error-40,1.e-15); //calculate derivative - it should also be 40 ErrorFunction::FirstOrderDerivative derivative; mse.evalDerivative(parameters,derivative); BOOST_CHECK_SMALL(derivative(0)-40,1.e-15); } //test whether we can get to the same result as linear regression //this is very similar to the linear regrssion test. first we create a model, //let it evaluate some responses + gaussian noise. then we let it forget //and create an ErrorFunction which is supposed to have a minimum at the point. 
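//Sketch of the check below: with targets y = f(x;w*) + eps, eps ~ N(0,I), the
//mean squared error evaluated at the generating parameters w* equals the average
//noise energy (optimalMSE), and the gradient there should be numerically close
//to zero; only then is the model re-initialized and re-fitted with Rprop.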
BOOST_AUTO_TEST_CASE( ObjFunct_ErrorFunction_LinearRegression ){ const size_t trainExamples = 60000; LinearRegression trainer; LinearModel<> model; RealMatrix matrix(2, 2); RealVector offset(2); matrix(0,0) = 3; matrix(1,1) = -5; matrix(0,1) = -2; matrix(1,0) = 7; offset(0) = 3; offset(1) = -6; model.setStructure(matrix, offset); RealVector optimum=model.parameterVector(); // create datatset - the model output + gaussian noise RealMatrix covariance(2, 2); covariance(0,0) = 1; covariance(0,1) = 0; covariance(1,0) = 0; covariance(1,1) = 1; MultiVariateNormalDistribution noise(covariance); Uniform<> uniform(Rng::globalRng,-3.0, 3.0); // create samples std::vector input(trainExamples,RealVector(2)); std::vector trainTarget(trainExamples,RealVector(2)); std::vector testTarget(trainExamples,RealVector(2)); double optimalMSE = 0; for (size_t i=0;i!=trainExamples;++i) { input[i](0) = uniform(); input[i](1) = uniform(); testTarget[i] = model(input[i]); RealVector noiseVal = noise(Rng::globalRng).first; trainTarget[i] = noiseVal + testTarget[i]; optimalMSE+=norm_sqr(noiseVal); } optimalMSE/=trainExamples; //create loss function and internal implementations to check everything is working RegressionDataset trainset = createLabeledDataFromRange(input, trainTarget); SquaredLoss<> loss; { ErrorFunction mse(trainset, &model,&loss); double val = mse.eval(optimum); BOOST_CHECK_CLOSE(optimalMSE,val,1.e-10); ErrorFunction::FirstOrderDerivative d; double valGrad = mse.evalDerivative(optimum,d); double gradNorm = norm_2(d); BOOST_CHECK_CLOSE(optimalMSE,valGrad,1.e-10); BOOST_CHECK_SMALL(gradNorm,1.e-1); //let the model forget by reinitializing with random values initRandomNormal(model,2); //optimize with rprop IRpropPlus rprop; rprop.init(mse); for(std::size_t i = 0; i != 100; ++i){ rprop.step(mse); } double diff = norm_sqr(rprop.solution().point-optimum); std::cout< mse(trainset,&model,&loss); double val = mse.eval(optimum); BOOST_CHECK_CLOSE(optimalMSE,val,1.e-10); ErrorFunction::FirstOrderDerivative d; double valGrad = mse.evalDerivative(optimum,d); double gradNorm = norm_2(d); BOOST_CHECK_CLOSE(optimalMSE,valGrad,1.e-10); BOOST_CHECK_SMALL(gradNorm,1.e-1); //let the model forget by reinitializing with random values initRandomNormal(model,2); //optimize with rprop IRpropPlus rprop; rprop.init(mse); for(std::size_t i = 0; i != 100; ++i){ rprop.step(mse); } double diff = norm_sqr(rprop.solution().point-optimum); std::cout< mse(trainset,&model,&loss); double val = mse.eval(optimum); BOOST_CHECK_CLOSE(optimalMSE,val,1.e-10); ErrorFunction::FirstOrderDerivative d; double valGrad = mse.evalDerivative(optimum,d); double gradNorm = norm_2(d); BOOST_CHECK_CLOSE(optimalMSE,valGrad,1.e-10); BOOST_CHECK_SMALL(gradNorm,1.e-1); //let the model forget by reinitializing with random values initRandomNormal(model,2); //optimize with rprop IRpropPlus rprop; rprop.init(mse); for(std::size_t i = 0; i != 100; ++i){ rprop.step(mse); } double diff = norm_sqr(rprop.solution().point-optimum); std::cout< #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_HINGELOSS #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_HingeLoss) BOOST_AUTO_TEST_CASE( HINGELOSS_EVAL_TWOCLASS ) { unsigned int maxTests = 10000; for (unsigned int test = 0; test != maxTests; ++test) { HingeLoss loss; //sample point between -10,10 RealMatrix testPoint(2,1); testPoint(0,0) = Rng::uni(-10.0,10.0); testPoint(1,0) = Rng::uni(-10.0,10.0); RealMatrix testPoint2D(2,2); testPoint2D(0,0) = 
testPoint(0,0); testPoint2D(0,1) = -testPoint(0,0); testPoint2D(1,0) = testPoint(1,0); testPoint2D(1,1) = -testPoint(1,0); testPoint2D*=-1; //sample label {-1,1} UIntVector testLabel(2); testLabel(0) = Rng::coinToss(0.5); testLabel(1) = Rng::coinToss(0.5); int label0 = testLabel(0)?1:-1; int label1 = testLabel(1)?1:-1; //the test results double valueResultP[] = { std::max(0.0, 1-testPoint(0,0)*label0), std::max(0.0, 1-testPoint(1,0)*label1) }; double valueResult = valueResultP[0]+valueResultP[1]; //test eval double value = loss.eval(testLabel,testPoint); double value2D = loss.eval(testLabel,testPoint2D); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); BOOST_CHECK_SMALL(value2D-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; RealMatrix derivative2D; value = loss.evalDerivative(testLabel, testPoint, derivative); value2D = loss.evalDerivative(testLabel, testPoint2D, derivative2D); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_CHECK_SMALL(value2D - valueResult, 1.e-13); BOOST_REQUIRE_EQUAL(derivative2D.size1(), 2); BOOST_REQUIRE_EQUAL(derivative2D.size2(),2); BOOST_REQUIRE_EQUAL(derivative.size1(), 2); BOOST_REQUIRE_EQUAL(derivative.size2(), 1); for(std::size_t i = 0; i != 2; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), subrange(testLabel,i,i+1)); RealVector estimatedDerivative2D = estimateDerivative(loss, RealMatrix(rows(testPoint2D,i,i+1)), subrange(testLabel,i,i+1)); if(std::abs(valueResultP[i])>1.e-5){ BOOST_CHECK_SMALL(norm_2(row(derivative,i) - estimatedDerivative), 1.e-5); BOOST_CHECK_SMALL(norm_2(row(derivative2D,i) - estimatedDerivative2D), 1.e-5); } else{ BOOST_CHECK_SMALL(norm_2(row(derivative,i)), 1.e-10); BOOST_CHECK_SMALL(norm_2(row(derivative2D,i)), 1.e-10); } } } } BOOST_AUTO_TEST_CASE( HINGELOSS_EVAL_MULTICLASS ) { unsigned int maxTests = 10000; unsigned int minDim = 3; unsigned int maxDim = 10; for (unsigned int test = 0; test != maxTests; ++test) { HingeLoss loss; std::size_t dim = Rng::discrete(minDim,maxDim); //sample point between -10,10 RealMatrix testPoint(5,dim); UIntVector testLabel(5); RealVector valueResultP(5,0); for(std::size_t i = 0; i != 5; ++i){ testLabel(i) = (unsigned int)Rng::discrete(0,dim-1); testPoint(i,testLabel(i)) = Rng::uni(-10.0,10.0); for(std::size_t j = 0; j != dim; ++j){ if(j == testLabel(i)) continue; testPoint(i,j) = Rng::uni(-10.0,10.0); valueResultP[i]+= std::max(0.0, 1-0.5*(testPoint(i,testLabel(i))- testPoint(i,j))); } } double valueResult = sum(valueResultP); //test eval double value = loss.eval(testLabel,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_REQUIRE_EQUAL(derivative.size1(), 5); BOOST_REQUIRE_EQUAL(derivative.size2(), dim); for(std::size_t i = 0; i != 5; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), UIntVector(subrange(testLabel,i,i+1))); if(std::abs(valueResultP[i])>1.e-5){ BOOST_CHECK_SMALL(norm_2(row(derivative,i) - estimatedDerivative), 1.e-5); } else if(valueResultP[i]<=0.0){ BOOST_CHECK_SMALL(norm_2(row(derivative,i)), 1.e-10); } } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/HuberLoss.cpp000066400000000000000000000031221314655040000217170ustar00rootroot00000000000000#include #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_HUBERLOSS #include #include using 
namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_HuberLoss) BOOST_AUTO_TEST_CASE( HUBERLOSS_TEST ) { unsigned int maxTests = 10000; for (unsigned int test = 0; test != maxTests; ++test) { HuberLoss loss(2.0); RealMatrix testPoint(2,3); testPoint(0,0) = Rng::uni(-1,1); testPoint(1,0) = Rng::uni(-1,1); testPoint(0,1) = Rng::uni(-1,1); testPoint(1,1) = Rng::uni(-1,1); testPoint(0,2) = Rng::uni(-1,1); testPoint(1,2) = Rng::uni(-1,1); RealMatrix testLabel(2,3); testLabel(0,0) = Rng::uni(-1,1); testLabel(1,0) = Rng::uni(-1,1); testLabel(0,1) = Rng::uni(-1,1); testLabel(1,1) = Rng::uni(-1,1); testLabel(0,2) = Rng::uni(-1,1); testLabel(1,2) = Rng::uni(-1,1); //test evalDerivative (first) RealMatrix derivative; double valueEval = loss.eval(testLabel, testPoint); double valueEvalDerivative = loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(valueEval - valueEvalDerivative, 1.e-13); BOOST_REQUIRE_EQUAL(derivative.size1(), 2); BOOST_REQUIRE_EQUAL(derivative.size2(), 3); for(std::size_t i = 0; i != 2; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), rows(testLabel,i,i+1)); BOOST_CHECK_SMALL(norm_2(row(derivative,i) - estimatedDerivative), 1.e-5); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/KernelBasisDistance.cpp000066400000000000000000000155741314655040000237040ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for the Kernel Basis Distance error function. * * * \author O. Krause * \date 2014 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_KernelBasisDistance #include #include #include "TestObjectiveFunction.h" using namespace shark; //Sanity check that checks that the error of the exact same basis is minimal and the derivative is small. BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_KernelBasisDistance) BOOST_AUTO_TEST_CASE( ObjectiveFunctions_Value_Derivative_Optimal ) { for(std::size_t trial = 0; trial != 10; ++trial){ Chessboard problem; LabeledData dataset = problem.generateDataset(50,10); GaussianRbfKernel<> kernel(0.5); KernelExpansion expansion(&kernel,dataset.inputs(),false,3); for(std::size_t i = 0; i != 50; ++i){ for(std::size_t j = 0; j != 3; ++j){ expansion.alpha()(i,j) = Rng::gauss(0,1); } } KernelBasisDistance distance(&expansion,50); RealMatrix pointBatch(50,2); RealVector point(2*50); for(std::size_t i = 0; i != 50; ++i){ point(2*i) = dataset.element(i).input(0); point(2*i+1) = dataset.element(i).input(1); row(pointBatch,i) = dataset.element(i).input; } RealMatrix K = kernel(pointBatch,pointBatch); //we omit the expensive to compute constant term in the distance, thus we compute it here to //assure that the corrected value is (very close to) zero. double correction = inner_prod(column(expansion.alpha(),0),prod(K,column(expansion.alpha(),0))); correction += inner_prod(column(expansion.alpha(),1),prod(K,column(expansion.alpha(),1))); correction += inner_prod(column(expansion.alpha(),2),prod(K,column(expansion.alpha(),2))); BOOST_CHECK_SMALL(2*distance(point) + correction,1.e-10); //check that the solution found is also the same as we expected. 
double solutionDistance = norm_frobenius(distance.findOptimalBeta(point)-expansion.alpha()); BOOST_CHECK_SMALL(sqr(solutionDistance),1.e-10); RealVector derivative; BOOST_CHECK_SMALL(2*distance.evalDerivative(point,derivative) + correction, 1.e-10); BOOST_CHECK_SMALL(norm_2(derivative) / 200, 1.e-9); } } //test that checks that the result with respect to a linear kernel is correct BOOST_AUTO_TEST_CASE( ObjectiveFunctions_Value_Linear ) { for(std::size_t trial = 0; trial != 10; ++trial){ NormalDistributedPoints problem(30); Data dataset = problem.generateDataset(100,10); LinearKernel<> kernel; KernelExpansion expansion(&kernel,dataset,false,1); RealVector alpha(100); for(std::size_t i = 0; i != 100; ++i){ alpha(i) = expansion.alpha()(i,0) = Rng::gauss(0,1); } //construct the target vector in explicit form RealVector optimalPoint(30,0); for(std::size_t i = 0; i != 100; ++i){ noalias(optimalPoint) += alpha(i) * dataset.element(i); } KernelBasisDistance distance(&expansion,20); for(std::size_t test = 0; test != 10; ++test){ //create input point as well as batch version Data dataset = problem.generateDataset(20,20); RealMatrix& pointBatch = dataset.batch(0); RealVector point(30*20); for(std::size_t i = 0; i != 20; ++i){ noalias(subrange(point,i*30,(i+1)*30)) = row(pointBatch,i); } //find optimal solution RealMatrix K = prod(pointBatch,trans(pointBatch)); RealVector linear = prod(pointBatch,optimalPoint); RealVector beta; blas::solveSymmPosDefSystem(K,beta,linear); RealVector optimalApproximation = prod(beta,pointBatch); double errorOfApproximation = distanceSqr(optimalApproximation,optimalPoint)-norm_sqr(optimalPoint); errorOfApproximation /= 2; //check correctness of the reported error BOOST_CHECK_CLOSE(distance(point),errorOfApproximation,1.e-10); //calculate solution found by the function and check that it is close RealMatrix betaTest = distance.findOptimalBeta(point); RealVector approximationTest = prod(column(betaTest,0),pointBatch); BOOST_CHECK_SMALL(distanceSqr(approximationTest,optimalApproximation),1.e-10); } } } //test the derivative for linear kernel and gaussian distributed points numerically BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelBasisDistance_Derivative_Linear) { for(std::size_t trial = 0; trial != 10; ++trial){ NormalDistributedPoints problem(30); Data dataset = problem.generateDataset(100,10); LinearKernel<> kernel; KernelExpansion expansion(&kernel,dataset,false,1); for(std::size_t i = 0; i != 100; ++i){ for(std::size_t j = 0; j != 1; ++j){ expansion.alpha()(i,j) = Rng::gauss(0,1); } } KernelBasisDistance distance(&expansion,10); for(std::size_t test = 0; test != 10; ++test){ RealVector point(30*10); for(std::size_t i = 0; i != point.size(); ++i){ point(i) = Rng::gauss(0,1); } testDerivative(distance,point,1.e-6,0,0.1); } } } //test the derivative for gaussian kernel and gaussian distributed points numerically BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelBasisDistance_Derivative_Gaussian) { for(std::size_t trial = 0; trial != 10; ++trial){ NormalDistributedPoints problem(30); Data dataset = problem.generateDataset(100,10); GaussianRbfKernel<> kernel(0.5); KernelExpansion expansion(&kernel,dataset,false,1); for(std::size_t i = 0; i != 100; ++i){ for(std::size_t j = 0; j != 1; ++j){ expansion.alpha()(i,j) = Rng::gauss(0,1); } } KernelBasisDistance distance(&expansion,10); for(std::size_t test = 0; test != 10; ++test){ RealVector point(30*10); for(std::size_t i = 0; i != point.size(); ++i){ point(i) = Rng::gauss(0,1); } testDerivative(distance,point,1.e-6,0,0.1); } } } 
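// Editor's note (added commentary, not original code): the linear-kernel value
// test above relies on the normal equations. For basis rows X (pointBatch) and
// target t (optimalPoint), the coefficients minimizing ||beta^T X - t||^2 solve
// (X X^T) beta = X t, which is what the test computes via, schematically,
//
//   K = prod(pointBatch, trans(pointBatch));    // Gram matrix X X^T
//   linear = prod(pointBatch, optimalPoint);    // right-hand side X t
//   blas::solveSymmPosDefSystem(K, beta, linear);
//   approx = prod(beta, pointBatch);            // projection of t onto span(X)
//
// KernelBasisDistance generalizes this: the Gram matrix of the basis under an
// arbitrary kernel replaces X X^T.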
BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/KernelTargetAlignment.cpp000066400000000000000000000174641314655040000242550ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for the (negative) kernel target alignment * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_KernelTargetAlignment #include #include #include "TestObjectiveFunction.h" using namespace shark; class KTAFixture { public: KTAFixture():numInputs(100),dims(10){ std::vector inputs(numInputs, RealVector(dims)); std::vector labels(numInputs); std::vector labelsRegression(numInputs,RealVector(1)); for(std::size_t i = 0; i != numInputs; ++i){ labels[i] = i%2; labelsRegression[i](0) = Rng::gauss(1,2); for(std::size_t j = 0; j != dims; ++j) inputs[i](j) = Rng::uni(j-1.0+3*labels[i],j+1.0+3*labels[i]); } data = createLabeledDataFromRange(inputs,labels,10); dataRegression = createLabeledDataFromRange(inputs,labelsRegression,10); //center data RealVector mean(dims,0.0); for(std::size_t i = 0; i != numInputs; ++i) mean += inputs[i]; mean /= numInputs; for(std::size_t i = 0; i != numInputs; ++i) inputs[i]-=mean; dataCentered = createLabeledDataFromRange(inputs,labels,10); //calculate Y y.resize(numInputs); y.clear(); Y.resize(numInputs,numInputs); for(std::size_t i = 0; i != numInputs; ++i){ for(std::size_t j = 0; j != numInputs; ++j){ Y(i,j) = 1; if(data.element(i).label != data.element(j).label) Y(i,j) = -1; y(i)+=Y(i,j); } } y/=numInputs; meanY=sum(y)/numInputs; } ClassificationDataset data; RegressionDataset dataRegression; ClassificationDataset dataCentered; RealMatrix Y; RealVector y; double meanY; std::size_t numInputs; std::size_t dims; }; BOOST_FIXTURE_TEST_SUITE (ObjectiveFunctions_KernelTargetAlignment, KTAFixture) template RealMatrix calculateCenteredKernelMatrix(Kernel const& kernel, Data const& data){ std::size_t numInputs = data.numberOfElements(); RealMatrix K = calculateRegularizedKernelMatrix(kernel,data); RealVector k = sum_rows(K)/numInputs; double meanK = sum(k)/numInputs; K-= repeat(k,numInputs); K-= trans(repeat(k,numInputs)); K+= blas::repeat(meanK,numInputs,numInputs); return K; } //just sanity check ensuring, that KTA on pre-centered data on a linear kernel is the same //as on uncentered data (this is the only kernel where this equality holds! BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelTargetAlignment_eval_Linear_Centered ) { DenseLinearKernel kernel; KernelTargetAlignment<> kta(data,&kernel); KernelTargetAlignment<> ktaCentered(dataCentered,&kernel); //linear Kernel doesn't have any parameters... 
RealVector input; double evalCentered = ktaCentered.eval(input); double eval = kta.eval(input); BOOST_CHECK_CLOSE(eval,evalCentered,1.e-5); } //calculate the centered KTA and check against trivially calculated result BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelTargetAlignment_eval_Linear ) { DenseLinearKernel kernel; KernelTargetAlignment<> kta(data, &kernel); //calculate analytic result from centered Kernel RealMatrix K = calculateRegularizedKernelMatrix(kernel,dataCentered.inputs()); double KY=sum(element_prod(K,Y)); double KK = sum(element_prod(K,K)); double result = -KY/std::sqrt(KK); //linear Kernel doesn't have any parameters... RealVector input; double eval = kta.eval(input); BOOST_CHECK_CLOSE(eval,result,1.e-5); } //calculate centered KTA against "dumb" calculation BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelTargetAlignment_eval_GaussKernel ){ GaussianRbfKernel<> kernel(1); KernelTargetAlignment<> kta(data, &kernel); //calculate analytic result from centered Kernel RealMatrix K = calculateCenteredKernelMatrix(kernel,data.inputs()); double KY=sum(element_prod(K,Y)); double KK = sum(element_prod(K,K)); double result = -KY/std::sqrt(KK); //linear Kernel doesn't have any parameters... RealVector input(1); input(0) = 1; double eval = kta.eval(input); BOOST_CHECK_CLOSE(eval,result,1.e-5); } //testing the correctness of the calculated formulas BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelTargetAlignment_numerics){ double epsilon = 1.0e-8; //estimate derivatives double estimatedDerivativeKcKc = 0; double estimatedDerivativeYKc = 0; double estimatedDerivative = 0; { GaussianRbfKernel<> kernel(1.0+epsilon); RealMatrix K = calculateCenteredKernelMatrix(kernel,data.inputs()); double KY=sum(element_prod(K,Y)); double KK = sum(element_prod(K,K)); estimatedDerivativeKcKc+=KK; estimatedDerivativeYKc+=KY; estimatedDerivative+=KY/std::sqrt(KK); } { GaussianRbfKernel<> kernel(1.0-epsilon); RealMatrix K = calculateCenteredKernelMatrix(kernel,data.inputs()); double KY=sum(element_prod(K,Y)); double KK = sum(element_prod(K,K)); estimatedDerivativeKcKc-=KK; estimatedDerivativeYKc-=KY; estimatedDerivative-=KY/std::sqrt(KK); } estimatedDerivativeKcKc/=2*epsilon; estimatedDerivativeYKc/=2*epsilon; estimatedDerivative/=2*epsilon; //calculate derivatives GaussianRbfKernel<> kernel(1.0); RealMatrix K = calculateRegularizedKernelMatrix(kernel,data.inputs()); RealMatrix Kc = calculateCenteredKernelMatrix(kernel,data.inputs()); RealVector k = sum_rows(K)/numInputs; double meanK = sum(k)/numInputs; double YKc=sum(element_prod(Kc,Y)); double KcKc = sum(element_prod(Kc,Kc)); //()' RealMatrix WKcKc = 2*(K-repeat(k, numInputs) -trans(repeat(k, numInputs))+ blas::repeat(meanK,numInputs,numInputs)); double derivativeKcKc = calculateKernelMatrixParameterDerivative(kernel,data.inputs(),WKcKc)(0); RealMatrix WYKc = Y-repeat(y, numInputs) -trans(repeat(y, numInputs))+ blas::repeat(meanY,numInputs,numInputs); double derivativeYKc = calculateKernelMatrixParameterDerivative(kernel,data.inputs(),WYKc)(0); BOOST_CHECK_CLOSE(derivativeKcKc,estimatedDerivativeKcKc,0.0001); BOOST_CHECK_CLOSE(derivativeYKc,estimatedDerivativeYKc,0.0001); double derivative = KcKc*derivativeYKc - 0.5*YKc* derivativeKcKc; derivative /= KcKc*sqrt(KcKc); BOOST_CHECK_CLOSE(derivative,estimatedDerivative,0.0001); } BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelTargetAlignment_evalDerivative_GaussKernel ) { GaussianRbfKernel<> kernel(1); KernelTargetAlignment<> kta(data,&kernel); for(std::size_t i = 0; i != 100; ++i){ RealVector input(1); input(0) 
= Rng::uni(0.1,1); testDerivative(kta,input); } } BOOST_AUTO_TEST_CASE( ObjectiveFunctions_KernelTargetAlignment_evalDerivative_GaussKernel_Regression ) { GaussianRbfKernel<> kernel(1); KernelTargetAlignment kta(dataRegression,&kernel); for(std::size_t i = 0; i != 100; ++i){ RealVector input(1); input(0) = Rng::uni(0.1,1); testDerivative(kta,input); } } BOOST_AUTO_TEST_SUITE_END()Shark-3.1.4/Test/ObjectiveFunctions/LooError.cpp000066400000000000000000000046051314655040000215630ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for the leave one out error * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_LooError #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_LooError) BOOST_AUTO_TEST_CASE( ObjectiveFunctions_LooError ) { std::vector inputs(5, RealVector(2)); inputs[0](0) = 0.0; inputs[0](1) = 0.0; inputs[1](0) = 1.0; inputs[1](1) = 0.0; inputs[2](0) = 0.0; inputs[2](1) = 1.0; inputs[3](0) = 1.0; inputs[3](1) = 1.0; inputs[4](0) = 0.5; inputs[4](1) = 0.5; std::vector targets(5, RealVector(1)); targets[0](0) = 0.0; targets[1](0) = 0.0; targets[2](0) = 0.0; targets[3](0) = 0.0; targets[4](0) = 1.0; RegressionDataset dataset = createLabeledDataFromRange(inputs, targets); SquaredLoss<> loss; LinearModel<> model(2, 1, true); LinearRegression trainer; LooError > loo(dataset, &model, &trainer, &loss); // check the value of the objective function double value = loo.eval(); double should = 5.0 / 9.0; // manually computed reference value BOOST_CHECK_SMALL(value - should, 1e-10); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/LooErrorCSvm.cpp000066400000000000000000000134741314655040000223600ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Unit test for the leave one out error for C-SVMs. * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_LooErrorCSvm #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_LooErrorCSvm) BOOST_AUTO_TEST_CASE( ObjectiveFunctions_LooErrorCSvm_Simple ) { std::cout<<"testing simple test"< inputs(5, RealVector(2)); inputs[0](0) = 0.0; inputs[0](1) = 0.0; inputs[1](0) = 1.0; inputs[1](1) = 0.0; inputs[2](0) = 0.0; inputs[2](1) = 1.0; inputs[3](0) = 1.0; inputs[3](1) = 1.0; inputs[4](0) = 0.5; inputs[4](1) = 0.4; std::vector targets(5); targets[0] = 0; targets[1] = 0; targets[2] = 1; targets[3] = 1; targets[4] = 1; ClassificationDataset dataset = createLabeledDataFromRange(inputs, targets); // SVM setup LinearKernel<> kernel; double C = 1e100; // hard margin CSvmTrainer trainer(&kernel, C,true); // efficiently computed loo error LooErrorCSvm loosvm(dataset, &kernel, true); double value = loosvm.eval(trainer.parameterVector()); // brute force computation ZeroOneLoss loss; KernelClassifier ke; LooError,unsigned int> loo(dataset, &ke, &trainer, &loss); double standardLoo = loo.eval(); // compare to brute force computation // and to geometric intuition (3 out of // 5 are errors) BOOST_CHECK_SMALL(value - standardLoo, 1e-10); BOOST_CHECK_SMALL(value - 0.6, 1e-10); } BOOST_AUTO_TEST_CASE( ObjectiveFunctions_LooErrorCSvm_NoBias_Simple ) { std::cout<<"testing simple test without bias"< inputs(5, RealVector(2)); inputs[0](0) = 0.0; inputs[0](1) = 0.0; inputs[1](0) = 1.0; inputs[1](1) = 0.0; inputs[2](0) = 0.0; inputs[2](1) = 1.0; inputs[3](0) = 1.0; inputs[3](1) = 1.0; inputs[4](0) = 0.5; inputs[4](1) = 0.4; std::vector targets(5); targets[0] = 0; targets[1] = 0; targets[2] = 1; targets[3] = 1; targets[4] = 1; ClassificationDataset dataset = createLabeledDataFromRange(inputs, targets); // SVM setup LinearKernel<> kernel; double C = 1e100; // hard margin CSvmTrainer trainer(&kernel, C,false); // efficiently computed loo error LooErrorCSvm loosvm(dataset, &kernel, false); double value = loosvm.eval(trainer.parameterVector()); // brute force computation ZeroOneLoss loss; KernelClassifier ke; LooError,unsigned int> loo(dataset, &ke, &trainer, &loss); double standardLoo = loo.eval(); // compare to brute force computation BOOST_CHECK_SMALL(value - standardLoo, 1e-10); } BOOST_AUTO_TEST_CASE( ObjectiveFunctions_LooErrorCSvm_Chessboard ) { std::cout<<"testing test on chessboard"< kernel; double C = 10; CSvmTrainer trainer(&kernel, C,true); RealVector parameters = trainer.parameterVector(); // brute force computation ZeroOneLoss loss; KernelClassifier ke; LooError,unsigned int> loo(dataset, &ke, &trainer, &loss); double standardLoo = loo.eval(); // efficiently computed loo error LooErrorCSvm loosvm(dataset, &kernel, true); double value = loosvm.eval(parameters); // compare to brute force 
computation // and to geometric intuition BOOST_CHECK_SMALL(value - standardLoo, 1e-10); } BOOST_AUTO_TEST_CASE( ObjectiveFunctions_LooErrorCSvm_Chessboard_NoBias ) { std::cout<<"testing test on chessboard without bias"< kernel; double C = 10; CSvmTrainer trainer(&kernel, C,false); trainer.setMinAccuracy(.000001); RealVector parameters = trainer.parameterVector(); // efficiently computed loo error std::cout<<"efficient"< loosvm(dataset, &kernel, false); double value = loosvm.eval(parameters, trainer.stoppingCondition()); std::cout<<"\n\nbrute force"< loss; KernelClassifier ke; LooError,unsigned int> loo(dataset, &ke, &trainer, &loss); double standardLoo = loo.eval(); // compare to brute force computation // and to geometric intuition BOOST_CHECK_SMALL(value - standardLoo, 1e-10); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/NegativeGaussianProcessEvidence.cpp000066400000000000000000000106531314655040000262570ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief Test case for optimization of the hyperparameters of a * Gaussian Process/Regularization Network using evidence/marginal * likelihood maximization. * * * * \author Christian Igel * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ #include #include #include #include #include #include #include #include #include #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_EVIDENCE #include #include #include "TestObjectiveFunction.h" using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_NegativeGaussianProcessEvidence) BOOST_AUTO_TEST_CASE( GAUUSIAN_PROCESS_EVIDENCE ) { // experiment settings Rng::seed( 0 ); const unsigned int ell = 100; const unsigned int tests = 10000; const double gamma = 100.; const double beta = 1000; const bool unconstrained = true; GaussianRbfKernel<> kernel(gamma, unconstrained); // parameterization for unconstraint optimization // loss for regression SquaredLoss<> loss; // generate dataset Wave prob; RegressionDataset trainingData = prob.generateDataset(ell); RegressionDataset testData = prob.generateDataset(tests); // define the machine KernelExpansion model; // define the corresponding trainer RegularizationNetworkTrainer trainer(&kernel, beta, unconstrained); /* * Check whether evidence computations coincide. */ // compute evidence NegativeGaussianProcessEvidence<> evidence(trainingData, &kernel, unconstrained); RealVector params = trainer.parameterVector(); double prevEvidence = evidence.eval(params); // compute gradient SingleObjectiveFunction::FirstOrderDerivative derivative; BOOST_CHECK_SMALL(prevEvidence - evidence.evalDerivative(params, derivative), 1.e-10); /* * Check whether gradient is correct. */ for(std::size_t test = 0; test != 100; ++test){ RealVector parameters(params.size()); for(std::size_t i = 0; i != params.size(); ++i){ parameters(i) = Rng::uni(-2,2); } testDerivative(evidence,parameters,1.e-8); } /* * Check whether optimization works. 
*/ IRpropPlus rprop; rprop.init(evidence, params); trainer.setParameterVector(rprop.solution().point); trainer.train(model, trainingData); Data output = model(trainingData.inputs()); double prevTrainError = loss.eval(trainingData.labels(), output); output = model(testData.inputs()); double prevTestError = loss.eval(testData.labels(), output); for (unsigned int iter1=0; iter1<4; iter1++) { for (unsigned int iter2=0; iter2<10; iter2++) rprop.step(evidence); trainer.setParameterVector(rprop.solution().point); trainer.train(model, trainingData); output = model(trainingData.inputs()); double trainError = loss.eval(trainingData.labels(), output); output = model(testData.inputs()); double testError = loss.eval(testData.labels(), output); double e = rprop.solution().value; // we are considering the negative log evidence, so // both evidence as well as test error should decrease BOOST_CHECK(trainError < prevTrainError); BOOST_CHECK(e < prevEvidence); BOOST_CHECK(testError < prevTestError); prevEvidence = e; prevTestError = testError; } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/NegativeLogLikelihood.cpp000066400000000000000000000036021314655040000242240ustar00rootroot00000000000000#include #include #include #include #include #define BOOST_TEST_MODULE ObjFunct_NegativeLogLikelihood #include #include #include "TestObjectiveFunction.h" using namespace shark; struct Fixture { Fixture(): covariance(10,10,0.0),offset(10){ for(std::size_t i = 0; i != 10; ++i){ covariance(i,i) = 5; offset(i) = i; } NormalDistributedPoints problem(covariance,offset); data = problem.generateDataset(6000,100); } RealMatrix covariance; RealVector offset; UnlabeledData data; }; BOOST_FIXTURE_TEST_SUITE (ObjectiveFunctions_NegativeLogLikelihood, Fixture) BOOST_AUTO_TEST_CASE( ObjFunct_NegativeLogLikelihood_Derivative ){ RBFLayer model(10,1); NegativeLogLikelihood function(data,&model); std::size_t numTrials = 1000; RealVector point(model.numberOfParameters()); for(std::size_t t = 0; t != numTrials; ++t){ for(std::size_t i = 0; i != model.numberOfParameters()-1; ++i){ point(i) = Rng::gauss(0,1); } point(model.numberOfParameters()-1) = Rng::uni(-5,-3); testDerivative(function,point,1.e-6); } } BOOST_AUTO_TEST_CASE( ObjFunct_NegativeLogLikelihood_Optimize ){ RBFLayer model(10,1); RealVector point(model.numberOfParameters()); for(std::size_t i = 0; i != model.numberOfParameters()-1; ++i){ point(i) = Rng::gauss(0,1); } model.setParameterVector(point); NegativeLogLikelihood function(data,&model); IRpropPlus optimizer; optimizer.init(function); for(std::size_t i = 0; i != 100; ++i){ optimizer.step(function); } model.setParameterVector(optimizer.solution().point); BOOST_CHECK_SMALL(norm_inf(row(model.centers(),0)-offset), 0.1); std::cout< #include #include #include #include #define BOOST_TEST_MODULE ML_NoisyErrorFunction #include #include using namespace shark; using namespace std; struct TestFunction : public SingleObjectiveFunction { typedef SingleObjectiveFunction Base; std::string name() const { return "TestFunction"; } RealVector weights; TestFunction():weights(3){ weights(0)=1; weights(1)=2; weights(2)=-1; } std::size_t numberOfVariables()const{ return 3; } virtual double eval(RealVector const& pattern)const { return inner_prod(weights,pattern); } }; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_NoisyErrorFunction) BOOST_AUTO_TEST_CASE( ML_NoisyErrorFunction ) { //create regression data from the testfunction TestFunction function; std::vector data; std::vector target; RealVector input(3); RealVector output(1); 
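// Editor's note (added for clarity, not in the original test): the targets
// generated below are exact responses of the linear TestFunction, so the data
// is noise free and the expected loss at the optimum is exactly zero; the only
// randomness in NoisyErrorFunction comes from subsampling one example per step
// (batch size 1, i.e. stochastic gradient descent). Schematically, each step
// applies the single-sample gradient of the squared error of the linear model,
//
//   w <- w - eta * 2 * (inner_prod(w, x) - y) * x;
//
// which is why the final BOOST_CHECK_SMALL(error, 1.e-15) is a sound check.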
for (size_t i=0; i<1000; i++) { for(size_t j=0;j!=3;++j) { input(j)=Rng::uni(-1,1); } data.push_back(input); output(0)=function.eval(input); target.push_back(output); } RegressionDataset dataset = createLabeledDataFromRange(data,target); //startingPoint RealVector point(3); point(0) = 0; point(1) = 0; point(2) = 0; SteepestDescent optimizer; SquaredLoss<> loss; LinearModel<> model(3); // batchsize 1 corresponds to stochastic gradient descent NoisyErrorFunction mse(dataset,&model,&loss,1); optimizer.init(mse, point); // train the cmac double error = 0.0; for (size_t iteration=0; iteration<501; ++iteration){ optimizer.step(mse); if (iteration % 100 == 0){ error = optimizer.solution().value; RealVector best = optimizer.solution().point; std::cout << iteration << " error:" << error << " parameter:" << best << std::endl; } } cout << "Optimization done. Error:" << error << std::endl; BOOST_CHECK_SMALL(error, 1.e-15); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/RadiusMarginQuotient.cpp000066400000000000000000000055651314655040000241440ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for the radius margin quotient * * * * * \author T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_RadiusMarginQuotient #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_RadiusMarginQuotient) BOOST_AUTO_TEST_CASE( ObjectiveFunctions_RadiusMarginQuotient ) { std::vector inputs(4, RealVector(1)); inputs[0](0) = 0.0; inputs[1](0) = 0.1; inputs[2](0) = 0.2; inputs[3](0) = 0.3; std::vector targets(4); targets[0] = 0; targets[1] = 0; targets[2] = 1; targets[3] = 1; ClassificationDataset dataset = createLabeledDataFromRange(inputs, targets); RealVector parameters(1, 0.5); GaussianRbfKernel<> kernel; kernel.setParameterVector(parameters); RadiusMarginQuotient rm(dataset, &kernel); // check the value of the objective function double radius_squared = kernel.featureDistanceSqr(dataset.element(0).input, dataset.element(3).input) / 4.0; double margin_squared = kernel.featureDistanceSqr(dataset.element(1).input, dataset.element(2).input) / 4.0; double should = radius_squared / margin_squared; double quotient = rm.eval(parameters); BOOST_CHECK_SMALL(quotient - should, 0.01); // numerically check the derivative double delta = 0.001; parameters(0) += delta; double right = rm.eval(parameters); parameters(0) -= 2 * delta; double left = rm.eval(parameters); double estimate = (right - left) / (2 * delta); parameters(0) = 0.5; RadiusMarginQuotient::FirstOrderDerivative derivative; double quotientDerivative = rm.evalDerivative(parameters, derivative); BOOST_CHECK_SMALL(quotientDerivative - should, 0.01); BOOST_CHECK_SMALL(derivative(0) - estimate, 0.01); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/SparseAutoencoderError.cpp000066400000000000000000000240321314655040000244540ustar00rootroot00000000000000#define BOOST_TEST_MODULE ObjFunct_SparseAutoencoderError #include #include #include #include #include #include "TestObjectiveFunction.h" using namespace shark; //allways 0 class NullLoss : public AbstractLoss { public: std::string name() const { return "NullLoss"; } double eval(BatchLabelType const&, BatchOutputType const&) const{ return 0; } double evalDerivative( RealMatrix const&, RealMatrix const& prediction, RealMatrix& gradient ) const { gradient.resize(prediction.size1(),prediction.size2()); gradient.clear(); return 0; } }; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_SparseAutoencoderError) BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Value ){ std::size_t Inputs = 100; std::size_t Iterations = 100; std::size_t Dimensions = 5; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1);; } } RegressionDataset dataset = createLabeledDataFromRange(input,input,25); Autoencoder model; model.setStructure(Dimensions,5); NullLoss loss; for(std::size_t iter = 0; iter != Iterations; 
++iter){ double roh = Rng::uni(0.1,0.9); double beta = Rng::uni(0.1,0.9); SparseAutoencoderError error(dataset, &model,&loss,roh,beta); //evaluate error and check that its consistent SparseAutoencoderError::FirstOrderDerivative derivative; initRandomNormal(model,1); double errorValue = error.eval(model.parameterVector()); double errorDerivativeValue = error.evalDerivative(model.parameterVector(),derivative); BOOST_CHECK_CLOSE(errorValue,errorDerivativeValue, 0.01); //now check that the error value reported is actually correct. //evaluate the inputs for the model RealVector activations(5,0.0); boost::shared_ptr state = model.createState(); RealMatrix result; for(std::size_t i = 0; i != 4; ++i){ model.eval(dataset.batch(i).input,result,*state); //sum hidden activations activations+=sum_rows(model.hiddenResponses(*state)); } activations /= Inputs; //calculate KL-divergence double errorTest = 0; for(std::size_t i = 0; i != 5; ++i){ double a = activations(i); errorTest += roh*std::log(roh/a); errorTest += (1-roh)*std::log((1-roh)/(1-a)); } errorTest*=beta; BOOST_CHECK_SMALL(errorTest-errorValue,1.e-14); } } BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Value_Tied ){ std::size_t Inputs = 100; std::size_t Iterations = 100; std::size_t Dimensions = 5; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1); } } RegressionDataset dataset = createLabeledDataFromRange(input,input,25); TiedAutoencoder model; model.setStructure(Dimensions,5); NullLoss loss; for(std::size_t iter = 0; iter != Iterations; ++iter){ double roh = Rng::uni(0.1,0.9); double beta = Rng::uni(0.1,0.9); SparseAutoencoderError error(dataset, &model,&loss,roh,beta); //evaluate error and check that its consistent SparseAutoencoderError::FirstOrderDerivative derivative; initRandomNormal(model,1); double errorValue = error.eval(model.parameterVector()); double errorDerivativeValue = error.evalDerivative(model.parameterVector(),derivative); BOOST_CHECK_CLOSE(errorValue,errorDerivativeValue, 0.01); //now check that the error value reported is actually correct. 
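// Editor's note (added commentary): "correct" means matching the hand-computed
// sparsity penalty. With target activation roh and mean activation a_j of
// hidden unit j over all inputs, SparseAutoencoderError adds
//
//   beta * sum_j [ roh*log(roh/a_j) + (1-roh)*log((1-roh)/(1-a_j)) ],
//
// i.e. beta times the KL divergence KL(roh || a_j) summed over the hidden
// units; the reconstruction term vanishes here because NullLoss is used.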
//evaluate the inputs for the model RealVector activations(5,0.0); boost::shared_ptr state = model.createState(); RealMatrix result; for(std::size_t i = 0; i != 4; ++i){ model.eval(dataset.batch(i).input,result,*state); //sum hidden activations activations+=sum_rows(model.hiddenResponses(*state)); } activations /= Inputs; //calculate KL-divergence double errorTest = 0; for(std::size_t i = 0; i != 5; ++i){ double a = activations(i); errorTest += roh*std::log(roh/a); errorTest += (1-roh)*std::log((1-roh)/(1-a)); } errorTest*=beta; BOOST_CHECK_SMALL(errorTest-errorValue,1.e-14); } } //tests, whether the error function is processed correctly BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Loss ){ std::size_t Inputs = 10; std::size_t Iterations = 10; std::size_t Dimensions = 5; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1); } } RegressionDataset dataset = createLabeledDataFromRange(input,input,5); Autoencoder model; model.setStructure(Dimensions,5); SquaredLoss loss; SparseAutoencoderError error(dataset,&model,&loss,0.5,0.0); double errortest = error.eval(model.parameterVector()); BOOST_CHECK_SMALL(errortest-loss(dataset.inputs(),model(dataset.inputs())),1.e-15); for(std::size_t i = 0; i != Iterations; ++i){ initRandomNormal(model,0.1); RealVector point = model.parameterVector(); SparseAutoencoderError::FirstOrderDerivative derivative; RealVector estimatedDerivative = estimateDerivative(error,point,1.e-10); error.evalDerivative(point,derivative); double errorDer= norm_inf(estimatedDerivative - derivative); BOOST_CHECK_SMALL(errorDer, 1.e-4); } } BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Derivative_Loss){ std::size_t Inputs = 100; std::size_t Iterations = 1000; std::size_t Dimensions = 5; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1); } } RegressionDataset dataset = createLabeledDataFromRange(input,input,25); Autoencoder model; model.setStructure(Dimensions,2); SquaredLoss<> loss; for(std::size_t i = 0; i != Iterations; ++i){ double roh = Rng::uni(0.1,0.9); double beta = Rng::uni(0.1,0.9); SparseAutoencoderError error(dataset,&model,&loss,roh,beta); initRandomNormal(model,0.1); RealVector point = model.parameterVector(); SparseAutoencoderError::FirstOrderDerivative derivative; RealVector estimatedDerivative = estimateDerivative(error,point,1.e-10); error.evalDerivative(point,derivative); double errorm= norm_inf(estimatedDerivative - derivative); BOOST_CHECK_SMALL(errorm, 1.e-4); } } BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Derivative){ std::size_t Inputs = 100; std::size_t Iterations = 1000; std::size_t Dimensions = 5; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1); } } RegressionDataset dataset = createLabeledDataFromRange(input,input,25); Autoencoder model; model.setStructure(Dimensions,2); NullLoss loss; for(std::size_t i = 0; i != Iterations; ++i){ double roh = Rng::uni(0.1,0.9); double beta = Rng::uni(0.1,0.9); SparseAutoencoderError error(dataset,&model,&loss,roh,beta); initRandomNormal(model,0.1); RealVector point = model.parameterVector(); SparseAutoencoderError::FirstOrderDerivative derivative; RealVector estimatedDerivative = estimateDerivative(error,point,1.e-10); error.evalDerivative(point,derivative); double errorm= 
norm_inf(estimatedDerivative - derivative); BOOST_CHECK_SMALL(errorm, 1.e-4); } } BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Derivative_Tied){ std::size_t Inputs = 100; std::size_t Iterations = 1000; std::size_t Dimensions = 5; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1); } } RegressionDataset dataset = createLabeledDataFromRange(input,input,25); TiedAutoencoder model; model.setStructure(Dimensions,2); NullLoss loss; for(std::size_t i = 0; i != Iterations; ++i){ double roh = Rng::uni(0.1,0.9); double beta = Rng::uni(0.1,0.9); SparseAutoencoderError error(dataset,&model,&loss,roh,beta); initRandomNormal(model,0.1); RealVector point = model.parameterVector(); SparseAutoencoderError::FirstOrderDerivative derivative; RealVector estimatedDerivative = estimateDerivative(error,point,1.e-10); error.evalDerivative(point,derivative); double errorm= norm_inf(estimatedDerivative - derivative); BOOST_CHECK_SMALL(errorm, 1.e-4); } } //this test only tests, whether the error achieves the right mean activation BOOST_AUTO_TEST_CASE( SparseAutoencoderError_Derivative_GradDesc) { std::size_t Inputs = 100; std::size_t Iterations = 100; std::size_t Dimensions = 5; double roh = 0.3; double beta = 1.0; std::vector input(Inputs,RealVector(Dimensions)); for(std::size_t i = 0; i != Inputs; ++i){ for(std::size_t j = 0; j != Dimensions; ++j){ input[i](j) = Rng::uni(-1,1);; } } RegressionDataset dataset = createLabeledDataFromRange(input,input); Autoencoder model; model.setStructure(Dimensions,5); initRandomNormal(model,1); NullLoss loss; SparseAutoencoderError error(dataset,&model,&loss,roh,beta); IRpropPlus optimizer; optimizer.init(error); //now train the network to optimize the hidden units //to have target roh for(std::size_t i = 0; i != Iterations; ++i){ optimizer.step(error); //std::cout< state = model.createState(); model.eval(dataset.batch(0).input,output,*state); //sum hidden activations RealVector activations=sum_rows(model.hiddenResponses(*state)); activations /= Inputs; //check that the mean activation is correct for(std::size_t i = 0; i != 5; ++i){ BOOST_CHECK_SMALL(activations(i)-roh,1.e-5); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/SquaredEpsilonHingeLoss.cpp000066400000000000000000000034501314655040000245670ustar00rootroot00000000000000#include #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_SQUAREDEPSILONHINGELOSS #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_SquaredEpsilonHingeLoss) BOOST_AUTO_TEST_CASE( SQUAREDEPSILONHINGELOSS_EVAL ) { unsigned int maxTests = 10000; unsigned int minDim = 3; unsigned int maxDim = 10; for (unsigned int test = 0; test != maxTests; ++test) { double epsilon = Rng::uni(0,5); SquaredEpsilonHingeLoss loss(epsilon); std::size_t dim = Rng::discrete(minDim,maxDim); //sample point between -10,10 RealMatrix testPoint(5,dim); RealMatrix testLabel(5,dim); RealVector valueResultV(5,0); for(std::size_t i = 0; i != 5; ++i){ for(std::size_t j = 0; j != dim; ++j){ testPoint(i,j) = Rng::uni(-10.0,10.0); testLabel(i,j) = Rng::uni(-10.0,10.0); } valueResultV(i) = 0.5*std::max(0.0, norm_sqr(row(testPoint,i)-row(testLabel,i))-sqr(epsilon)); } double valueResult = sum(valueResultV); //test eval double value = loss.eval(testLabel,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-12); //test evalDerivative (first) RealMatrix derivative; value = 
loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-12); BOOST_REQUIRE_EQUAL(derivative.size1(), 5); BOOST_REQUIRE_EQUAL(derivative.size2(), dim); for(std::size_t i = 0; i != 5; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), RealMatrix(rows(testLabel,i,i+1))); BOOST_CHECK_SMALL(norm_sqr(row(derivative,i) - estimatedDerivative), 1.e-5); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/SquaredHingeLoss.cpp000066400000000000000000000076071314655040000232450ustar00rootroot00000000000000#include #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_SQUAREDHINGELOSS #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_SquaredHingeLoss) BOOST_AUTO_TEST_CASE( SQUAREDHINGELOSS_EVAL_TWOCLASS ) { unsigned int maxTests = 10000; for (unsigned int test = 0; test != maxTests; ++test) { SquaredHingeLoss loss; //sample point between -10,10 RealMatrix testPoint(2,1); testPoint(0,0) = Rng::uni(-10.0,10.0); testPoint(1,0) = Rng::uni(-10.0,10.0); RealMatrix testPoint2D(2,2); testPoint2D(0,0) = testPoint(0,0); testPoint2D(0,1) = -testPoint(0,0); testPoint2D(1,0) = testPoint(1,0); testPoint2D(1,1) = -testPoint(1,0); testPoint2D*=-1; //sample label {-1,1} UIntVector testLabel(2); testLabel(0) = Rng::coinToss(0.5); testLabel(1) = Rng::coinToss(0.5); int label0 = testLabel(0)?1:-1; int label1 = testLabel(1)?1:-1; //the test results double valueResultP[] = { 0.5*sqr(std::max(0.0, 1-testPoint(0,0)*label0)), 0.5*sqr(std::max(0.0, 1-testPoint(1,0)*label1)) }; double valueResult = valueResultP[0]+valueResultP[1]; //test eval double value = loss.eval(testLabel,testPoint); double value2D = loss.eval(testLabel,testPoint2D); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); BOOST_CHECK_SMALL(value2D-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; RealMatrix derivative2D; value = loss.evalDerivative(testLabel, testPoint, derivative); value2D = loss.evalDerivative(testLabel, testPoint2D, derivative2D); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_CHECK_SMALL(value2D - valueResult, 1.e-13); BOOST_REQUIRE_EQUAL(derivative2D.size1(), 2); BOOST_REQUIRE_EQUAL(derivative2D.size2(),2); BOOST_REQUIRE_EQUAL(derivative.size1(), 2); BOOST_REQUIRE_EQUAL(derivative.size2(), 1); for(std::size_t i = 0; i != 2; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), subrange(testLabel,i,i+1)); RealVector estimatedDerivative2D = estimateDerivative(loss, RealMatrix(rows(testPoint2D,i,i+1)), subrange(testLabel,i,i+1)); BOOST_CHECK_SMALL(norm_2(row(derivative,i) - estimatedDerivative), 1.e-5); BOOST_CHECK_SMALL(norm_2(row(derivative2D,i) - estimatedDerivative2D), 1.e-5); } } } BOOST_AUTO_TEST_CASE( SQUAREDHINGELOSS_EVAL_MULTICLASS ) { unsigned int maxTests = 10000; unsigned int minDim = 3; unsigned int maxDim = 10; for (unsigned int test = 0; test != maxTests; ++test) { SquaredHingeLoss loss; std::size_t dim = Rng::discrete(minDim,maxDim); //sample point between -10,10 RealMatrix testPoint(5,dim); UIntVector testLabel(5); RealVector valueResultP(5,0); for(std::size_t i = 0; i != 5; ++i){ testLabel(i) = Rng::discrete(0,dim-1); testPoint(i,testLabel(i)) = Rng::uni(-10.0,10.0); for(std::size_t j = 0; j != dim; ++j){ if(j == testLabel(i)) continue; testPoint(i,j) = Rng::uni(-10.0,10.0); valueResultP[i]+= 0.5*sqr(std::max(0.0, 1-0.5*(testPoint(i,testLabel(i))- testPoint(i,j)))); } } 
double valueResult = sum(valueResultP); //test eval double value = loss.eval(testLabel,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-11); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-11); BOOST_REQUIRE_EQUAL(derivative.size1(), 5); BOOST_REQUIRE_EQUAL(derivative.size2(), dim); for(std::size_t i = 0; i != 5; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), UIntVector(subrange(testLabel,i,i+1))); BOOST_CHECK_SMALL(norm_2(row(derivative,i) - estimatedDerivative), 1.e-5); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/SquaredLoss.cpp000066400000000000000000000113241314655040000222610ustar00rootroot00000000000000#include #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_SQUAREDLOSS #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_SquaredLoss) BOOST_AUTO_TEST_CASE( SQUAREDLOSS_EVAL ) { unsigned int maxTests = 10000; for (unsigned int test = 0; test != maxTests; ++test) { SquaredLoss<> loss; //sample point between -10,10 RealMatrix testPoint(1,2); testPoint(0,0) = Rng::uni(-10.0,10.0); testPoint(0,1) = Rng::uni(-10.0,10.0); //sample label between -10,10 RealMatrix testLabel(1,2); testLabel(0,0) = Rng::uni(-10.0,10.0); testLabel(0,1) = Rng::uni(-10.0,10.0); //the test results double valueResult = sqr(testPoint(0,0)-testLabel(0,0))+sqr(testPoint(0,1)-testLabel(0,1)); RealVector estimatedDerivative = estimateDerivative(loss, testPoint, testLabel); //test eval double value = loss.eval(testLabel,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_CHECK_SMALL(norm_2(row(derivative,0) - estimatedDerivative), 1.e-5); } } BOOST_AUTO_TEST_CASE( SQUAREDLOSS_EVAL_Classification ) { unsigned int maxTests = 10000; for (unsigned int test = 0; test != maxTests; ++test) { SquaredLoss loss; SquaredLoss lossOneHot; //sample point between -10,10 RealMatrix testPoint(1,3); testPoint(0,0) = Rng::uni(-10.0,10.0); testPoint(0,1) = Rng::uni(-10.0,10.0); testPoint(0,2) = Rng::uni(-10.0,10.0); //sample class label UIntVector testLabelDisc(1); testLabelDisc(0) = (unsigned int)Rng::discrete(0,2); RealMatrix testLabel(1,3); testLabel(0,testLabelDisc(0))=1; //the test results double valueResult = lossOneHot.eval(testLabel,testPoint); RealVector estimatedDerivative = estimateDerivative(lossOneHot, testPoint, testLabel); //test eval double value = loss.eval(testLabelDisc,testPoint); BOOST_CHECK_SMALL(value-valueResult, 1.e-13); //test evalDerivative (first) RealMatrix derivative; value = loss.evalDerivative(testLabelDisc, testPoint, derivative); BOOST_CHECK_SMALL(value - valueResult, 1.e-13); BOOST_CHECK_SMALL(norm_2(row(derivative,0) - estimatedDerivative), 1.e-5); } } BOOST_AUTO_TEST_CASE( SQUAREDLOSS_EVAL_Sequence ) { unsigned int maxTests = 100; unsigned int dims = 10; unsigned int batchSize = 5; unsigned int sequenceLength = 30; for (unsigned int test = 0; test != maxTests; ++test) { SquaredLoss loss(10); SquaredLoss lossSingleSequence; //create the sequences as well as ground truth std::vector sequenceBatch; std::vector sequenceBatchLabel; std::vector sequenceBatchMat; std::vector sequenceBatchMatLabel; for(std::size_t b = 0; b != 5; ++b){ Sequence seq(10, RealVector(dims,0.0)); 
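// Editor's note (added for clarity): the reference values accumulated above
// correspond to the multi-class squared hinge loss with rescaled margins,
//
//   L(y, f) = sum_{j != y} 0.5 * max(0, 1 - 0.5*(f_y - f_j))^2,
//
// so a wrong class j contributes as soon as the margin 0.5*(f_y - f_j) falls
// below 1; the eval/evalDerivative checks below verify SquaredHingeLoss
// against exactly this formula.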
Sequence seqLabel(10, RealVector(dims,0.0)); RealMatrix seqMat(sequenceLength-10, dims); RealMatrix seqMatLabel(sequenceLength-10, dims); for(std::size_t i = 0; i != sequenceLength -10; ++i){ for(std::size_t j = 0; j != dims; ++j){ seqMat(i,j) = Rng::gauss(0,1); seqMatLabel(i,j) = Rng::gauss(0,1); } seq.push_back(row(seqMat,i)); seqLabel.push_back(row(seqMatLabel,i)); } sequenceBatch.push_back(seq); sequenceBatchMat.push_back(seqMat); sequenceBatchLabel.push_back(seqLabel); sequenceBatchMatLabel.push_back(seqMatLabel); } //create ground truth error and gradient double errorTruth = 0; std::vector gradientTruth(batchSize); for(std::size_t b = 0; b != batchSize; ++b){ errorTruth += lossSingleSequence.eval(sequenceBatchMatLabel[b],sequenceBatchMat[b]); lossSingleSequence.evalDerivative(sequenceBatchMatLabel[b],sequenceBatchMat[b], gradientTruth[b]); } double error = loss.eval(sequenceBatchLabel,sequenceBatch); std::vector gradient; double errorDerivative = loss.evalDerivative(sequenceBatchLabel,sequenceBatch,gradient); BOOST_CHECK_CLOSE(error, errorDerivative, 1.e-12); BOOST_CHECK_CLOSE(error, errorTruth, 1.e-10); for(std::size_t b = 0; b != batchSize; ++b){ for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != dims; ++j){ BOOST_CHECK_EQUAL(gradient[b][i][j],0.0); } } for(std::size_t i = 10; i != sequenceLength; ++i){ for(std::size_t j = 0; j != dims; ++j){ BOOST_CHECK_CLOSE(gradient[b][i][j],gradientTruth[b](i-10,j),1.e-10); } } } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/SvmLogisticInterpretation.cpp000066400000000000000000000245561314655040000252220ustar00rootroot00000000000000//=========================================================================== /*! * * * \brief unit test for maximum-likelihood model selection for support vector machines. * * * * * \author M. Tuma, T. Glasmachers * \date 2011 * * * \par Copyright 1995-2015 Shark Development Team * *
* This file is part of Shark. * * * Shark is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Shark is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Shark. If not, see . * */ //=========================================================================== #include #include #include #include #include #include #include #include #define BOOST_TEST_MODULE ObjectiveFunctions_SvmLogisticInterpretation #include #include using namespace shark; const char test[] = "3.2588947676e+00 5.4190801643e-01 1\n\ 3.3400343591e+00 5.0794724748e-01 1\n\ 3.6535034226e+00 8.8413617108e-01 1\n\ 1.2326682014e+00 3.9016160648e-01 1\n\ 1.1139928736e+00 7.5352790393e-01 1\n\ 2.9033558527e+00 3.8823711155e+00 1\n\ 3.8286677990e+00 4.3944700249e-01 1\n\ 1.4284671750e-01 1.4451760054e+00 1\n\ 8.4769732878e-01 3.7359729828e+00 1\n\ 3.1688293004e+00 3.5137225809e+00 0\n\ 2.0146507099e+00 2.6229627840e+00 0\n\ 3.1924234265e+00 3.2011218928e+00 0\n\ 5.6754538044e-01 1.9133937545e-02 0\n\ 2.9625889780e+00 2.9725298844e+00 0\n\ 1.5689080786e+00 1.6883507241e+00 0\n\ 6.9546068739e-01 6.8474675901e-01 0\n\ 3.9715252072e+00 3.8300273186e+00 0\n\ 3.8595541352e+00 3.8707797471e+00 0\n"; // mtq: TODO: add some trivial tests when the kernel-feasibility issue is settled, along the lines of: ////// mlms_score.proposeStartingPoint( params ); ////// BOOST_CHECK( mlms_score.isFeasible( params) ); // on the above mini chessboard problem, optimize CSVM parameters using rprop on the SvmLogisticInterpretation. // after every rprop step, assert that the numerical and analytical derivative coincide. 
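// Editor's sketch (illustrative addition, not part of the original file): a
// generic finite-difference check of the kind described above. The
// relative-step scheme here is an assumption modeled on the
// NUMERICAL_INCREASE_FACTOR constant defined in the tests below; the helper is
// hypothetical and unused.
namespace {
template<class Objective>
double numericalDerivative(Objective const& f, RealVector point, std::size_t i, double factor){
	double fx = f.eval(point);
	double h = point(i) * (factor - 1.0); // relative step in coordinate i
	point(i) += h;
	return (f.eval(point) - fx) / h;      // one-sided difference quotient
}
}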
BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_SvmLogisticInterpretation) BOOST_AUTO_TEST_CASE( ObjectiveFunctions_SvmLogisticInterpretation_Small_Chessboard ) { double NUMERICAL_INCREASE_FACTOR = 1.00001; //~ std::stringstream ss(test); //~ std::vector x; //~ std::vector y; // create dataset //~ detail::import_csv( x, y, ss, LAST_COLUMN, " ", ""); ClassificationDataset training_dataset; csvStringToData(training_dataset,test,LAST_COLUMN,0); //~ ClassificationDataset training_dataset = createLabeledDataFromRange(x,y); std::size_t num_eles = training_dataset.numberOfElements(); std::size_t num_folds = 2; std::vector< std::size_t > indices(num_eles); for(std::size_t i = 0; i cv_folds = createCVIndexed( training_dataset, num_folds, indices ); GaussianRbfKernel<> kernel(0.5); QpStoppingCondition stop(1e-10); SvmLogisticInterpretation<> mlms_score( cv_folds, &kernel, false, &stop ); // optimize NCLL using rprop IRpropPlus rprop; RealVector start(2); start(0) = 1.0; start(1) = 0.5; rprop.init( mlms_score, start ); unsigned int its = 50; for (unsigned int i=0; i x; //~ std::vector y; //~ detail::import_csv( x, y, ss, LAST_COLUMN, " ", ""); //~ ClassificationDataset training_dataset = createLabeledDataFromRange(x,y); ClassificationDataset training_dataset; csvStringToData(training_dataset,test,LAST_COLUMN,0); std::size_t num_eles = training_dataset.numberOfElements(); std::size_t num_folds = 2; std::vector< size_t > indices( num_eles ); for(std::size_t i = 0; i cv_folds = createCVIndexed( training_dataset, num_folds, indices ); GaussianRbfKernel<> kernel(0.5); QpStoppingCondition stop(1e-10); SvmLogisticInterpretation<> mlms_score( cv_folds, &kernel, false, &stop ); // optimize NCLL using rprop IRpropPlus rprop; RealVector start(2); start(0) = 1.0; start(1) = 0.5; rprop.init( mlms_score, start ); unsigned int its = 50; for (unsigned int i=0; i cv_folds = createCVIID( train, num_folds ); DenseARDKernel kernel( td, 0.1 ); //dummy gamma, set later for real. QpStoppingCondition stop(1e-10); SvmLogisticInterpretation<> mlms( cv_folds, &kernel, true, &stop ); RealVector start( td+1 ); start(0) = 0.1; for ( unsigned int i=0; i svm; CSvmTrainer trainer( &kernel, exp(rprop.solution().point(0)), true ); trainer.train( svm, train ); ZeroOneLoss loss; // 0-1 loss Data output = svm( train.inputs() ); // evaluate on training set double train_error = loss.eval(train.labels(), output); std::cout << "train error " << train_error << std::endl; output = svm( test.inputs() ); // evaluate on test set double test_error = loss.eval(test.labels(), output); std::cout << "test error " << test_error << std::endl; BOOST_CHECK( test_error < 0.155 ); //should be enough, hopefully.. 
} BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/ObjectiveFunctions/TestLoss.h000066400000000000000000000025051314655040000212420ustar00rootroot00000000000000#ifndef TEST_OBJECTIVEFUNCTIONS_TESTLOSS #define TEST_OBJECTIVEFUNCTIONS_TESTLOSS namespace shark{ template RealVector estimateDerivative( Loss const& loss, Point const& point, Label const& label, double epsilon = 1.e-5 ){ RealVector gradient(point.size2()); for(size_t parameter = 0; parameter != point.size2(); ++parameter){ Point testPoint1 = point; testPoint1(0,parameter) += epsilon; double result1 = loss.eval(label, testPoint1); Point testPoint2 = point; testPoint2(0,parameter) -= epsilon; double result2 = loss.eval(label, testPoint2); gradient(parameter)=(result1 - result2) / (2 * epsilon); } return gradient; } template RealMatrix estimateSecondDerivative( Loss const& loss, Point const& point, Label const& label, double epsilon = 1.e-5 ){ RealMatrix hessian(point.size2(), point.size2()); hessian.clear(); for(size_t parameter = 0;parameter != point.size2(); ++parameter){ Point testPoint1 = point; testPoint1(0,parameter) += epsilon; RealVector grad1 = estimateDerivative(loss, testPoint1, label); Point testPoint2 = point; testPoint2(0,parameter) -= epsilon; RealVector grad2 = estimateDerivative(loss, testPoint2, label); row(hessian,parameter)=(grad1 - grad2) / (2 * epsilon); } return hessian; } } #endif Shark-3.1.4/Test/ObjectiveFunctions/TestObjectiveFunction.h000066400000000000000000000100511314655040000237350ustar00rootroot00000000000000#ifndef SHARK_TEST_DERIVATIVETESTHELPER_H #define SHARK_TEST_DERIVATIVETESTHELPER_H #include #include #include #include #include namespace shark{ //estimates Derivative using the formula: //df(x)/dx~=(f(x+e)-f(x-e))/2e inline RealVector estimateDerivative( SingleObjectiveFunction& function, const RealVector& point, double epsilon=1.e-10 ){ RealVector gradient(point.size()); for(std::size_t dim=0;dim!=point.size();++dim){ RealVector testPoint1=point; testPoint1(dim)+=epsilon; double result1= function.eval(testPoint1); RealVector testPoint2=point; testPoint2(dim) -= epsilon; double result2 = function.eval(testPoint2); gradient[dim] = (result1-result2)/(2*epsilon); } return gradient; } inline RealMatrix estimateSecondDerivative( SingleObjectiveFunction& function, const RealVector& point, double epsilon=1.e-10 ){ typedef SingleObjectiveFunction::FirstOrderDerivative Derivative; RealMatrix hessian(point.size(),point.size()); for(std::size_t dim=0;dim!=point.size();++dim){ RealVector testPoint1 = point; testPoint1(dim) += epsilon; Derivative result1; function.evalDerivative(testPoint1,result1); RealVector testPoint2=point; testPoint2(dim) -= epsilon; Derivative result2; function.evalDerivative(testPoint2,result2); row(hessian,dim) = (result1-result2)/(2*epsilon); } return hessian; } void testDerivative( SingleObjectiveFunction& function, const RealVector& point, double epsilon=1.e-10, double epsilonSecond = 1.e-10, double maxErrorPercentage = 0.001 ){ //double maxError = epsilon * 100; RealVector estimatedDerivative=estimateDerivative(function,point,epsilon); //calculate derivative and check that eval() and evalDerivative() match. 
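	// Added note: BOOST_CHECK_CLOSE interprets its third argument as a
	// *percentage* tolerance relative to the expected value, while
	// BOOST_CHECK_SMALL (used further below) takes an absolute bound --
	// keep that in mind when reading the maxErrorPercentage parameter
	// of this helper.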
typedef SingleObjectiveFunction Function; Function::FirstOrderDerivative derivative; double resultEvalDerivative = function.evalDerivative(point,derivative); double resultEval = function.eval(point); BOOST_CHECK_CLOSE(resultEvalDerivative,resultEval, maxErrorPercentage); //calculate error between both BOOST_REQUIRE_EQUAL(estimatedDerivative.size(),derivative.size()); //std::cout< 0.01 && estimatedDerivative(i)>0.001){ BOOST_CHECK_CLOSE(estimatedDerivative(i),derivative(i),maxErrorPercentage); } //BOOST_CHECK_SMALL(estimatedDerivative(i) -derivative(i),maxError); } //if possible, calculate second derivative if(function.features() & Function::HAS_SECOND_DERIVATIVE){ double maxErrorSecond = epsilonSecond * 100; RealMatrix estimatedHessian = estimateSecondDerivative(function,point,epsilonSecond); Function::SecondOrderDerivative secondDerivative; double resultEvalSecondDerivative = function.evalDerivative(point,secondDerivative); BOOST_CHECK_CLOSE(resultEvalSecondDerivative,resultEval, 1.e-5); //check first derivative again... BOOST_REQUIRE_EQUAL(estimatedDerivative.size(),secondDerivative.gradient.size()); for(std::size_t i=0;i != estimatedDerivative.size(); ++i){ BOOST_CHECK_CLOSE(estimatedDerivative(i),derivative(i),maxErrorPercentage); //BOOST_CHECK_SMALL(estimatedDerivative(i) - secondDerivative.gradient(i),maxError); //std::cout< #include #include "TestLoss.h" #define BOOST_TEST_MODULE OBJECTIVEFUNCTIONS_TUKEYBIWEIGHTLOSS #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (ObjectiveFunctions_TukeyBiweightLoss) BOOST_AUTO_TEST_CASE( HUBERLOSS_TEST ) { unsigned int maxTests = 10000; for (unsigned int test = 0; test != maxTests; ++test) { TukeyBiweightLoss loss(1.5); RealMatrix testPoint(2,3); testPoint(0,0) = Rng::uni(-1,1); testPoint(1,0) = Rng::uni(-1,1); testPoint(0,1) = Rng::uni(-1,1); testPoint(1,1) = Rng::uni(-1,1); testPoint(0,2) = Rng::uni(-1,1); testPoint(1,2) = Rng::uni(-1,1); RealMatrix testLabel(2,3); testLabel(0,0) = Rng::uni(-1,1); testLabel(1,0) = Rng::uni(-1,1); testLabel(0,1) = Rng::uni(-1,1); testLabel(1,1) = Rng::uni(-1,1); testLabel(0,2) = Rng::uni(-1,1); testLabel(1,2) = Rng::uni(-1,1); //test evalDerivative (first) RealMatrix derivative; double valueEval = loss.eval(testLabel, testPoint); double valueEvalDerivative = loss.evalDerivative(testLabel, testPoint, derivative); BOOST_CHECK_SMALL(valueEval - valueEvalDerivative, 1.e-13); BOOST_REQUIRE_EQUAL(derivative.size1(), 2); BOOST_REQUIRE_EQUAL(derivative.size2(), 3); for(std::size_t i = 0; i != 2; ++i){ double value = loss(row(testLabel,i), row(testPoint,i)); BOOST_CHECK_SMALL(value, sqr(1.5)/6.0+1.e-5); } for(std::size_t i = 0; i != 2; ++i){ RealVector estimatedDerivative = estimateDerivative(loss, RealMatrix(rows(testPoint,i,i+1)), rows(testLabel,i,i+1)); BOOST_CHECK_SMALL(norm_2(row(derivative,i) - estimatedDerivative), 1.e-5); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/000077500000000000000000000000001314655040000141245ustar00rootroot00000000000000Shark-3.1.4/Test/RBM/Analytics.cpp000066400000000000000000000100361314655040000165570ustar00rootroot00000000000000#include #include #include #include #define BOOST_TEST_MODULE RBM_Analytics #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_Analytics) BOOST_AUTO_TEST_CASE( Energy_Partition_VisibleGreaterHidden ) { //all possible state combinations for 2 hidden units RealMatrix hiddenStateSpace(4,2); hiddenStateSpace(0,0)=0; hiddenStateSpace(0,1)=0; hiddenStateSpace(1,0)=0; hiddenStateSpace(1,1)=1; 
hiddenStateSpace(2,0)=1; hiddenStateSpace(2,1)=0; hiddenStateSpace(3,0)=1; hiddenStateSpace(3,1)=1; //create RBM with 4 visible and 2 hidden units and initialize it randomly RBM rbm(Rng::globalRng); rbm.setStructure(4,2); initRandomNormal(rbm,2); //now test for several choices of beta for(std::size_t i = 0; i <= 10; ++i){ double beta=i*0.1; //calculate the result by integrating over all states using the energies of every state double partitionTest = 0; for(std::size_t x = 0; x != 16; ++x){ RealMatrix visibleState(4,4);//we need it 4 times for easier batch processing BinarySpace::state(row(visibleState,0),x); BinarySpace::state(row(visibleState,1),x); BinarySpace::state(row(visibleState,2),x); BinarySpace::state(row(visibleState,3),x); partitionTest+=sum(exp(-beta*rbm.energy().energy(hiddenStateSpace,visibleState))); } //now calculate the test double logPartition = logPartitionFunction(rbm,beta); BOOST_CHECK_CLOSE(logPartition,std::log(partitionTest),1.e-5); } } BOOST_AUTO_TEST_CASE( Energy_Partition_HiddenGreaterVisible ) { //all possible state combinations for 2 visible units RealMatrix visibleStateSpace(4,2); visibleStateSpace(0,0)=0; visibleStateSpace(0,1)=0; visibleStateSpace(1,0)=0; visibleStateSpace(1,1)=1; visibleStateSpace(2,0)=1; visibleStateSpace(2,1)=0; visibleStateSpace(3,0)=1; visibleStateSpace(3,1)=1; //create RBM with 2 visible and 4 hidden units and initialize it randomly RBM rbm(Rng::globalRng); rbm.setStructure(2,4); initRandomNormal(rbm,2); //now test for several choices of beta for(std::size_t i = 0; i <= 10; ++i){ double beta=i*0.1; //calculate the result by integrating over all states using the energies of every state double partitionTest = 0; for(std::size_t x = 0; x != 16; ++x){ RealMatrix hiddenState(4,4);//we need it 4 times for easier batch processing BinarySpace::state(row(hiddenState,0),x); BinarySpace::state(row(hiddenState,1),x); BinarySpace::state(row(hiddenState,2),x); BinarySpace::state(row(hiddenState,3),x); partitionTest+=sum(exp(-beta*rbm.energy().energy(hiddenState,visibleStateSpace))); } //now calculate the test double logPartition = logPartitionFunction(rbm,beta); BOOST_CHECK_CLOSE(logPartition,std::log(partitionTest),1.e-5); } } BOOST_AUTO_TEST_CASE( Energy_NegLogLikelihood ) { //create RBM with 8 visible and 16 hidden units RBM rbm(Rng::globalRng); rbm.setStructure(8,16); //now test for several random subsets of possible training data and random initializations of the rbm for(std::size_t i = 0; i != 10; ++i){ initRandomNormal(rbm,2); std::vector dataVec(50,RealVector(8)); for(std::size_t j = 0; j != 50; ++j){ for(std::size_t k = 0; k != 8; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); //now calculate the test long double logPartition = logPartitionFunction(rbm); long double logProbTest = 0; for(std::size_t j = 0; j != 50; ++j){ RealMatrix temp(1,8); row(temp,0) = dataVec[j]; logProbTest -= rbm.energy().logUnnormalizedProbabilityVisible(temp,blas::repeat(1,1))(0)-logPartition; } long double logProb = negativeLogLikelihood(rbm,data); BOOST_CHECK_CLOSE(logProbTest,logProb,1.e-5); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/AverageEnergyGradient.cpp000066400000000000000000000244171314655040000210420ustar00rootroot00000000000000#include #include #include #include #define BOOST_TEST_MODULE RBM_AverageEnergyGradient #include #include #include "../ObjectiveFunctions/TestObjectiveFunction.h" using namespace shark; //test, that the weighted gradient produces correct results for binary units when 
using addVH //test1 is with weight 1 BOOST_AUTO_TEST_SUITE (RBM_AverageEnergyGradient) BOOST_AUTO_TEST_CASE( AverageEnergyGradient_Weighted_One_Visible ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4); initRandomNormal(rbm,2); RealMatrix batch(10,4); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 4; ++k){ batch(j,k)=Rng::coinToss(0.5); } } BinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); BinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,4); BinaryGibbsOperator gibbs(&rbm); detail::AverageEnergyGradient grad(&rbm); detail::AverageEnergyGradient gradTest(&rbm); gibbs.createSample(hiddenBatch,visibleBatch,batch); grad.addVH(hiddenBatch,visibleBatch); gradTest.addVH(hiddenBatch,visibleBatch,blas::repeat(0.0,10)); RealVector diff = grad.result()-gradTest.result(); BOOST_CHECK_SMALL(norm_1(diff),1.e-5); } //test, that the weighted gradient produces correct results for binary units when using addHV //test1 is with weight 1 BOOST_AUTO_TEST_CASE( AverageEnergyGradient_Weighted_One_Hidden ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4); initRandomNormal(rbm,2); RealMatrix batch(10,4); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 4; ++k){ batch(j,k)=Rng::coinToss(0.5); } } BinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); BinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,4); BinaryGibbsOperator gibbs(&rbm); detail::AverageEnergyGradient grad(&rbm); detail::AverageEnergyGradient gradTest(&rbm); hiddenBatch.state = batch; gibbs.precomputeVisible(hiddenBatch,visibleBatch,blas::repeat(1.0,10)); grad.addHV(hiddenBatch,visibleBatch); gradTest.addHV(hiddenBatch,visibleBatch,blas::repeat(0.0,10)); RealVector diff = grad.result()-gradTest.result(); BOOST_CHECK_SMALL(norm_1(diff),1.e-5); } //test, that the weighted gradient produces correct results for binary units when using addVH //test2 is with different weights BOOST_AUTO_TEST_CASE( AverageEnergyGradient_Weighted_Visible ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4); initRandomNormal(rbm,2); RealMatrix batch(10,4); RealVector weights(10); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 4; ++k){ batch(j,k)=Rng::coinToss(0.5); } weights(j)=j; } double logWeightSum=std::log(sum(weights)); BinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); BinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,4); BinaryGibbsOperator gibbs(&rbm); detail::AverageEnergyGradient grad(&rbm); detail::AverageEnergyGradient gradTest(&rbm); gibbs.createSample(hiddenBatch,visibleBatch,batch); grad.addVH(hiddenBatch,visibleBatch,log(weights)); BOOST_CHECK_CLOSE(logWeightSum,grad.logWeightSum(),1.e-5); //calculate ground truth data by incorporating the weights into //the state. we have to correct the probability part of the gradient afterwards //as this is not changed. 
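	// Added reasoning (not in the original source): with weights w_i the
	// weighted gradient is g = (sum_i w_i g_i) / (sum_i w_i). The terms of
	// each g_i that are linear in the sampled state (the weight-matrix block
	// and the bias block of the scaled layer) are reproduced exactly by
	// scaling state row i with its normalized weight (times the batch size
	// of 10, which cancels the 1/10 batch average). Only the block computed
	// from the sufficient statistics of the opposite layer is not affected
	// by this scaling, so it is patched via the subrange assignment below.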
RealVector newWeights=weights/std::exp(logWeightSum)*10; for(std::size_t i = 0; i != 10; ++i){ row(visibleBatch.state,i)*=newWeights(i); } gradTest.addVH(hiddenBatch,visibleBatch); RealVector gradTestResult= gradTest.result(); noalias(subrange(gradTestResult,16,20))= prod(weights,hiddenBatch.statistics)/std::exp(logWeightSum); RealVector diff = grad.result()-gradTestResult; BOOST_CHECK_SMALL(norm_1(diff),1.e-5); } //test, that the weighted gradient produces correct results for binary units when using addHV //test2 is with different weights BOOST_AUTO_TEST_CASE( AverageEnergyGradient_Weighted_Hidden ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4); initRandomNormal(rbm,2); RealMatrix batch(10,4); RealVector weights(10); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 4; ++k){ batch(j,k)=Rng::coinToss(0.5); } weights(j)=j; } double logWeightSum=std::log(sum(weights)); BinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); BinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,4); BinaryGibbsOperator gibbs(&rbm); detail::AverageEnergyGradient grad(&rbm); detail::AverageEnergyGradient gradTest(&rbm); hiddenBatch.state = batch; gibbs.precomputeVisible(hiddenBatch,visibleBatch,blas::repeat(1.0,10)); grad.addHV(hiddenBatch,visibleBatch,log(weights)); BOOST_CHECK_CLOSE(logWeightSum,grad.logWeightSum(),1.e-5); //calculate ground truth data RealVector newWeights=weights/std::exp(logWeightSum)*10; for(std::size_t i = 0; i != 10; ++i){ row(hiddenBatch.state,i)*=newWeights(i); } gradTest.addHV(hiddenBatch,visibleBatch); RealVector gradTestResult= gradTest.result(); noalias(subrange(gradTestResult,20,24))= prod(weights,visibleBatch.statistics)/std::exp(logWeightSum); RealVector diff = grad.result()-gradTestResult; BOOST_CHECK_SMALL(norm_1(diff),1.e-5); } //now that we know that the unweighted version does the same as the weighted one, we check //that the unweighted version works by calculating the numerical gradient class TestGradientVH : public SingleObjectiveFunction{ public: TestGradientVH(BinaryRBM* rbm): mpe_rbm(rbm){ m_features |= HAS_FIRST_DERIVATIVE; m_features |= CAN_PROPOSE_STARTING_POINT; }; std::string name() const { return "TestGradientVH"; } void setData(UnlabeledData const& data){ m_data = data; } void proposeStartingPoint(SearchPointType& startingPoint) const{ startingPoint = mpe_rbm->parameterVector(); } std::size_t numberOfVariables()const{ return mpe_rbm->numberOfParameters(); } double eval( SearchPointType const & parameter) const { mpe_rbm->setParameterVector(parameter); double result=0; for(std::size_t i =0; i != m_data.numberOfBatches();++i){ result+=sum(mpe_rbm->energy().logUnnormalizedProbabilityVisible( m_data.batch(i),blas::repeat(1.0,m_data.batch(i).size1()) )); } result/=m_data.numberOfElements(); return result; } double evalDerivative( SearchPointType const & parameter, FirstOrderDerivative & derivative ) const { mpe_rbm->setParameterVector(parameter); detail::AverageEnergyGradient gradient(mpe_rbm); GibbsOperator sampler(mpe_rbm); //create Energy from RBM //calculate the expectation of the energy gradient with respect to the data for(std::size_t i=0; i != m_data.numberOfBatches(); i++){ std::size_t currentBatchSize=m_data.batch(i).size1(); GibbsOperator::HiddenSampleBatch hiddenSamples(currentBatchSize,mpe_rbm->numberOfHN()); GibbsOperator::VisibleSampleBatch visibleSamples(currentBatchSize,mpe_rbm->numberOfVN()); sampler.createSample(hiddenSamples,visibleSamples,m_data.batch(i)); gradient.addVH(hiddenSamples, visibleSamples); } derivative =
gradient.result(); return eval(parameter); } private: BinaryRBM* mpe_rbm; UnlabeledData m_data; }; BOOST_AUTO_TEST_CASE( AverageEnergyGradient_DerivativeVH ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(10,16); initRandomNormal(rbm,2); RealVector parameters = rbm.parameterVector(); initRandomNormal(rbm,2); std::vector dataVec(50,RealVector(10)); for(std::size_t j = 0; j != 50; ++j){ for(std::size_t k = 0; k != 10; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); TestGradientVH gradient(&rbm); gradient.setData(data); testDerivative(gradient,parameters,1.e-3,1.e-10,0.1); } class TestGradientHV : public SingleObjectiveFunction{ public: TestGradientHV(BinaryRBM* rbm): mpe_rbm(rbm){ m_features |= HAS_FIRST_DERIVATIVE; m_features |= CAN_PROPOSE_STARTING_POINT; }; std::string name() const { return "TestGradientHV"; } void setData(UnlabeledData const& data){ m_data = data; } void proposeStartingPoint(SearchPointType& startingPoint) const{ startingPoint = mpe_rbm->parameterVector(); } std::size_t numberOfVariables()const{ return mpe_rbm->numberOfParameters(); } double eval( SearchPointType const & parameter) const { mpe_rbm->setParameterVector(parameter); double result=0; for(std::size_t i =0; i != m_data.numberOfBatches();++i){ result+=sum(mpe_rbm->energy().logUnnormalizedProbabilityHidden( m_data.batch(i),blas::repeat(1,m_data.batch(i).size1()) )); } result/=m_data.numberOfElements(); return result; } double evalDerivative( SearchPointType const & parameter, FirstOrderDerivative & derivative ) const { mpe_rbm->setParameterVector(parameter); detail::AverageEnergyGradient gradient(mpe_rbm); GibbsOperator sampler(mpe_rbm); //create Energy from RBM //calculate the expectation of the energy gradient with respect to the data for(std::size_t i=0; i != m_data.numberOfBatches(); i++){ std::size_t currentBatchSize=m_data.batch(i).size1(); GibbsOperator::HiddenSampleBatch hiddenSamples(currentBatchSize,mpe_rbm->numberOfHN()); GibbsOperator::VisibleSampleBatch visibleSamples(currentBatchSize,mpe_rbm->numberOfVN()); hiddenSamples.state = m_data.batch(i); sampler.precomputeVisible(hiddenSamples,visibleSamples,blas::repeat(1.0,10)); gradient.addHV(hiddenSamples, visibleSamples); } derivative = gradient.result(); return eval(parameter); } private: BinaryRBM* mpe_rbm; UnlabeledData m_data; }; BOOST_AUTO_TEST_CASE( AverageEnergyGradient_DerivativeHV ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(16,10); initRandomNormal(rbm,2); RealVector parameters = rbm.parameterVector(); initRandomNormal(rbm,2); std::vector dataVec(1,RealVector(10)); for(std::size_t j = 0; j != 1; ++j){ for(std::size_t k = 0; k != 10; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); TestGradientHV gradient(&rbm); gradient.setData(data); testDerivative(gradient,parameters,1.e-3,1.e-10,0.1); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/BinaryLayer.cpp000066400000000000000000000062261314655040000170570ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE RBM_BinaryLayer #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_BinaryLayer) BOOST_AUTO_TEST_CASE( BinaryLayer_SufficientStatistics){ BinaryLayer layer; BinaryLayer::StatisticsBatch statistics(10,3); layer.resize(3); RealMatrix input(10,3); RealMatrix testInput(10,3); Rng::seed(42); for(std::size_t test = 0; test != 10000; ++test){ double beta = Rng::uni(0,1); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = Rng::gauss(0,10); } 
for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ input(i,j) = Rng::gauss(0,10); } //calculate result row(testInput,i) = row(input,i) + layer.bias(); row(testInput,i) *= beta; } layer.sufficientStatistics(input,statistics,blas::repeat(beta,10)); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ double sigmoid = 1.0/(1+std::exp(-testInput(i,j))); BOOST_CHECK_SMALL(sigmoid-statistics(i,j),1.e-15); } } } } BOOST_AUTO_TEST_CASE( BinaryLayer_Parameters){ BinaryLayer layer; layer.resize(20); RealVector parameters(20); Rng::seed(42); for(std::size_t j = 0; j != 20; ++j){ parameters(j) = Rng::gauss(0,1); } layer.setParameterVector(parameters); for(std::size_t j = 0; j != 20; ++j){ BOOST_CHECK_SMALL(layer.parameterVector()(j) - parameters(j),1.e-15); } } BOOST_AUTO_TEST_CASE( BinaryLayer_Sample){ BinaryLayer layer; BinaryLayer::StatisticsBatch statistics(10,5); layer.resize(5); Rng::seed(42); std::vector alphas; alphas.push_back(0); alphas.push_back(1); for(std::size_t i = 0; i != 10; ++i){ alphas.push_back(Rng::uni(0,1)); } const std::size_t numSamples = 100000; const double epsilon = 1.e-4; for(std::size_t a = 0; a != alphas.size(); ++a){ double alpha = alphas[a]; for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 5; ++j){ statistics(i,j) = Rng::uni(0.0,1.0); } } RealMatrix mean(10,5,0.0); RealMatrix samples(10,5,0.0); for(std::size_t i = 0; i != numSamples; ++i){ layer.sample(statistics,samples,alpha,Rng::globalRng); mean+=samples; } mean/=numSamples; for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 5; ++j){ BOOST_CHECK_SMALL(sqr(mean(i,j) - statistics(i,j)),epsilon); } } } } BOOST_AUTO_TEST_CASE( BinaryLayer_LogMarginalize){ BinaryLayer layer; layer.resize(3); RealVector input(3); Rng::seed(42); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = -0.5*j-1; } for(std::size_t j = 0; j != 3; ++j){ input(j) = -0.5*j-2; } double testResult = std::log((1+std::exp(-3.0))*(1+std::exp(-4.0))*(1+std::exp(-5.0)) ); double testResultHalvedBeta = std::log((1+std::exp(-1.5))*(1+std::exp(-2.0))*(1+std::exp(-2.5))); double result = layer.logMarginalize(input,1); double resultHalvedBeta = layer.logMarginalize(input,0.5); BOOST_CHECK_CLOSE(result , testResult , 0.01); BOOST_CHECK_CLOSE(resultHalvedBeta , testResultHalvedBeta , 0.01); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/BipolarLayer.cpp000066400000000000000000000063631314655040000172250ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE RBM_BipolarLayer #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_BipolarLayer) BOOST_AUTO_TEST_CASE( BipolarLayer_SufficientStatistics){ BipolarLayer layer; BipolarLayer::StatisticsBatch statistics(10,3); layer.resize(3); RealMatrix input(10,3); RealMatrix testInput(10,3); Rng::seed(42); for(std::size_t test = 0; test != 10000; ++test){ double beta = Rng::uni(0,1); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = Rng::gauss(0,10); } for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ input(i,j) = Rng::gauss(0,10); } //calculate result row(testInput,i) = row(input,i) + layer.bias(); row(testInput,i) *= beta; } layer.sufficientStatistics(input,statistics,blas::repeat(beta,10)); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ double sigmoid = 1.0/(1+std::exp(-2*testInput(i,j))); BOOST_CHECK_SMALL(sigmoid-statistics(i,j),1.e-15); } } } } BOOST_AUTO_TEST_CASE( BipolarLayer_Parameters){ BipolarLayer layer; layer.resize(20); RealVector 
parameters(20); Rng::seed(42); for(std::size_t j = 0; j != 20; ++j){ parameters(j) = Rng::gauss(0,1); } layer.setParameterVector(parameters); for(std::size_t j = 0; j != 20; ++j){ BOOST_CHECK_SMALL(layer.parameterVector()(j) - parameters(j),1.e-15); } } BOOST_AUTO_TEST_CASE( BipolarLayer_Sample){ BipolarLayer layer; BipolarLayer::StatisticsBatch statistics(10,5); layer.resize(5); Rng::seed(42); std::vector<double> alphas; alphas.push_back(0); alphas.push_back(1); for(std::size_t i = 0; i != 10; ++i){ alphas.push_back(Rng::uni(0,1)); } const std::size_t numSamples = 100000; const double epsilon = 1.e-4; for(std::size_t a = 0; a != alphas.size(); ++a){ double alpha = alphas[a]; for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 5; ++j){ statistics(i,j) = Rng::uni(0.0,1.0); } } RealMatrix mean(10,5,0.0); RealMatrix samples(10,5,0.0); for(std::size_t i = 0; i != numSamples; ++i){ layer.sample(statistics,samples,alpha,Rng::globalRng); mean+=samples; } mean/=numSamples; for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 5; ++j){ BOOST_CHECK_SMALL(sqr(mean(i,j) - (2*statistics(i,j)-1)),epsilon); } } } } BOOST_AUTO_TEST_CASE( BipolarLayer_LogMarginalize){ BipolarLayer layer; layer.resize(3); RealVector input(3); Rng::seed(42); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = -0.5*j-1; } for(std::size_t j = 0; j != 3; ++j){ input(j) = -0.5*j-2; } double testResult = std::log((std::exp(3.0)+std::exp(-3.0))*(std::exp(4.0)+std::exp(-4.0))*(std::exp(5.0)+std::exp(-5.0)) ); double testResultHalvedBeta = std::log((std::exp(1.5)+std::exp(-1.5))*(std::exp(2.0)+std::exp(-2.0))*(std::exp(2.5)+std::exp(-2.5))); double result = layer.logMarginalize(input,1); double resultHalvedBeta = layer.logMarginalize(input,0.5); BOOST_CHECK_CLOSE(result , testResult , 0.01); BOOST_CHECK_CLOSE(resultHalvedBeta , testResultHalvedBeta , 0.01); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/ContrastiveDivergence.cpp000066400000000000000000000145071314655040000211330ustar00rootroot00000000000000#include #include #define BOOST_TEST_MODULE RBM_ContrastiveDivergence #include #include //this is a mockup test, meaning we implement a fake RBM and use a template specialization of GibbsSampling to dive deep into the CD's structure //obviously, this is a hack!
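// Added note: the mock below never computes real probabilities; it only
// counts calls. The GibbsOperator specialization for RBMMockup records how
// often the precompute*/sample* methods were invoked and in which order, so
// that the mock AverageEnergyGradient can assert from inside addVH/addHV
// that a CD-k update (k = 5 via setK(5) in the test cases) performs exactly
// k Gibbs sweeps in the expected sequence.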
namespace shark{ //mockup of the RBM struct RBMMockup{ public: typedef IntVector VectorType; struct Energy{ struct Structure{ bool requireHiddenSample; bool parametersSet; Structure():requireHiddenSample(false), parametersSet(false){} template void configure(T const& t){} std::size_t numberOfParameters()const{ return 1; } void setParameterVector(RealVector const&){ parametersSet = true; } RealVector parameterVector(){ return RealVector(); } }; struct AverageEnergyGradient{ int id; Structure* m_structure; AverageEnergyGradient(Structure* structure):id(0),m_structure(structure){} template void addVH(HiddenSampleBatch const& hidden,VisibleSampleBatch const& visible){ BOOST_CHECK_EQUAL(visible.calledPrecompute,visible.calledSample); BOOST_CHECK_EQUAL(hidden.calledPrecompute,visible.calledPrecompute); if(m_structure->requireHiddenSample){ BOOST_CHECK(hidden.calledSample == 0 ||hidden.calledSample == 5); } else{ if(hidden.calledSample != 0) BOOST_CHECK_EQUAL( hidden.calledSample , 4); } BOOST_CHECK_EQUAL(hidden.id,visible.id); BOOST_CHECK_EQUAL(hidden.id,id); ++id; } IntVector result()const{ return IntVector(1); } }; Energy(Structure*){} }; Energy::Structure& structure(){ static Energy::Structure structure; return structure; } std::size_t numberOfHN()const{ return 0; } std::size_t numberOfVN()const{ return 0; } std::size_t numberOfParameters()const{ return 0; } }; //specialization for Gibbs Sampling //during sampling the order of the calls is checked template<> class GibbsOperator{ public: typedef RBMMockup RBM; typedef RBMMockup::Energy Energy; typedef IntVector VectorType; typedef Energy::Structure Structure; RBM* mpe_rbm; SamplingFlags m_flags; ///\brief Represents the state of the hidden units struct HiddenSampleBatch{ int id; int calledPrecompute; int calledSample; HiddenSampleBatch(std::size_t,std::size_t){} }; template void configure(T const& t){} ///\brief Represents the state of the visible units struct VisibleSampleBatch{ int calledPrecompute; int calledSample; int id; VisibleSampleBatch(std::size_t,std::size_t){} }; GibbsOperator(RBM* rbm):mpe_rbm(rbm){} RBM* rbm()const{ return mpe_rbm; } SamplingFlags& flags(){ return m_flags; } SamplingFlags const& flags()const{ return m_flags; } //the methods themselves only check the ordering of the calls void precomputeHidden(HiddenSampleBatch& hidden,const VisibleSampleBatch& visible)const{ BOOST_CHECK_EQUAL(hidden.calledPrecompute, hidden.calledSample); BOOST_CHECK_EQUAL(hidden.calledSample+1, visible.calledSample); BOOST_CHECK_EQUAL(hidden.calledPrecompute+1, visible.calledPrecompute); BOOST_CHECK_EQUAL(hidden.id, visible.id); ++hidden.calledPrecompute; } void precomputeVisible(const HiddenSampleBatch& hidden, VisibleSampleBatch& visible)const{ BOOST_CHECK_EQUAL(visible.calledPrecompute, visible.calledSample); BOOST_CHECK_EQUAL(visible.calledSample, hidden.calledSample); BOOST_CHECK_EQUAL(visible.calledPrecompute, hidden.calledPrecompute); BOOST_CHECK_EQUAL(hidden.id, visible.id); ++visible.calledPrecompute; } void sampleHidden(HiddenSampleBatch& hidden)const{ BOOST_CHECK_EQUAL(hidden.calledPrecompute, hidden.calledSample+1); ++hidden.calledSample; } void sampleVisible(VisibleSampleBatch& visible)const{ BOOST_CHECK_EQUAL(visible.calledPrecompute, visible.calledSample+1); ++visible.calledSample; } template void createSample(HiddenSampleBatch& hidden,VisibleSampleBatch& visible, M const& state)const{ visible.calledPrecompute = 0; visible.calledSample = 0; hidden.calledPrecompute = 0; hidden.calledSample = 0; hidden.id = (int) state(0,0); 
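		// (added comment) the first component of each training point doubles as a
		// pattern id; the mock AverageEnergyGradient later checks that hidden and
		// visible samples carry the same id and that the batches arrive in order.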
visible.id = (int) state(0,0); } }; } using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_ContrastiveDivergence) BOOST_AUTO_TEST_CASE( ContrastiveDivergence_noHiddenSample ) { RBMMockup rbm; ContrastiveDivergence > cd(&rbm); std::vector vec(5); for(std::size_t i = 0; i != 5; ++i){ vec[i].resize(1); vec[i](0)=i; } UnlabeledData data = createDataFromRange(vec,1); cd.setData(data); cd.setK(5); rbm.structure().requireHiddenSample = false; RealVector point; ContrastiveDivergence >::FirstOrderDerivative derivative; cd.evalDerivative(point,derivative); //test whether parameters were set BOOST_CHECK(rbm.structure().parametersSet); } BOOST_AUTO_TEST_CASE( ContrastiveDivergence_noHiddenSample_batch ) { RBMMockup rbm; ContrastiveDivergence > cd(&rbm); std::vector vec(10); for(std::size_t i = 0; i != 10; i+=2){ vec[i].resize(1); vec[i+1].resize(1); vec[i](0)=i/2; vec[i+1](0)=i/2; } UnlabeledData data = createDataFromRange(vec,2); cd.setData(data); cd.setK(5); rbm.structure().requireHiddenSample = false; RealVector point; ContrastiveDivergence >::FirstOrderDerivative derivative; cd.evalDerivative(point,derivative); //test whether parameters were set BOOST_CHECK(rbm.structure().parametersSet); } BOOST_AUTO_TEST_CASE( ContrastiveDivergence_WithHiddenSample ) { RBMMockup rbm; ContrastiveDivergence > cd(&rbm); std::vector vec(5); for(std::size_t i = 0; i != 5; ++i){ vec[i].resize(1); vec[i](0)=i; } rbm.structure().requireHiddenSample = true; UnlabeledData data = createDataFromRange(vec,1); cd.setData(data); cd.setK(5); RealVector point; ContrastiveDivergence >::FirstOrderDerivative derivative; cd.evalDerivative(point,derivative); //test whether parameters were set BOOST_CHECK(rbm.structure().parametersSet); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/ContrastiveDivergenceTraining.cpp000066400000000000000000000163661314655040000226350ustar00rootroot00000000000000#define BOOST_TEST_MODULE RBM_ContrastiveDivergenceTraining #include #include #include #include #include #include #include #include #include using namespace shark; using namespace std; //calculats the *exact* CD gradient and check that the CD gradient approaches it in the limit BOOST_AUTO_TEST_SUITE (RBM_ContrastiveDivergenceTraining) BOOST_AUTO_TEST_CASE( ContrastiveDivergence_ExactGradient) { BarsAndStripes problem; UnlabeledData data = problem.data(); BarsAndStripes problem2(8);//small batches to catch errors with batching and minibatches UnlabeledData dataBatched = problem2.data(); std::size_t inputs = data.numberOfElements(); BinaryRBM rbm(Rng::globalRng); rbm.setStructure(16,4); BinaryCD cd(&rbm); cd.setData(dataBatched); Energy energy(rbm.energy()); BinarySpace space; std::size_t numHiddenStates = space.numberOfStates(4); std::size_t numVisibleStates = space.numberOfStates(16); //matrix of all hidden states RealMatrix hiddenStates(numHiddenStates,4,0.0); for(std::size_t i = 0; i != numHiddenStates; ++i){ space.state(row(hiddenStates,i),i); } std::size_t tests = 2; //calculate the exact gradient for some random weights of the RBM for(std::size_t t = 0; t != tests; ++t){ initRandomUniform(rbm,-1,1); //define the distribution for every input pattern RealMatrix hiddenInput(inputs,4); BinaryLayer::StatisticsBatch hiddenStatistics(inputs,4); energy.inputHidden(hiddenInput,data.batch(0)); rbm.hiddenNeurons().sufficientStatistics(hiddenInput,hiddenStatistics, blas::repeat(1.0,inputs)); //calculate the propability for every hidden state given the visible state p(h|v) RealMatrix phv(inputs,numHiddenStates,1.0); for(std::size_t i = 0; i != inputs; 
++i){ for(std::size_t j = 0; j != numHiddenStates; ++j){ for(std::size_t k = 0; k != 4; ++k){//probability of state j given visible state i if(hiddenStates(j,k) > 0.5) phv(i,j) *= hiddenStatistics(i,k); else phv(i,j) *= 1.0-hiddenStatistics(i,k); } } } //marginalize over v -> p_s(h) RealVector pSh=sum_rows(phv) / inputs; //get p(v_i=1|h) for all h RealMatrix visibleInput(numHiddenStates,16); BinaryLayer::StatisticsBatch visibleStatistics(numHiddenStates,16); energy.inputVisible(visibleInput,hiddenStates); rbm.visibleNeurons().sufficientStatistics(visibleInput,visibleStatistics, blas::repeat(1.0,numHiddenStates)); //now we calculate for all 2^16 states sum_h p(v|h)p_s(h)-> p_s(v), this likely makes this test really really slow RealVector pSv(numVisibleStates,0.0); RealVector state(16); for(std::size_t i = 0; i != numVisibleStates; ++i){ space.state(state,i); for(std::size_t j = 0; j != numHiddenStates; ++j){ double p = 1;//p(v|h) for(std::size_t k = 0; k != 16; ++k){ if(state(k) == 1.0) p *= visibleStatistics(j,k); else p *= 1.0-visibleStatistics(j,k); } pSv(i) += p*pSh(j); } } //now we compute the gradient RealVector visibleGrad(16,0.0); RealVector hiddenGrad(4,0.0); RealMatrix weightGrad(4,16,0.0); //again summing over all visible states, this time we compute the weighted gradient //we will analytically sum over p(h|v)...talking about slowness, this is likely slower than //the step beforehand ;) RealMatrix v(1,16); RealMatrix hInput(1,4); BinaryLayer::StatisticsBatch hstat(1,4); for(std::size_t i = 0; i != inputs; ++i){ row(v,0) = data.element(i); energy.inputHidden(hInput,v); rbm.hiddenNeurons().sufficientStatistics(hInput,hstat, blas::repeat(1.0,1)); noalias(visibleGrad) -=row(v,0); noalias(hiddenGrad) -=row(hstat,0); noalias(weightGrad) -=outer_prod(row(hstat,0), row(v,0)); } visibleGrad /= inputs; hiddenGrad /= inputs; weightGrad /= inputs; for(std::size_t i = 0; i != numVisibleStates; ++i){ space.state(row(v,0),i); energy.inputHidden(hInput,v); rbm.hiddenNeurons().sufficientStatistics(hInput,hstat, blas::repeat(1.0,1)); noalias(visibleGrad) +=pSv(i)*row(v,0); noalias(hiddenGrad) +=pSv(i)*row(hstat,0); noalias(weightGrad) +=pSv(i)*outer_prod(row(hstat,0), row(v,0)); } { RealVector testVisibleGrad(16,0.0); RealVector testHiddenGrad(4,0.0); RealMatrix testWeightGrad(4,16,0.0); //compute the real gradient of CD RealVector approxCDGrad(rbm.numberOfParameters(),0.0); RealVector params = rbm.parameterVector(); for(std::size_t i = 0; i != 5000; ++i){ BinaryCD::FirstOrderDerivative der; cd.evalDerivative(params,der); approxCDGrad+=der; } approxCDGrad /=5000; init(approxCDGrad) >> toVector(testWeightGrad),testHiddenGrad,testVisibleGrad; double diffH=norm_inf(hiddenGrad-testHiddenGrad); double diffV=norm_inf(visibleGrad-testVisibleGrad); double diffW=norm_inf(weightGrad-testWeightGrad); BOOST_CHECK_SMALL(diffH, 5.e-3); BOOST_CHECK_SMALL(diffV, 5.e-2); BOOST_CHECK_SMALL(diffW, 5.e-2); } //now perform the same test with minibatch learning { RealVector testVisibleGrad(16,0.0); RealVector testHiddenGrad(4,0.0); RealMatrix testWeightGrad(4,16,0.0); //compute the real gradient of CD RealVector approxCDGrad(rbm.numberOfParameters(),0.0); RealVector params = rbm.parameterVector(); cd.numBatches() = 2; for(std::size_t i = 0; i != 5000; ++i){ BinaryCD::FirstOrderDerivative der; cd.evalDerivative(params,der); approxCDGrad+=der; } approxCDGrad /=5000; init(approxCDGrad) >> toVector(testWeightGrad),testHiddenGrad,testVisibleGrad; std::cout< data = problem.data(); Rng::seed(42); BinaryRBM 
rbm(Rng::globalRng); rbm.setStructure(16,8); BinaryCD cd(&rbm); cd.numBatches() =1; cd.setData(data); for(unsigned int trial = 0; trial != trials; ++trial){ Rng::seed(42+trial); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-0.1,0.1); } rbm.setParameterVector(params); BinaryCD cd(&rbm); cd.setData(data); SteepestDescent optimizer; optimizer.setLearningRate(0.05); optimizer.setMomentum(0); optimizer.init(cd); double logLikelyHood = 0; for(std::size_t i = 0; i != steps; ++i){ if(i % updateStep == 0){ //std::cout< #include #include #include #include #include #include "../ObjectiveFunctions/TestObjectiveFunction.h" #include #include using namespace shark; using namespace std; //calculats the *exact* CD gradient and check that the CD gradient approaches it in the limit BOOST_AUTO_TEST_SUITE (RBM_ConvolutionalCDTraining) BOOST_AUTO_TEST_CASE( ConvolutionalCDTraining_Bars ){ unsigned int trials = 1; unsigned int steps = 7001; unsigned int updateStep = 1000; BarsAndStripes problem(9); UnlabeledData data = problem.data(); Rng::seed(42); ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4,5,3); for(unsigned int trial = 0; trial != trials; ++trial){ Rng::seed(42+trial); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-0.1,0.1); } rbm.setParameterVector(params); ConvolutionalBinaryCD cd(&rbm); cd.setData(data); cd.setK(10); SteepestDescent optimizer; optimizer.setLearningRate(0.05); optimizer.setMomentum(0); optimizer.init(cd); double logLikelyHood = 0; for(std::size_t i = 0; i != steps; ++i){ if(i % updateStep == 0){ //std::cout< #include #include #include #define BOOST_TEST_MODULE RBM_ConvolutionalEnergyGradient #include #include #include "../ObjectiveFunctions/TestObjectiveFunction.h" using namespace shark; //test, that the weighted gradient produces correct results for binary units when using addVH //test1 is with weight 1 BOOST_AUTO_TEST_SUITE (RBM_ConvolutionalEnergyGradient) BOOST_AUTO_TEST_CASE( ConvolutionalEnergyGradient_Weighted_One_Visible ) { ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4,1,3); initRandomNormal(rbm,2); RealMatrix batch(10,16); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 16; ++k){ batch(j,k)=Rng::coinToss(0.5); } } ConvolutionalBinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); ConvolutionalBinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,16); ConvolutionalBinaryGibbsOperator gibbs(&rbm); detail::ConvolutionalEnergyGradient grad(&rbm); detail::ConvolutionalEnergyGradient gradTest(&rbm); gibbs.createSample(hiddenBatch,visibleBatch,batch); grad.addVH(hiddenBatch,visibleBatch); gradTest.addVH(hiddenBatch,visibleBatch,blas::repeat(0.0,10)); RealVector diff = grad.result()-gradTest.result(); BOOST_CHECK_SMALL(norm_1(diff),1.e-5); } //test, that the weighted gradient produces correct results for binary units when using addHV //test1 is with weight 1 BOOST_AUTO_TEST_CASE( ConvolutionalEnergyGradient_Weighted_One_Hidden ) { ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4,1,3); initRandomNormal(rbm,2); RealMatrix batch(10,4); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 4; ++k){ batch(j,k)=Rng::coinToss(0.5); } } ConvolutionalBinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); ConvolutionalBinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,16); ConvolutionalBinaryGibbsOperator gibbs(&rbm); detail::ConvolutionalEnergyGradient grad(&rbm); 
detail::ConvolutionalEnergyGradient gradTest(&rbm); hiddenBatch.state = batch; gibbs.precomputeVisible(hiddenBatch,visibleBatch,blas::repeat(1.0,10)); grad.addHV(hiddenBatch,visibleBatch); gradTest.addHV(hiddenBatch,visibleBatch,blas::repeat(0.0,10)); RealVector diff = grad.result()-gradTest.result(); BOOST_CHECK_SMALL(norm_1(diff),1.e-5); } //~ //test, that the weighted gradient produces correct results for binary units when using addVH //~ //test2 is with different weights //~ BOOST_AUTO_TEST_CASE( ConvolutionalEnergyGradient_Weighted_Visible ) //~ { //~ ConvolutionalBinaryRBM rbm(Rng::globalRng); //~ rbm.setStructure(4,4); //~ initRandomNormal(rbm,2); //~ RealMatrix batch(10,4); //~ RealVector weights(10); //~ for(std::size_t j = 0; j != 10; ++j){ //~ for(std::size_t k = 0; k != 4; ++k){ //~ batch(j,k)=Rng::coinToss(0.5); //~ } //~ weights(j)=j; //~ } //~ double logWeightSum=std::log(sum(weights)); //~ ConvolutionalBinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); //~ ConvolutionalBinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,4); //~ ConvolutionalBinaryGibbsOperator gibbs(&rbm); //~ detail::ConvolutionalEnergyGradient grad(&rbm); //~ detail::ConvolutionalEnergyGradient gradTest(&rbm); //~ gibbs.createSample(hiddenBatch,visibleBatch,batch); //~ grad.addVH(hiddenBatch,visibleBatch,log(weights)); //~ BOOST_CHECK_CLOSE(logWeightSum,grad.logWeightSum(),1.e-5); //~ //calculate ground truth data by incorporating the weights into //~ //the state. we have to correct the probability part of the gradient afterwards //~ //as this is not changed. //~ RealVector newWeights=weights/std::exp(logWeightSum)*10; //~ for(std::size_t i = 0; i != 10; ++i){ //~ row(visibleBatch.state,i)*=newWeights(i); //~ } //~ gradTest.addVH(hiddenBatch,visibleBatch); //~ RealVector gradTestResult= gradTest.result(); //~ noalias(subrange(gradTestResult,16,20))= //~ prod(weights,hiddenBatch.statistics)/std::exp(logWeightSum); //~ RealVector diff = grad.result()-gradTestResult; //~ BOOST_CHECK_SMALL(norm_1(diff),1.e-5); //~ } //~ //test, that the weighted gradient produces correct results for binary units when using addHV //~ //test2 is with different weights //~ BOOST_AUTO_TEST_CASE( ConvolutionalEnergyGradient_Weighted_Hidden ) //~ { //~ ConvolutionalBinaryRBM rbm(Rng::globalRng); //~ rbm.setStructure(4,4); //~ initRandomNormal(rbm,2); //~ RealMatrix batch(10,4); //~ RealVector weights(10); //~ for(std::size_t j = 0; j != 10; ++j){ //~ for(std::size_t k = 0; k != 4; ++k){ //~ batch(j,k)=Rng::coinToss(0.5); //~ } //~ weights(j)=j; //~ } //~ double logWeightSum=std::log(sum(weights)); //~ ConvolutionalBinaryGibbsOperator::HiddenSampleBatch hiddenBatch(10,4); //~ ConvolutionalBinaryGibbsOperator::VisibleSampleBatch visibleBatch(10,4); //~ ConvolutionalBinaryGibbsOperator gibbs(&rbm); //~ detail::ConvolutionalEnergyGradient grad(&rbm); //~ detail::ConvolutionalEnergyGradient gradTest(&rbm); //~ hiddenBatch.state = batch; //~ gibbs.precomputeVisible(hiddenBatch,visibleBatch,blas::repeat(1.0,10)); //~ grad.addHV(hiddenBatch,visibleBatch,log(weights)); //~ BOOST_CHECK_CLOSE(logWeightSum,grad.logWeightSum(),1.e-5); //~ //calculate ground truth data //~ RealVector newWeights=weights/std::exp(logWeightSum)*10; //~ for(std::size_t i = 0; i != 10; ++i){ //~ row(hiddenBatch.state,i)*=newWeights(i); //~ } //~ gradTest.addHV(hiddenBatch,visibleBatch); //~ RealVector gradTestResult= gradTest.result(); //~ noalias(subrange(gradTestResult,20,24))= //~ prod(weights,visibleBatch.statistics)/std::exp(logWeightSum); //~ 
RealVector diff = grad.result()-gradTestResult; //~ BOOST_CHECK_SMALL(norm_1(diff),1.e-5); //~ } //now, that we now, that the unweighted version does the same as the weighted, we check //that the unweighted version works, by calculating the numerical gradient class TestGradientVH : public SingleObjectiveFunction{ public: TestGradientVH(ConvolutionalBinaryRBM* rbm): mpe_rbm(rbm){ m_features |= HAS_FIRST_DERIVATIVE; m_features |= CAN_PROPOSE_STARTING_POINT; }; std::string name() const { return "TestGradientVH"; } void setData(UnlabeledData const& data){ m_data = data; } void proposeStartingPoint(SearchPointType& startingPoint) const{ startingPoint = mpe_rbm->parameterVector(); } std::size_t numberOfVariables()const{ return mpe_rbm->numberOfParameters(); } double eval( SearchPointType const & parameter) const { mpe_rbm->setParameterVector(parameter); double result=0; for(std::size_t i =0; i != m_data.numberOfBatches();++i){ result+=sum(mpe_rbm->energy().logUnnormalizedProbabilityVisible( m_data.batch(i),blas::repeat(1.0,m_data.batch(i).size1()) )); } result/=m_data.numberOfElements(); return result; } double evalDerivative( SearchPointType const & parameter, FirstOrderDerivative & derivative ) const { mpe_rbm->setParameterVector(parameter); detail::ConvolutionalEnergyGradient gradient(mpe_rbm); GibbsOperator sampler(mpe_rbm); //create Energy from RBM //calculate the expectation of the energy gradient with respect to the data for(std::size_t i=0; i != m_data.numberOfBatches(); i++){ std::size_t currentBatchSize=m_data.batch(i).size1(); GibbsOperator::HiddenSampleBatch hiddenSamples(currentBatchSize,mpe_rbm->numberOfHN()); GibbsOperator::VisibleSampleBatch visibleSamples(currentBatchSize,mpe_rbm->numberOfVN()); sampler.createSample(hiddenSamples,visibleSamples,m_data.batch(i)); gradient.addVH(hiddenSamples, visibleSamples); } derivative = gradient.result(); return eval(parameter); } private: ConvolutionalBinaryRBM* mpe_rbm; UnlabeledData m_data; }; BOOST_AUTO_TEST_CASE( ConvolutionalEnergyGradient_DerivativeVH ) { ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4,2,3); std::vector dataVec(50,RealVector(16)); for(std::size_t j = 0; j != 16; ++j){ for(std::size_t k = 0; k != 10; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); TestGradientVH gradient(&rbm); gradient.setData(data); for(std::size_t i = 0; i != 100; ++i){ initRandomNormal(rbm,1); RealVector parameters = rbm.parameterVector(); testDerivative(gradient,parameters,1.e-2,1.e-10,0.025); } } class TestGradientHV : public SingleObjectiveFunction{ public: TestGradientHV(ConvolutionalBinaryRBM* rbm): mpe_rbm(rbm){ m_features |= HAS_FIRST_DERIVATIVE; m_features |= CAN_PROPOSE_STARTING_POINT; }; std::string name() const { return "TestGradientHV"; } void setData(UnlabeledData const& data){ m_data = data; } void proposeStartingPoint(SearchPointType& startingPoint) const{ startingPoint = mpe_rbm->parameterVector(); } std::size_t numberOfVariables()const{ return mpe_rbm->numberOfParameters(); } double eval( SearchPointType const & parameter) const { mpe_rbm->setParameterVector(parameter); double result=0; for(std::size_t i =0; i != m_data.numberOfBatches();++i){ result+=sum(mpe_rbm->energy().logUnnormalizedProbabilityHidden( m_data.batch(i),blas::repeat(1,m_data.batch(i).size1()) )); } result/=m_data.numberOfElements(); return result; } double evalDerivative( SearchPointType const & parameter, FirstOrderDerivative & derivative ) const { mpe_rbm->setParameterVector(parameter); 
detail::ConvolutionalEnergyGradient gradient(mpe_rbm); GibbsOperator sampler(mpe_rbm); //create Energy from RBM //calculate the expectation of the energy gradient with respect to the data for(std::size_t i=0; i != m_data.numberOfBatches(); i++){ std::size_t currentBatchSize=m_data.batch(i).size1(); GibbsOperator::HiddenSampleBatch hiddenSamples(currentBatchSize,mpe_rbm->numberOfHN()); GibbsOperator::VisibleSampleBatch visibleSamples(currentBatchSize,mpe_rbm->numberOfVN()); hiddenSamples.state = m_data.batch(i); sampler.precomputeVisible(hiddenSamples,visibleSamples,blas::repeat(1.0,10)); gradient.addHV(hiddenSamples, visibleSamples); } derivative = gradient.result(); return eval(parameter); } private: ConvolutionalBinaryRBM* mpe_rbm; UnlabeledData m_data; }; BOOST_AUTO_TEST_CASE( ConvolutionalEnergyGradient_DerivativeHV ) { ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4,2,3); std::vector dataVec(10,RealVector(8)); for(std::size_t j = 0; j != 10; ++j){ for(std::size_t k = 0; k != 8; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); TestGradientHV gradient(&rbm); gradient.setData(data); for(std::size_t i = 0; i != 100; ++i){ initRandomNormal(rbm,1); RealVector parameters = rbm.parameterVector(); testDerivative(gradient,parameters,2.e-2,1.e-10,0.01); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/ConvolutionalPTTraining.cpp000066400000000000000000000032071314655040000214260ustar00rootroot00000000000000#define BOOST_TEST_MODULE RBM_ConvolutionalPTTraining #include #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_ConvolutionalPTTraining) BOOST_AUTO_TEST_CASE( ConvolutionalPTTraining_Bars ){ unsigned int trials = 1; unsigned int steps = 7001; unsigned int updateStep = 1000; std::size_t numTemperatures = 10; double learningRate = 0.05; BarsAndStripes problem(8); UnlabeledData data = problem.data(); for(unsigned int trial = 0; trial != trials; ++trial){ ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4,5,3); Rng::seed(42+trial); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-0.1,0.1); } rbm.setParameterVector(params); ConvolutionalBinaryParallelTempering cd(&rbm); cd.chain().setUniformTemperatureSpacing(numTemperatures); cd.setData(data); SteepestDescent optimizer; optimizer.setLearningRate(learningRate); optimizer.init(cd); double logLikelyHood = 0; for(std::size_t i = 0; i != steps; ++i){ if(i % updateStep == 0){ rbm.setParameterVector(optimizer.solution().point); logLikelyHood = negativeLogLikelihood(rbm,data); std::cout< #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_ConvolutionalRBMBasic) BOOST_AUTO_TEST_CASE( Structure ){ std::size_t inputSize1=6; std::size_t inputSize2=7; std::size_t visibleUnits = 42; std::size_t filterSize = 3; std::size_t numFilters = 5; std::size_t responseSize1 =4; std::size_t responseSize2 =5; std::size_t hiddenUnits = 4*5*numFilters; std::size_t numParams = hiddenUnits+visibleUnits + numFilters*filterSize*filterSize; //init RBMs and set structure ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(inputSize1,inputSize2,numFilters,filterSize); //check that setStructure actually created the right structure BOOST_CHECK_EQUAL(rbm.inputSize1(), inputSize1); BOOST_CHECK_EQUAL(rbm.inputSize2(), inputSize2); BOOST_CHECK_EQUAL(rbm.numberOfVN(), visibleUnits); BOOST_CHECK_EQUAL(rbm.visibleNeurons().size(), visibleUnits); 
BOOST_CHECK_EQUAL(rbm.filterSize1(), filterSize); BOOST_CHECK_EQUAL(rbm.filterSize2(), filterSize); BOOST_CHECK_EQUAL(rbm.numFilters(), numFilters); BOOST_CHECK_EQUAL(rbm.responseSize1(), responseSize1); BOOST_CHECK_EQUAL(rbm.responseSize2(), responseSize2); BOOST_CHECK_EQUAL(rbm.numberOfHN(), hiddenUnits); BOOST_CHECK_EQUAL(rbm.hiddenNeurons().size(), hiddenUnits); //test parameters BOOST_CHECK_EQUAL(rbm.numberOfParameters(), numParams); RealVector vector(numParams,0.0); for(std::size_t i = 0; i != numParams; ++i){ vector(i)=i; } //check that if we set the right parameters, the correct one comes out again rbm.setParameterVector(vector); RealVector paramsRet=rbm.parameterVector(); for(std::size_t i = 0; i != numParams; ++i){ BOOST_CHECK_CLOSE(vector(i),paramsRet(i),1.e-14); } //now check, that all parameters are at their right places std::size_t currParam = 0; for(std::size_t i = 0; i != numFilters; ++i){ for(std::size_t j = 0; j != filterSize; ++j){ for(std::size_t k = 0; k != filterSize; ++k,++currParam){ BOOST_CHECK_CLOSE(rbm.filters()[i](j,k), vector(currParam),1.e-14); } } } } BOOST_AUTO_TEST_CASE( Input ){ std::size_t inputSize1=4; std::size_t inputSize2=4; std::size_t visibleUnits = 16; std::size_t filterSize = 3; std::size_t numFilters = 2; std::size_t hiddenUnits = 8; unsigned int filterIndizes[][9]={ {0,1,2 ,4,5,6 ,8,9,10 }, {1,2,3 ,5,6,7 ,9,10,11 }, {4,5,6 ,8,9,10 ,12,13,14 }, {5,6,7 ,9,10,11 ,13,14,15 } }; //init RBMs and set structure ConvolutionalBinaryRBM rbm(Rng::globalRng); rbm.setStructure(inputSize1,inputSize2,numFilters,filterSize); initRandomNormal(rbm,1.0); RealMatrix W(hiddenUnits,visibleUnits,0.0); for(std::size_t i =0; i != 4; ++i){ for(std::size_t j =0; j != 9; ++j){ std::size_t x1 = j/3; std::size_t x2 = j%3; W(i,filterIndizes[i][j])=rbm.filters()[0](x1,x2); W(i+4,filterIndizes[i][j])=rbm.filters()[1](x1,x2); } } RealMatrix batchV(10,16); RealMatrix batchH(10,8); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 16; ++j){ batchV(i,j)=Rng::coinToss(); } for(std::size_t j = 0; j != 8; ++j){ batchH(i,j)=Rng::coinToss(); } } RealMatrix resultWV=prod(batchV,trans(W)); RealMatrix resultWH=prod(batchH,W); RealMatrix rbmWV(10,8,1.0); RealMatrix rbmWH(10,16,1.0); rbm.inputHidden(rbmWV,batchV); rbm.inputVisible(rbmWH,batchH); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 16; ++j){ BOOST_CHECK_CLOSE(rbmWH(i,j),resultWH(i,j),1.e-12); } } for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 8; ++j){ BOOST_CHECK_CLOSE(rbmWV(i,j),resultWV(i,j),1.e-12); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/Energy.cpp000066400000000000000000000136701314655040000160700ustar00rootroot00000000000000#include #include #include #define BOOST_TEST_MODULE RBM_Energy #include #include using namespace shark; //structure of values which is used for the tests struct RBMFixture { RBMFixture():rbm(Rng::globalRng) { rbm.setStructure(5,5); rbm.weightMatrix().clear(); hiddenState.resize(1,5); visibleState.resize(1,5); for(std::size_t i = 0; i != 5; ++i) { rbm.weightMatrix()(i,i) = double(i); rbm.hiddenNeurons().bias()(i) = double(i); rbm.visibleNeurons().bias()(i) = 5.0+i; visibleState(0,i) = 10.0+i; hiddenState(0,i) = 15.0+i; } visibleInput = prod(hiddenState,rbm.weightMatrix()); hiddenInput = prod(visibleState,trans(rbm.weightMatrix())); visibleStateVec = row(visibleState,0); hiddenStateVec = row(hiddenState,0); visibleInputVec = row(visibleInput,0); hiddenInputVec = row(hiddenInput,0); energyResult = - 
inner_prod(rbm.hiddenNeurons().bias(),hiddenStateVec); energyResult-= inner_prod(rbm.visibleNeurons().bias(),visibleStateVec); energyResult-= inner_prod(hiddenStateVec,prod(rbm.weightMatrix(),visibleStateVec)); } BinaryRBM rbm; RealMatrix visibleState; RealMatrix hiddenState; RealMatrix visibleInput; RealMatrix hiddenInput; RealVector visibleStateVec; RealVector hiddenStateVec; RealVector visibleInputVec; RealVector hiddenInputVec; double energyResult; }; BOOST_FIXTURE_TEST_SUITE (RBM_Energy, RBMFixture ) BOOST_AUTO_TEST_CASE( Energy_Input) { Energy energy(rbm.energy()); RealMatrix visibleInputResult(1,5); RealMatrix hiddenInputResult(1,5); energy.inputHidden(hiddenInputResult,visibleState); energy.inputVisible(visibleInputResult,hiddenState); BOOST_CHECK_SMALL(norm_sqr(row(visibleInputResult-visibleInput,0)), 1.e-15); BOOST_CHECK_SMALL(norm_sqr(row(hiddenInputResult-hiddenInput,0)), 1.e-15); } BOOST_AUTO_TEST_CASE( Energy_EnergyFromInput ) { Energy energy(rbm.energy()); RealVector energyVisible = energy.energyFromVisibleInput(visibleInput,hiddenState,visibleState); RealVector energyHidden = energy.energyFromHiddenInput(hiddenInput,hiddenState,visibleState); BOOST_CHECK_SMALL(energyVisible(0) - energyResult, 1.e-15); BOOST_CHECK_SMALL(energyHidden(0) - energyResult, 1.e-15); } BOOST_AUTO_TEST_CASE( Energy_SimpleEnergy ) { Energy energy(rbm.energy()); RealVector simpleEnergy = energy.energy(hiddenState,visibleState); BOOST_REQUIRE_SMALL(simpleEnergy(0) - energyResult, 1.e-15); //now some random sampling to get the energy { BinaryRBM bigRBM(Rng::globalRng); bigRBM.setStructure(10,18); initRandomNormal(bigRBM,2); RealMatrix inputBatch(10,10); RealMatrix hiddenBatch(10,18); RealVector energies(10); for(std::size_t j = 0; j != 10; ++j) { for(std::size_t k = 0; k != 10; ++k) { inputBatch(j,k)=Rng::coinToss(0.5); } for(std::size_t k = 0; k != 18; ++k) { hiddenBatch(j,k)=Rng::coinToss(0.5); } energies(j) = - inner_prod(bigRBM.hiddenNeurons().bias(),row(hiddenBatch,j)); energies(j)-= inner_prod(bigRBM.visibleNeurons().bias(),row(inputBatch,j)); energies(j)-= inner_prod(row(hiddenBatch,j),prod(bigRBM.weightMatrix(),row(inputBatch,j))); } Energy bigEnergy(bigRBM.energy()); RealVector testEnergies=bigEnergy.energy(hiddenBatch,inputBatch); for(std::size_t i = 0; i != 10; ++i) { BOOST_CHECK_CLOSE(energies(i),testEnergies(i),1.e-5); } } } BOOST_AUTO_TEST_CASE( Energy_UnnormalizedProbabilityHidden ) { //all possible state combinations for 2 visible units RealMatrix visibleStateSpace(4,2); visibleStateSpace(0,0)=0; visibleStateSpace(0,1)=0; visibleStateSpace(1,0)=0; visibleStateSpace(1,1)=1; visibleStateSpace(2,0)=1; visibleStateSpace(2,1)=0; visibleStateSpace(3,0)=1; visibleStateSpace(3,1)=1; //create RBM with 2 visible and 4 hidden units and initialize it randomly BinaryRBM rbm(Rng::globalRng); rbm.setStructure(2,4); initRandomNormal(rbm,2); //the hiddenstate to test is the most complex (1,1,1,1) case RealMatrix hiddenState = RealScalarMatrix(4,4,1.0); //calculate energies for the state space, we now from the previous tests, that this result is correct Energy energy(rbm.energy()); RealVector energies = energy.energy(hiddenState,visibleStateSpace); //now test for several choices of beta for(std::size_t i = 0; i <= 10; ++i) { //calculate unnormalized probability of the hiddenState by integrating over the visible state space double pTest=sum(exp(-(i*0.1)*energies)); //calculate now the test itself double p=std::exp(energy.logUnnormalizedProbabilityHidden(hiddenState,blas::repeat(i*0.1,4))(0)); 
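//Rationale: for inverse temperature beta, the unnormalized probability of a
//hidden state is p(h) = sum_v exp(-beta*E(v,h)). pTest sums over the full
//visible state space enumerated above, so it must match the value returned
//by logUnnormalizedProbabilityHidden up to numerical error.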
BOOST_CHECK_CLOSE(pTest,p,2.e-5); } } BOOST_AUTO_TEST_CASE( Energy_UnnormalizedProbabilityVisible ) { //all possible state combinations for 2 hidden units RealMatrix hiddenStateSpace(4,2); hiddenStateSpace(0,0)=0; hiddenStateSpace(0,1)=0; hiddenStateSpace(1,0)=0; hiddenStateSpace(1,1)=1; hiddenStateSpace(2,0)=1; hiddenStateSpace(2,1)=0; hiddenStateSpace(3,0)=1; hiddenStateSpace(3,1)=1; //create RBM with 4 visible and 2 hidden units and initialize it randomly BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,2); initRandomNormal(rbm,2); //the hiddenstate to test is the most complex (1,1,1,1) case RealMatrix visibleState = RealScalarMatrix(4,4,1.0); //calculate energies for the state space, we now from the previous tests, that this result is correct Energy energy(rbm.energy()); RealVector energies = energy.energy(hiddenStateSpace,visibleState); //now test for several choices of beta for(std::size_t i = 0; i <= 10; ++i) { //calculate unnormalized probability of the visible state by integrating over the hidden state space double pTest=sum(exp(-(i*0.1)*energies)); //calculate now the test itself double p=std::exp(energy.logUnnormalizedProbabilityVisible(visibleState,blas::repeat(i*0.1,4))(0)); BOOST_CHECK_CLOSE(pTest,p,2.e-5); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/ExactGradient.cpp000066400000000000000000000072271314655040000173620ustar00rootroot00000000000000#include #include #include #define BOOST_TEST_MODULE RBM_ExactGradient #include #include #include "../ObjectiveFunctions/TestObjectiveFunction.h" using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_ExactGradient) BOOST_AUTO_TEST_CASE( ExactGradient_NegLogLikelihood_MoreHidden ) { //create RBM with 8 visible and 16 hidden units BinaryRBM rbm(Rng::globalRng); rbm.setStructure(8,16); //now test for several random subsets of possible training data and random initializations of the rbm for(std::size_t i = 0; i != 10; ++i){ initRandomNormal(rbm,2); RealVector parameters=rbm.parameterVector(); initRandomNormal(rbm,2); std::vector dataVec(50,RealVector(8)); for(std::size_t j = 0; j != 50; ++j){ for(std::size_t k = 0; k != 8; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); //now calculate the test ExactGradient gradient(&rbm); ExactGradient::FirstOrderDerivative derivative; gradient.setData(data); double logProbTest = gradient.evalDerivative(parameters,derivative); long double logProb = negativeLogLikelihood(rbm,data)/50; BOOST_CHECK_CLOSE(logProbTest,logProb,1.e-5); } } BOOST_AUTO_TEST_CASE( ExactGradient_NegLogLikelihood_LessHidden ) { //create RBM with 8 visible and 16 hidden units BinaryRBM rbm(Rng::globalRng); rbm.setStructure(8,4); //now test for several random subsets of possible training data and random initializations of the rbm for(std::size_t i = 0; i != 10; ++i){ initRandomNormal(rbm,2); RealVector parameters=rbm.parameterVector(); initRandomNormal(rbm,2); std::vector dataVec(50,RealVector(8)); for(std::size_t j = 0; j != 50; ++j){ for(std::size_t k = 0; k != 8; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); //now calculate the test ExactGradient gradient(&rbm); ExactGradient::FirstOrderDerivative derivative; gradient.setData(data); double logProbTest = gradient.evalDerivative(parameters,derivative); long double logProb = negativeLogLikelihood(rbm,data)/50; BOOST_CHECK_CLOSE(logProbTest,logProb,1.e-5); } } BOOST_AUTO_TEST_CASE( ExactGradient_NegLogLikelihood_Gradient_MoreHidden ) { BinaryRBM rbm(Rng::globalRng); 
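//This test validates the analytic gradient of the exact negative
//log-likelihood against a numerical (finite difference) gradient check:
//testDerivative, from "../ObjectiveFunctions/TestObjectiveFunction.h",
//perturbs the parameters and compares the resulting difference quotients
//with the derivative returned by evalDerivative.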
rbm.setStructure(4,8); std::vector dataVec(50,RealVector(4)); for(std::size_t j = 0; j != 50; ++j){ for(std::size_t k = 0; k != 4; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); for(std::size_t i = 0; i != 10; ++i){ initRandomNormal(rbm,1); RealVector parameters=rbm.parameterVector(); ExactGradient gradient(&rbm); gradient.setData(data); testDerivative(gradient,parameters,1.e-2,1.e-10,0.5); } } BOOST_AUTO_TEST_CASE( ExactGradient_NegLogLikelihood_Gradient_LessHidden ) { BinaryRBM rbm(Rng::globalRng); rbm.setStructure(8,4); std::vector dataVec(50,RealVector(8)); for(std::size_t j = 0; j != 50; ++j){ for(std::size_t k = 0; k != 8; ++k){ dataVec[j](k)=Rng::coinToss(0.5); } } UnlabeledData data = createDataFromRange(dataVec,25); for(std::size_t i = 0; i != 10; ++i){ initRandomNormal(rbm,2); RealVector parameters=rbm.parameterVector(); ExactGradient gradient(&rbm); gradient.setData(data); testDerivative(gradient,parameters,1.e-2,1.e-10,0.5); } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/ExactGradientTraining.cpp000066400000000000000000000027441314655040000210550ustar00rootroot00000000000000#define BOOST_TEST_MODULE RBM_ExactGradientTraining #include #include #include #include #include #include #include #include using namespace shark; using namespace std; BOOST_AUTO_TEST_SUITE (RBM_ExactGradientTraining) BOOST_AUTO_TEST_CASE( ExactGradientTraining_Bars ){ unsigned int trials = 1; unsigned int steps = 1001; unsigned int updateStep = 100; BarsAndStripes problem; UnlabeledData data = problem.data(); for(unsigned int trial = 0; trial != trials; ++trial){ Rng::seed(42+trial); BinaryRBM rbm(Rng::globalRng); rbm.setStructure(16,8); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-0.1,0.1); } rbm.setParameterVector(params); ExactGradient gradient(&rbm); gradient.setData(data); SteepestDescent optimizer; optimizer.setLearningRate(0.2); optimizer.setMomentum(0); optimizer.init(gradient); double logLikelyHood = 0; for(std::size_t i = 0; i != steps; ++i){ if(i % updateStep == 0){ std::cout< #include #define BOOST_TEST_MODULE RBM_GaussianLayer #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_GaussianLayer) BOOST_AUTO_TEST_CASE( GaussianLayer_SufficientStatistics){ GaussianLayer layer; RealMatrix statistics(10,3); layer.resize(3); RealMatrix input(10,3); RealMatrix testInput(10,3); Rng::seed(42); for(std::size_t test = 0; test != 10000; ++test){ double beta = Rng::uni(0,1); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = Rng::gauss(0,10); } for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ input(i,j) = Rng::gauss(0,10); } //calculate result row(testInput,i) = row(input,i)*beta + layer.bias(); } layer.sufficientStatistics(input,statistics,blas::repeat(beta,10)); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_SMALL(testInput(i,j)-statistics(i,j),1.e-13); } } } } BOOST_AUTO_TEST_CASE( GaussianLayer_Parameters){ GaussianLayer layer; layer.resize(20); RealVector parameters(20); Rng::seed(42); for(std::size_t j = 0; j != 20; ++j){ parameters(j) = Rng::gauss(0,1); } layer.setParameterVector(parameters); for(std::size_t j = 0; j != 20; ++j){ BOOST_CHECK_SMALL(layer.parameterVector()(j) - parameters(j),1.e-15); } } BOOST_AUTO_TEST_CASE( GaussianLayer_Sample){ GaussianLayer layer; GaussianLayer::StatisticsBatch statistics(3,5); layer.resize(5); Rng::seed(42); for(std::size_t i = 0; i != 3; ++i){ 
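//fill the mean parameters of the factorized Gaussian with uniform draws in
//[0,1); the sampler is later checked to reproduce these means with unit
//variance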
for(std::size_t j = 0; j != 5; ++j){ statistics(i,j) = Rng::uni(0.0,1.0); } } const std::size_t numSamples = 5000000; RealMatrix mean(3,5); RealMatrix variance(3,5); mean.clear(); variance.clear(); RealMatrix samples(3,5); for(std::size_t s = 0; s != numSamples; ++s){ layer.sample(statistics,samples,0.0,Rng::globalRng); mean +=samples; noalias(variance) += sqr(samples-statistics); } mean/=numSamples; variance/=numSamples; for(std::size_t i = 0; i != 3; ++i){ for(std::size_t j = 0; j != 5; ++j){ BOOST_CHECK_SMALL(sqr(mean(i,j) - statistics(i,j)),1.e-5); BOOST_CHECK_SMALL(sqr(variance(i,j) - 1),1.e-2); } } } BOOST_AUTO_TEST_CASE( GaussianLayer_Marginalize){ GaussianLayer layer; layer.resize(3); RealVector input(3); Rng::seed(42); //set distribution, sample input and calculate statistics for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = 0.5*j+1; } for(std::size_t j = 0; j != 3; ++j){ input(j) = 0.5*j+2; } double result = layer.logMarginalize(input,1); double resultHalvedBeta = layer.logMarginalize(input,0.5); //result by wolfram alpha double testResult = std::log(225.639 * 7472.15 * 672622); double testResultHalvedBeta = std::log(33.6331 * 193.545 * 1836.31); BOOST_CHECK_CLOSE(result, testResult, 0.01); BOOST_CHECK_CLOSE(resultHalvedBeta, testResultHalvedBeta, 0.01); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/GibbsOperator.cpp000066400000000000000000000251711314655040000174000ustar00rootroot00000000000000#include #include #include #include #include #define BOOST_TEST_MODULE RBM_GibbsOperator #include #include using namespace shark; //this is a mockup test, meaning we implement a failsafe Energy& Neuron to test the Gibbs Operator without noise! //mockup for the neuron struct NeuronMockup{ NeuronMockup(int statMove,int sampleMul):statMove(statMove),sampleMul(sampleMul){} void sufficientStatistics(RealVector input,RealVector& statistics, double beta){ statistics(0) = input(0)*beta + statMove; } template void sample(RealVector stat,RealVector& state,Rng& rng){ state(0) = stat(0)*sampleMul; } double statMove; double sampleMul; }; //mockup for the Energy encapsulated in an empty RBM Mockup struct RBMMockup{ public: typedef int VectorType; struct Energy{ struct Structure{ NeuronMockup hiddenNeurons(){ return NeuronMockup(1,2); } NeuronMockup visibleNeurons(){ return NeuronMockup(5,7); } }; typedef double HiddenInput; typedef double HiddenStatistics; typedef double HiddenFeatures; typedef double HiddenState; typedef double VisibleInput; typedef double VisibleStatistics; typedef double VisibleFeatures; typedef double VisibleState; typedef double VectorType; Energy(Structure*){} //since we want to check that both versions are used, we will choose different return values for both void inputHidden(RealVector& input, RealVector state, RealVector& features){ features(0) = state(0)*state(0); input(0) = features(0)+9; } void inputHidden(RealVector& input, RealVector state){ input(0) = state(0)*state(0); //omit the +9 } void inputVisible(RealVector& input, RealVector state, RealVector& features){ features(0) = state(0)*state(0)*state(0); input(0) = features(0)/2.0; } void inputVisible(RealVector& input, RealVector state){ input(0) = state(0)*state(0) *state(0); //omit the /2.0 } RealVector energyFromVisibleInput( RealVector const& input, RealVector const& hiddenState, RealVector const& visibleState ){ RealVector ret(1); ret(0)=hiddenState(0)*visibleState(0) + input(0); return ret; } }; Energy::Structure& structure(){ static Energy::Structure structure; return structure; } NeuronMockup 
hiddenNeurons(){ return structure().hiddenNeurons(); } NeuronMockup visibleNeurons(){ return structure().visibleNeurons(); } int& rng(){ static int i; return i; } }; BOOST_AUTO_TEST_SUITE (RBM_GibbsOperator) BOOST_AUTO_TEST_CASE( GibbsOperator_Test_Store ) { RBMMockup rbmMockup; GibbsOperator chain(&rbmMockup); GibbsOperator::HiddenSampleBatch hidden(1,0); GibbsOperator::HiddenSampleBatch hiddenTest(1,0); GibbsOperator::VisibleSampleBatch visible(1,0); GibbsOperator::VisibleSampleBatch visibleTest(1,0); chain.flags() |= StoreHiddenFeatures; chain.flags() |= StoreVisibleFeatures; hidden.state(0) = 3; hidden.features(0) = 4; hidden.input(0) = 5; hidden.statistics(0) = 6; visible.state(0) = 5; visible.features(0) = 6; visible.input(0) = 7; visible.statistics(0) = 8; //test hidden statistics visibleTest=visible; chain.precomputeHidden(hiddenTest,visibleTest,0.5); BOOST_CHECK_SMALL(visibleTest.features(0)-25.0,1.e-20); BOOST_CHECK_SMALL(hiddenTest.input(0)-34.0,1.e-20); BOOST_CHECK_SMALL(hiddenTest.statistics(0)-18.0,1.e-20); //test hidden sampling chain.sampleHidden(hiddenTest); //old values should not change BOOST_CHECK_SMALL(hiddenTest.input(0)-34.0,1.e-20); BOOST_CHECK_SMALL(hiddenTest.statistics(0)-18.0,1.e-20); BOOST_CHECK_SMALL(hiddenTest.state(0)-36,1.e-20); //test visible statistics hiddenTest=hidden; chain.precomputeVisible(hiddenTest,visibleTest,0.5); BOOST_CHECK_SMALL(hiddenTest.features(0)-27.0,1.e-20); BOOST_CHECK_SMALL(visibleTest.input(0)-13.5,1.e-20); BOOST_CHECK_SMALL(visibleTest.statistics(0)-11.75,1.e-20); //test visible sampling chain.sampleVisible(visibleTest); BOOST_CHECK_SMALL(visibleTest.input(0)-13.5,1.e-20); BOOST_CHECK_SMALL(visibleTest.statistics(0)-11.75,1.e-20); BOOST_CHECK_SMALL(visibleTest.state(0)-11.75*7,1.e-20); } BOOST_AUTO_TEST_CASE( GibbsOperator_Test ) { RBMMockup rbmMockup; GibbsOperator chain(&rbmMockup); GibbsOperator::HiddenSampleBatch hidden(1,0); GibbsOperator::HiddenSampleBatch hiddenTest(1,0); GibbsOperator::VisibleSampleBatch visible(1,0); GibbsOperator::VisibleSampleBatch visibleTest(1,0); hidden.state(0) = 3; hidden.features(0) = 4; hidden.input(0) = 5; hidden.statistics(0) = 6; visible.state(0) = 5; visible.features(0) = 6; visible.input(0) = 7; visible.statistics(0) = 8; //test hidden statistics chain.precomputeHidden(hiddenTest,visible,0.5); BOOST_CHECK_SMALL(hiddenTest.input(0)-25.0,1.e-20); BOOST_CHECK_SMALL(hiddenTest.statistics(0)-13.5,1.e-20); //test hidden sampling chain.sampleHidden(hiddenTest); //old values should not change BOOST_CHECK_SMALL(hiddenTest.input(0)-25.0,1.e-20); BOOST_CHECK_SMALL(hiddenTest.statistics(0)-13.5,1.e-20); BOOST_CHECK_SMALL(hiddenTest.state(0)-27,1.e-20); //test visible statistics chain.precomputeVisible(hidden,visibleTest,0.5); BOOST_CHECK_SMALL(visibleTest.input(0)-27,1.e-20); BOOST_CHECK_SMALL(visibleTest.statistics(0)-18.5,1.e-20); //test visible sampling chain.sampleVisible(visibleTest); BOOST_CHECK_SMALL(visibleTest.input(0)-27,1.e-20); BOOST_CHECK_SMALL(visibleTest.statistics(0)-18.5,1.e-20); BOOST_CHECK_SMALL(visibleTest.state(0)-18.5*7,1.e-20); } BOOST_AUTO_TEST_CASE( GibbsOperator_Energy) { RBMMockup rbmMockup; GibbsOperator chain(&rbmMockup); GibbsOperator::HiddenSampleBatch hidden(1,0); GibbsOperator::VisibleSampleBatch visible(1,0); //test swap of hidden hidden.state(0) = 6; hidden.features(0) = 2; hidden.input(0) = 3; hidden.statistics(0) = 4; visible.state(0) = 5; visible.features(0) = 6; visible.input(0) = 7; visible.statistics(0) = 8; RealVector energy = 
chain.calculateEnergy(hidden,visible); BOOST_CHECK_SMALL(energy(0)-37.0, 1.e-20); } BOOST_AUTO_TEST_CASE( GibbsOperator_Sample_Swap) { RBMMockup rbmMockup; GibbsOperator chain(&rbmMockup); GibbsOperator::HiddenSampleBatch hidden1(1,0); GibbsOperator::HiddenSampleBatch hidden2(1,0); GibbsOperator::VisibleSampleBatch visible1(1,0); GibbsOperator::VisibleSampleBatch visible2(1,0); //test swap of hidden hidden1.state(0) = 1; hidden1.features(0) = 2; hidden1.input(0) = 3; hidden1.statistics(0) = 4; hidden2.state(0) = 5; hidden2.features(0) = 6; hidden2.input(0) = 7; hidden2.statistics(0) = 8; swap(hidden1,hidden2); BOOST_CHECK_SMALL(hidden1.state(0)-5.0,1.e-20); BOOST_CHECK_SMALL(hidden1.features(0)-6.0,1.e-20); BOOST_CHECK_SMALL(hidden1.input(0)-7.0,1.e-20); BOOST_CHECK_SMALL(hidden1.statistics(0)-8.0,1.e-20); BOOST_CHECK_SMALL(hidden2.state(0)-1.0,1.e-20); BOOST_CHECK_SMALL(hidden2.features(0)-2.0,1.e-20); BOOST_CHECK_SMALL(hidden2.input(0)-3.0,1.e-20); BOOST_CHECK_SMALL(hidden2.statistics(0)-4.0,1.e-20); //test swap of visible visible1.state(0) = 1; visible1.features(0) = 2; visible1.input(0) = 3; visible1.statistics(0) = 4; visible2.state(0) = 5; visible2.features(0) = 6; visible2.input(0) = 7; visible2.statistics(0) = 8; swap(hidden1,hidden2); BOOST_CHECK_SMALL(visible2.state(0)-5.0,1.e-20); BOOST_CHECK_SMALL(visible2.features(0)-6.0,1.e-20); BOOST_CHECK_SMALL(visible2.input(0)-7.0,1.e-20); BOOST_CHECK_SMALL(visible2.statistics(0)-8.0,1.e-20); BOOST_CHECK_SMALL(visible1.state(0)-1.0,1.e-20); BOOST_CHECK_SMALL(visible1.features(0)-2.0,1.e-20); BOOST_CHECK_SMALL(visible1.input(0)-3.0,1.e-20); BOOST_CHECK_SMALL(visible1.statistics(0)-4.0,1.e-20); } ////BOOST_AUTO_TEST_CASE_EXPECTED_FAILURES(GibbsOperator_Create_Sample, 1); BOOST_AUTO_TEST_CASE( GibbsOperator_Create_Sample_Store) { RBMMockup rbmMockup; GibbsOperator chain(&rbmMockup); chain.flags() |= StoreHiddenFeatures; chain.flags() |= StoreVisibleStatistics; chain.flags() |= StoreVisibleFeatures; chain.flags() |= StoreVisibleInput; GibbsOperator::HiddenSampleBatch hidden(1,0); GibbsOperator::VisibleSampleBatch visible(1,0); RealVector state(1); state(0) = 2; chain.createSample( hidden, visible, state, 0.5); //check the states BOOST_CHECK_SMALL(hidden.state(0)-15.0,1.e-20); BOOST_CHECK_SMALL(hidden.features(0)-3375,1.e-20); BOOST_CHECK_SMALL(hidden.input(0)-13.0,1.e-20); BOOST_CHECK_SMALL(hidden.statistics(0)-7.5,1.e-20); BOOST_CHECK_SMALL(visible.state(0) - 2,1.e-20); BOOST_CHECK_SMALL(visible.features(0) - 4.0,1.e-20); BOOST_CHECK_SMALL(visible.input(0) - 1687.5,1.e-20); BOOST_CHECK_SMALL(visible.statistics(0) - 848.75,1.e-20); } BOOST_AUTO_TEST_CASE( GibbsOperator_Create_Sample) { RBMMockup rbmMockup; GibbsOperator chain(&rbmMockup); GibbsOperator::HiddenSampleBatch hidden(1,0); GibbsOperator::VisibleSampleBatch visible(1,0); RealVector state(1); state(0) = 2; chain.createSample( hidden, visible, state, 0.5); //check the states BOOST_CHECK_SMALL(visible.state(0)-state(0),1.e-20); BOOST_CHECK_SMALL(hidden.state(0)-6.0,1.e-20); BOOST_CHECK_SMALL(hidden.input(0)-4.0,1.e-20); BOOST_CHECK_SMALL(hidden.statistics(0)-3,1.e-20); } //BOOST_AUTO_TEST_CASE( GibbsOperator_Energy_Binary) //{ // typedef RBM,Rng::rng_type > RBMType; // RBMType rbm(Rng::globalRng); // GibbsOperator::HiddenSampleBatch hidden(1,5); // GibbsOperator::VisibleSampleBatch visible(1,5); // GibbsOperator gibbsOperator(&rbm); // // // rbm.structure().setStructure(5,5); // rbm.structure().weightMatrix(0,0).clear(); // // for(std::size_t i = 0; i != 5; ++i){ // 
rbm.structure().weightMatrix(0,0)(i,i) = i; // rbm.hiddenNeurons().bias()(i) = i; // rbm.visibleNeurons().bias()(i) = 5+i; // visible.state(i) = 10+i; // hidden.state(i) = 15+i; // } // // visible.input = prod(hidden.state,rbm.structure().weightMatrix(0,0)); // // double energyResult = -inner_prod(rbm.hiddenNeurons().bias(),hidden.state); // energyResult-= inner_prod(rbm.visibleNeurons().bias(),visible.state); // energyResult-= inner_prod(hidden.state,prod(rbm.structure().weightMatrix(0,0),visible.state)); // // double testEnergy = gibbsOperator.calculateEnergy(hidden,visible); // BOOST_CHECK_SMALL(testEnergy-energyResult, 1.e-15); // //} BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/RBM/MarkovChain.cpp000066400000000000000000000043251314655040000170360ustar00rootroot00000000000000#include #define BOOST_TEST_MODULE RBM_MarkovChain #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_MarkovChain) BOOST_AUTO_TEST_CASE( MarkovChain_Distribution ) { const std::size_t batchSize = 16; const std::size_t numSamples = 10000; double states[]={ 0,0,0,0, 1,0,0,0, 0,1,0,0, 1,1,0,0, 0,0,1,0, 1,0,1,0, 0,1,1,0, 1,1,1,0, 0,0,0,1, 1,0,0,1, 0,1,0,1, 1,1,0,1, 0,0,1,1, 1,0,1,1, 0,1,1,1, 1,1,1,1, }; RealMatrix stateMatrix = blas::adapt_matrix(16,4,states); //create rbm and chain BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-1,1); } rbm.setParameterVector(params); MarkovChain > chain(&rbm); chain.setBatchSize(batchSize); chain.initializeChain(RealMatrix(batchSize,4,0)); chain.step(1000);//burn in //evaluate distribution for all beta values RealVector pHidden = exp(rbm.energy().logUnnormalizedProbabilityHidden(stateMatrix,blas::repeat(1.0,16))); RealVector pVisible = exp(rbm.energy().logUnnormalizedProbabilityVisible(stateMatrix,blas::repeat(1.0,16))); //normalize to 1 pHidden /= sum(pHidden); pVisible /= sum(pVisible); RealVector pHiddenHist(16,0.0); RealVector pVisibleHist(16,0.0); for(std::size_t s = 0; s != numSamples; ++s){ chain.step(1); //get state number for every sampled state and add the sample to the histogram for(std::size_t k = 0; k != batchSize; ++k){ std::size_t stateH = 0; std::size_t stateV = 0; for(std::size_t i = 0; i != 4; ++i){ stateH += chain.samples().hidden.state(k,i) > 0? (1< 0? 
(1< #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_PCDTraining) BOOST_AUTO_TEST_CASE( PCDTraining_Bars ){ BarsAndStripes problem; UnlabeledData data = problem.data(); Rng::seed(0); BinaryRBM rbm(Rng::globalRng); rbm.setStructure(16,8); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::gauss(0,1); } rbm.setParameterVector(params); BinaryPCD cd(&rbm); cd.setNumberOfSamples(32); cd.setBatchSize(16); cd.setData(data); SteepestDescent optimizer; optimizer.setLearningRate(0.05); optimizer.setMomentum(0); optimizer.init(cd); double logLikelyhood = 0; for(std::size_t i = 0; i != 5001; ++i){ if(i % 5000 == 0){ rbm.setParameterVector(optimizer.solution().point); logLikelyhood = negativeLogLikelihood(rbm,data); std::cout< #include #include #include #include #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_ParallelTemperingTraining) BOOST_AUTO_TEST_CASE( ParallelTemperingTraining_Bars ){ unsigned int trials = 1; unsigned int steps = 3001; unsigned int updateStep = 1000; std::size_t numHidden = 8; std::size_t numTemperatures = 5; double learningRate = 0.1; BarsAndStripes problem(8); UnlabeledData data = problem.data(); for(unsigned int trial = 0; trial != trials; ++trial){ BinaryRBM rbm(Rng::globalRng); rbm.setStructure(16,numHidden); Rng::seed(42+trial); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-0.1,0.1); } rbm.setParameterVector(params); BinaryParallelTempering cd(&rbm); cd.chain().setUniformTemperatureSpacing(numTemperatures); cd.numBatches()=2; cd.setData(data); SteepestDescent optimizer; optimizer.setLearningRate(learningRate); optimizer.setMomentum(0); optimizer.init(cd); double logLikelyHood = 0; for(std::size_t i = 0; i != steps; ++i){ if(i % updateStep == 0){ rbm.setParameterVector(optimizer.solution().point); logLikelyHood = negativeLogLikelihood(rbm,data); std::cout< #define BOOST_TEST_MODULE RBM_TemperedMarkovChain #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_TemperedMarkovChain) BOOST_AUTO_TEST_CASE( TemperedMarkovChain_Distribution ) { const std::size_t numTemperatures = 10; const std::size_t numSamples = 10000; double states[]={ 0,0,0,0, 1,0,0,0, 0,1,0,0, 1,1,0,0, 0,0,1,0, 1,0,1,0, 0,1,1,0, 1,1,1,0, 0,0,0,1, 1,0,0,1, 0,1,0,1, 1,1,0,1, 0,0,1,1, 1,0,1,1, 0,1,1,1, 1,1,1,1, }; RealMatrix stateMatrix = blas::adapt_matrix(16,4,states); //create rbm and pt object BinaryRBM rbm(Rng::globalRng); rbm.setStructure(4,4); RealVector params(rbm.numberOfParameters()); for(std::size_t i = 0; i != params.size();++i){ params(i) = Rng::uni(-1,1); } rbm.setParameterVector(params); TemperedMarkovChain > pt(&rbm); pt.setNumberOfTemperatures(numTemperatures); RealVector beta(numTemperatures); for(std::size_t i = 0; i != numTemperatures; ++i){ double factor = numTemperatures - 1; beta(i) = 1.0 - i/factor; pt.setBeta(i,1.0 - i/factor); } pt.initializeChain(RealMatrix(numTemperatures,4,0)); pt.step(1000);//burn in //evaluate distribution for all beta values RealMatrix pHidden(numTemperatures,16); RealMatrix pVisible(numTemperatures,16); for(std::size_t i = 0; i != numTemperatures; ++i){ row(pHidden,i) = exp(rbm.energy().logUnnormalizedProbabilityHidden(stateMatrix,blas::repeat(beta(i),16))); row(pVisible,i) = exp(rbm.energy().logUnnormalizedProbabilityVisible(stateMatrix,blas::repeat(beta(i),16))); //normalize to 1 row(pHidden,i) /= sum(row(pHidden,i)); row(pVisible,i) /= sum(row(pVisible,i)); 
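//each tempered chain samples from p_beta(state) ~ exp(-beta*E(state)), so
//the exact per-temperature marginals computed here serve as the reference
//against which the sampled histograms below are compared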
} RealMatrix pHiddenHist(numTemperatures,16,0.0); RealMatrix pVisibleHist(numTemperatures,16,0.0); for(std::size_t s = 0; s != numSamples; ++s){ pt.step(1); //get state number for every sampled state and add the sample to the histogram for(std::size_t t = 0; t != numTemperatures; ++t){ std::size_t stateH = 0; std::size_t stateV = 0; for(std::size_t i = 0; i != 4; ++i){ stateH += pt.samples().hidden.state(t,i) > 0? (1< 0? (1< #include #include #define BOOST_TEST_MODULE RBM_TruncatedExponentialLayer #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (RBM_TruncatedExponentialLayer) BOOST_AUTO_TEST_CASE( TruncatedExponentialLayer_SufficientStatistics){ TruncatedExponentialLayer layer; TruncatedExponentialLayer::StatisticsBatch statistics(10,3); layer.resize(3); RealMatrix input(10,3); RealMatrix testInput(10,3); Rng::seed(42); for(std::size_t test = 0; test != 10000; ++test){ double beta = Rng::uni(0,1); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = Rng::gauss(0,10); } for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ input(i,j) = Rng::gauss(0,10); } //calculate result row(testInput,i) = row(input,i) + layer.bias(); row(testInput,i) *= beta; } layer.sufficientStatistics(input,statistics,blas::repeat(beta,10)); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 3; ++j){ BOOST_CHECK_SMALL(testInput(i,j)+statistics.lambda(i,j),1.e-10); BOOST_CHECK_SMALL(exp(testInput(i,j))-statistics.expMinusLambda(i,j),1.e-7); } } } } BOOST_AUTO_TEST_CASE( TruncatedExponentialLayer_Parameters){ TruncatedExponentialLayer layer; layer.resize(20); RealVector parameters(20); Rng::seed(42); for(std::size_t j = 0; j != 20; ++j){ parameters(j) = Rng::gauss(0,1); } layer.setParameterVector(parameters); for(std::size_t j = 0; j != 20; ++j){ BOOST_CHECK_SMALL(layer.parameterVector()(j) - parameters(j),1.e-10); BOOST_CHECK_SMALL(layer.bias()(j) - parameters(j),1.e-10); } } BOOST_AUTO_TEST_CASE( TruncatedExponentialLayer_Sample){ TruncatedExponentialLayer layer; TruncatedExponentialLayer::StatisticsBatch statistics(10,5); layer.resize(5); Rng::seed(42); for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 5; ++j){ statistics.lambda(i,j) = Rng::uni(-1,1); } } statistics.expMinusLambda = exp(-statistics.lambda); const std::size_t numSamples = 100000; RealMatrix mean(10,5); mean.clear(); for(std::size_t i = 0; i != numSamples; ++i){ RealMatrix samples(10,5); layer.sample(statistics,samples,1.0,Rng::globalRng); mean+=samples; } mean/=numSamples; for(std::size_t i = 0; i != 10; ++i){ for(std::size_t j = 0; j != 5; ++j){ double analyticMean = 1.0/statistics.lambda(i,j) -1.0/(std::exp(statistics.lambda(i,j))-1); BOOST_CHECK_CLOSE(mean(i,j) , analyticMean,1.); } } } BOOST_AUTO_TEST_CASE( TruncatedExponentialLayer_Marginalize){ TruncatedExponentialLayer layer; layer.resize(3); RealVector input(3); Rng::seed(42); for(std::size_t j = 0; j != 3; ++j){ layer.bias()(j) = -0.5*j-1; } for(std::size_t j = 0; j != 3; ++j){ input(j) = -0.5*j-2; } double testResult = std::log(0.316738 * 0.245421 * 0.198652); double testResultHalvedBeta = std::log(0.517913 * 0.432332 * 0.367166); double result = layer.logMarginalize(input,1); double resultHalvedBeta = layer.logMarginalize(input,0.5); BOOST_CHECK_CLOSE(result , testResult , 0.01); BOOST_CHECK_CLOSE(resultHalvedBeta , testResultHalvedBeta , 0.01); } BOOST_AUTO_TEST_SUITE_END() 
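//--------------------------------------------------------------------------
//A minimal standalone sketch (added for illustration; not part of the Shark
//sources) of the analytic mean checked in TruncatedExponentialLayer_Sample:
//for a density p(x) ~ exp(-lambda*x) truncated to the unit interval, the
//expectation is E[x] = 1/lambda - 1/(exp(lambda)-1). The sampler below uses
//inversion of the truncated CDF; all names and constants are illustrative.
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <iostream>

int main(){
	double lambda = 0.7;//any nonzero rate, comparable to the tested range
	double analyticMean = 1.0/lambda - 1.0/(std::exp(lambda)-1.0);
	//inversion sampling: F(x) = (1-exp(-lambda*x))/(1-exp(-lambda)), hence
	//x = -log(1 - u*(1-exp(-lambda)))/lambda for u uniform in [0,1)
	std::size_t numSamples = 1000000;
	double mean = 0.0;
	for(std::size_t i = 0; i != numSamples; ++i){
		double u = std::rand()/(RAND_MAX+1.0);
		mean += -std::log(1.0 - u*(1.0-std::exp(-lambda)))/lambda;
	}
	mean /= numSamples;
	//the Monte Carlo estimate should agree with the closed form to a few digits
	std::cout << analyticMean << " vs " << mean << std::endl;
}
//--------------------------------------------------------------------------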
Shark-3.1.4/Test/Rng/000077500000000000000000000000001314655040000142325ustar00rootroot00000000000000Shark-3.1.4/Test/Rng/MultiNomial.cpp000066400000000000000000000042351314655040000171740ustar00rootroot00000000000000 #include #include #define BOOST_TEST_MODULE Rng_MultiNomialDistribution #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Rng_MultiNomial) BOOST_AUTO_TEST_CASE( MultiNomial_same_probabilities) { std::size_t Samples = 100000; std::size_t Dimensions = 10; //Generate probability vector RealVector probabilities(Dimensions,1.0); MultiNomialDistribution dist(probabilities); RealVector draws(Dimensions,0.0); RealVector drawsDiscrete(Dimensions,0.0); for(std::size_t s = 0; s != Samples; ++s){ MultiNomialDistribution::result_type sample = dist(Rng::globalRng); BOOST_REQUIRE(sample < Dimensions); draws[sample]++; drawsDiscrete[Rng::discrete(0,Dimensions-1)]++; } draws/=Samples; drawsDiscrete/=Samples; double interval=(max(drawsDiscrete)-min(drawsDiscrete))/2; std::cout< 1.0/Dimensions -interval*1.5); BOOST_CHECK(draws(i) < 1.0/Dimensions + interval*1.5); } } BOOST_AUTO_TEST_CASE( MultiNomial_different_probabilities) { std::size_t Samples = 1000000; std::size_t trials = 10; for(std::size_t t = 0; t != trials; ++t){ std::size_t Dimensions = Rng::discrete(6,10); //Generate probability vector RealVector probabilities(Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ probabilities(i) = Rng::uni(0.5,2); } probabilities[5] = 0;//make it a bit harder by creating a state which is not going to be drawn RealVector probabilitiesNormalized = probabilities/sum(probabilities); MultiNomialDistribution dist(probabilities); RealVector draws(Dimensions,0.0); for(std::size_t s = 0; s != Samples; ++s){ MultiNomialDistribution::result_type sample = dist(Rng::globalRng); BOOST_REQUIRE(sample < Dimensions); draws[sample]++; } draws/=Samples; for(std::size_t i = 0; i != Dimensions; ++i){ BOOST_CHECK_CLOSE(draws(i),probabilitiesNormalized(i),1); } } } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Rng/MultiVariateNormal.cpp000066400000000000000000000064241314655040000205230ustar00rootroot00000000000000 #include #include #define BOOST_TEST_MODULE Rng_MultivariateNormal #include #include using namespace shark; BOOST_AUTO_TEST_SUITE (Rng_MultiVariateNormal) BOOST_AUTO_TEST_CASE( MULTIVARIATENORMAL_EIGENVALUES ) { std::size_t Dimensions = 5; std::size_t Samples = 10000; //Generate covariance matrix RealMatrix base(Dimensions,2*Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ for(std::size_t j = 0; j != 2*Dimensions; ++j){//2* to guarantue full rank. 
base(i,j) = Rng::gauss(0,1); } } RealMatrix covariance=prod(base,trans(base)); covariance /= 2.0*Dimensions; MultiVariateNormalDistribution dist(covariance); Data sampleSet(Samples,RealVector(Dimensions)); Data normalSampleSet(Samples,RealVector(Dimensions)); for(std::size_t i = 0; i != Samples; ++i){ MultiVariateNormalDistribution::result_type sample = dist(Rng::globalRng); sampleSet.element(i) = sample.first; normalSampleSet.element(i) = sample.second; } RealVector meanSampled; RealMatrix covarianceSampled; RealVector normalMeanSampled; RealMatrix normalCovarianceSampled; meanvar(sampleSet,meanSampled,covarianceSampled); meanvar(normalSampleSet,normalMeanSampled,normalCovarianceSampled); //check that means are correct BOOST_CHECK_SMALL(norm_2(meanSampled)/Dimensions,1.e-2); BOOST_CHECK_SMALL(norm_2(normalMeanSampled)/Dimensions,1.e-2); //check that covariances are correct BOOST_CHECK_SMALL(norm_frobenius(covarianceSampled-covariance)/sqr(Dimensions),1.e-2); BOOST_CHECK_SMALL( norm_frobenius( normalCovarianceSampled-blas::identity_matrix(Dimensions) )/sqr(Dimensions) ,1.e-2); } BOOST_AUTO_TEST_CASE( MULTIVARIATENORMAL_Cholesky) { std::size_t Dimensions = 5; std::size_t Samples = 10000; //Generate covariance matrix RealMatrix base(Dimensions,2*Dimensions); for(std::size_t i = 0; i != Dimensions; ++i){ for(std::size_t j = 0; j != 2*Dimensions; ++j){//2* to guarantue full rank. base(i,j) = Rng::gauss(0,1); } } RealMatrix covariance=prod(base,trans(base)); covariance /= 2.0*Dimensions; MultiVariateNormalDistributionCholesky dist(covariance); Data sampleSet(Samples,RealVector(Dimensions)); Data normalSampleSet(Samples,RealVector(Dimensions)); for(std::size_t i = 0; i != Samples; ++i){ MultiVariateNormalDistribution::result_type sample = dist(Rng::globalRng); sampleSet.element(i) = sample.first; normalSampleSet.element(i) = sample.second; } RealVector meanSampled; RealMatrix covarianceSampled; RealVector normalMeanSampled; RealMatrix normalCovarianceSampled; meanvar(sampleSet,meanSampled,covarianceSampled); meanvar(normalSampleSet,normalMeanSampled,normalCovarianceSampled); //check that means are correct BOOST_CHECK_SMALL(norm_2(meanSampled)/Dimensions,1.e-2); BOOST_CHECK_SMALL(norm_2(normalMeanSampled)/Dimensions,1.e-2); //check that covariances are correct BOOST_CHECK_SMALL(norm_frobenius(covarianceSampled-covariance)/sqr(Dimensions),1.e-2); BOOST_CHECK_SMALL( norm_frobenius( normalCovarianceSampled-blas::identity_matrix(Dimensions) )/sqr(Dimensions) ,1.e-2); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Rng/Rng.cpp000066400000000000000000000320441314655040000154670ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define BOOST_TEST_MODULE Rng_Distributions #include #include #include #include #include #include #include #include #include #include #include namespace shark { template void check_distribution( Distribution & dist, double mean, double variance, unsigned int noTrials = 100000 ) { double resultMean = 0; double resultVariance = 0; for( unsigned int i = 0; i < noTrials; i++ ){ double val = dist(); resultMean += val/noTrials; resultVariance += val*val/noTrials; } resultVariance -= resultMean*resultMean; BOOST_CHECK_CLOSE( resultMean, mean, 1. ); BOOST_CHECK_CLOSE( resultVariance, variance, 1. 
); } } BOOST_AUTO_TEST_SUITE (Rng_Rng) BOOST_AUTO_TEST_CASE( Distribution_DefaultTemplateArgumentCheck ) { shark::Weibull<> dist1( shark::Rng::globalRng ); shark::Bernoulli<> dist2( shark::Rng::globalRng ); shark::Binomial<> dist3( shark::Rng::globalRng ); shark::Cauchy<> dist4( shark::Rng::globalRng ); shark::DiffGeometric<> dist5( shark::Rng::globalRng ); shark::DiscreteUniform<> dist7( shark::Rng::globalRng ); shark::Erlang<> dist8( shark::Rng::globalRng ); shark::Gamma<> dist9( shark::Rng::globalRng ); shark::Geometric<> dist10( shark::Rng::globalRng ); shark::HyperGeometric<> dist12( shark::Rng::globalRng ); shark::LogNormal<> dist13( shark::Rng::globalRng ); shark::NegExponential<> dist14( shark::Rng::globalRng ); shark::Normal<> dist15( shark::Rng::globalRng ); shark::Poisson<> dist16( shark::Rng::globalRng ); shark::Uniform<> dist17( shark::Rng::globalRng ); } BOOST_AUTO_TEST_CASE( Distribution_Bernoulli ) { shark::Rng::seed(42); shark::Bernoulli< shark::Rng::rng_type > bernoulli( shark::Rng::globalRng, 0.3 ); BOOST_CHECK_CLOSE( bernoulli.prob(), 0.3, 1E-10 ); BOOST_CHECK_CLOSE( bernoulli.p( true ), 0.3, 1E-10 ); BOOST_CHECK_CLOSE( bernoulli.p( false ), 0.7, 1E-10 ); shark::check_distribution( bernoulli, 0.3, 0.3 * (1. - 0.3) ); boost::math::bernoulli_distribution bd( 0.3 ); } BOOST_AUTO_TEST_CASE( Distribution_Binomial ) { shark::Rng::seed(42); double probs[] = { 0, 0, 0, 0, 0.00001, 0.00004, 0.00022, 0.00102, 0.00386, 0.01201, 0.03082, 0.06537, 0.11440, 0.16426, 0.19164, 0.17886, 0.13042, 0.07160, 0.02785, 0.00684, 0.00080 }; unsigned int n = 20; double p = 0.7; shark::Binomial< shark::Rng::rng_type > binomial( shark::Rng::globalRng, n, p ); BOOST_CHECK( binomial.n() == n ); BOOST_CHECK_CLOSE( binomial.prob(), p, 1E-1 ); shark::check_distribution( binomial, n * p, n*p*(1-p) ); for( unsigned int i = 0; i < n; i++ ) BOOST_CHECK_SMALL( binomial.p( i ) - probs[ i ], 1E-5 ); } BOOST_AUTO_TEST_CASE( Distribution_Cauchy ) { shark::Rng::seed(42); struct Pair { double m_first; double m_second; } pairs[] = { {-5 ,0.01224268793}, {-4.9 ,0.01272730453}, {-4.8 ,0.01324084385}, {-4.7 ,0.01378561655}, {-4.6 ,0.01436416454}, {-4.5 ,0.01497928876}, {-4.4 ,0.01563408085}, {-4.3 ,0.01633195927}, {-4.2 ,0.01707671063}, {-4.1 ,0.01787253712}, {-4 ,0.01872411095}, {-3.9 ,0.01963663703}, {-3.8 ,0.02061592527}, {-3.7 ,0.02166847421}, {-3.6 ,0.02280156778}, {-3.5 ,0.02402338764}, {-3.4 ,0.0253431438}, {-3.3 ,0.02677122676}, {-3.2 ,0.02831938489}, {-3.1 ,0.03000093178}, {-3 ,0.03183098862}, {-2.9 ,0.03382676793}, {-2.8 ,0.03600790568}, {-2.7 ,0.03839684996}, {-2.6 ,0.04101931523}, {-2.5 ,0.04390481189}, {-2.4 ,0.04708726127}, {-2.3 ,0.05060570528}, {-2.2 ,0.0545051175}, {-2.1 ,0.05883731722}, {-2 ,0.06366197724}, {-1.9 ,0.06904769765}, {-1.8 ,0.07507308636}, {-1.7 ,0.08182773424}, {-1.6 ,0.08941288938}, {-1.5 ,0.09794150344}, {-1.4 ,0.1075371237}, {-1.3 ,0.1183308127}, {-1.2 ,0.1304548714}, {-1.1 ,0.1440316227}, {-1 ,0.1591549431}, {-0.9 ,0.1758618156}, {-0.8 ,0.194091394}, {-0.7 ,0.2136307961}, {-0.6 ,0.2340513869}, {-0.5 ,0.2546479089}, {-0.4 ,0.2744050743}, {-0.3 ,0.2920274185}, {-0.2 ,0.3060671983}, {-0.1 ,0.3151583032}, {0 ,0.3183098862}, {0.1 ,0.3151583032}, {0.2 ,0.3060671983}, {0.3 ,0.2920274185}, {0.4 ,0.2744050743}, {0.5 ,0.2546479089}, {0.6 ,0.2340513869}, {0.7 ,0.2136307961}, {0.8 ,0.194091394}, {0.9 ,0.1758618156}, {1 ,0.1591549431}, {1.1 ,0.1440316227}, {1.2 ,0.1304548714}, {1.3 ,0.1183308127}, {1.4 ,0.1075371237}, {1.5 ,0.09794150344}, {1.6 ,0.08941288938}, {1.7 ,0.08182773424}, {1.8 
,0.07507308636}, {1.9 ,0.06904769765}, {2 ,0.06366197724}, {2.1 ,0.05883731722}, {2.2 ,0.0545051175}, {2.3 ,0.05060570528}, {2.4 ,0.04708726127}, {2.5 ,0.04390481189}, {2.6 ,0.04101931523}, {2.7 ,0.03839684996}, {2.8 ,0.03600790568}, {2.9 ,0.03382676793}, {3 ,0.03183098862}, {3.1 ,0.03000093178}, {3.2 ,0.02831938489}, {3.3 ,0.02677122676}, {3.4 ,0.0253431438}, {3.5 ,0.02402338764}, {3.6 ,0.02280156778}, {3.7 ,0.02166847421}, {3.8 ,0.02061592527}, {3.9 ,0.01963663703}, {4 ,0.01872411095}, {4.1 ,0.01787253712}, {4.2 ,0.01707671063}, {4.3 ,0.01633195927}, {4.4 ,0.01563408085}, {4.5 ,0.01497928876}, {4.6 ,0.01436416454}, {4.7 ,0.01378561655}, {4.8 ,0.01324084385}, {4.9 ,0.01272730453}, {5 ,0.01224268793}, }; double median = 0; double sigma = 1; shark::Cauchy< shark::Rng::rng_type > cauchy( shark::Rng::globalRng, median, sigma ); BOOST_CHECK_CLOSE( cauchy.median(), median, 1E-10 ); BOOST_CHECK_CLOSE( cauchy.sigma(), sigma, 1E-10 ); cauchy.median( 5. ); BOOST_CHECK_CLOSE( cauchy.median(), 5., 1E-10 ); cauchy.sigma( 2. ); BOOST_CHECK_CLOSE( cauchy.sigma(), 2, 1E-10 ); cauchy.median( 0. ); BOOST_CHECK_CLOSE( cauchy.median(), 0., 1E-10 ); cauchy.sigma( 1. ); BOOST_CHECK_CLOSE( cauchy.sigma(), 1., 1E-10 ); for( unsigned int i = 0; i < 100; i++ ) { BOOST_CHECK_SMALL( cauchy.p( pairs[ i ].m_first ) - pairs[ i ].m_second, 1E-5 ); } } BOOST_AUTO_TEST_CASE( Distribution_DiscreteUniform ) { shark::Rng::seed(42); std::size_t low = 0, high = 100; shark::DiscreteUniform< shark::Rng::rng_type > disc( shark::Rng::globalRng, low, high ); BOOST_CHECK( disc.low() == low ); BOOST_CHECK( disc.high() == high ); disc.setRange( 10, 30 ); BOOST_CHECK( disc.low() == 10 ); BOOST_CHECK( disc.high() == 30 ); disc.setRange( low, high ); BOOST_CHECK( disc.low() == low ); BOOST_CHECK( disc.high() == high ); shark::check_distribution( disc, (high+low)/2., (boost::math::pow<2>(high-low+1)-1)/12 ); } BOOST_AUTO_TEST_CASE( Distribution_Erlang ) { shark::Rng::seed(42); double mean = 1., variance = 1.; shark::Erlang< shark::Rng::rng_type > erlang( shark::Rng::globalRng, mean, variance ); BOOST_CHECK_CLOSE( erlang.mean() , mean, 1E-10 ); BOOST_CHECK_CLOSE( erlang.variance() , variance, 1E-10 ); erlang.mean( 10 ); BOOST_CHECK_CLOSE( erlang.mean() , 10, 1E-10 ); erlang.variance( 30 ); BOOST_CHECK_CLOSE( erlang.variance() , 30, 1E-10 ); erlang.mean( mean ); erlang.variance( variance ); BOOST_CHECK_CLOSE( erlang.mean() , mean, 1E-10 ); BOOST_CHECK_CLOSE( erlang.variance() , variance, 1E-10 ); shark::check_distribution( erlang, mean, variance, 100000 ); } BOOST_AUTO_TEST_CASE( Distribution_Geometric ) { shark::Rng::seed(42); double p = 0.3; shark::Geometric< shark::Rng::rng_type > geom( shark::Rng::globalRng, p ); BOOST_CHECK_SMALL( geom.prob() - p, 1E-10 ); geom.prob( 0.7 ); BOOST_CHECK_SMALL( geom.prob() - 0.7, 1E-10 ); geom.prob( p ); BOOST_CHECK_SMALL( geom.prob() - p, 1E-10 ); shark::check_distribution( geom, 1./p, (1.-p)/boost::math::pow<2>( p ) ); } BOOST_AUTO_TEST_CASE( Distribution_HyperGeometric ) { //TODO } BOOST_AUTO_TEST_CASE( Distribution_LogNormal ) { shark::Rng::seed(42); shark::LogNormal< shark::Rng::rng_type > logNormal( shark::Rng::globalRng ); double location = logNormal.location(); double scale = logNormal.scale(); logNormal.location( 2. 
); BOOST_CHECK_SMALL( logNormal.location() - 2., 1E-10 ); logNormal.scale( 5 ); BOOST_CHECK_SMALL( logNormal.scale() - 5., 1E-10 ); logNormal.location( location ); BOOST_CHECK_SMALL( logNormal.location() - location, 1E-10 ); logNormal.scale( scale ); BOOST_CHECK_SMALL( logNormal.scale() - scale, 1E-10 ); boost::math::lognormal_distribution lnd( location, scale ); shark::check_distribution( logNormal, boost::math::mean( lnd ), boost::math::variance( lnd ), 5000000 ); for( double x = 0.1; x < 10; x += 0.1 ) BOOST_CHECK_SMALL( logNormal.p( x ) - boost::math::pdf( lnd, x ), 1E-5 ); } BOOST_AUTO_TEST_CASE( Distribution_NegExponential ) { shark::Rng::seed(42); shark::NegExponential< shark::Rng::rng_type > negExponential(shark::Rng::globalRng,2.0); BOOST_CHECK_SMALL( negExponential.mean() - 2., 1E-10 ); negExponential.mean( 5. ); BOOST_CHECK_SMALL( negExponential.mean() - 5., 1E-10 ); boost::math::exponential_distribution lnd( 0.2 ); shark::check_distribution( negExponential, boost::math::mean( lnd ), boost::math::variance( lnd ) ); for( double x = 0.1; x < 10; x += 0.1 ) BOOST_CHECK_SMALL( negExponential.p( x ) - boost::math::pdf( lnd, x ), 1E-5 ); } BOOST_AUTO_TEST_CASE( Distribution_Normal ) { shark::Rng::seed(42); shark::Normal< shark::Rng::rng_type > normal( shark::Rng::globalRng, 1., 1. ); double mean = normal.mean(); double variance = normal.variance(); BOOST_CHECK_SMALL( mean - 1., 1E-10 ); BOOST_CHECK_SMALL( variance - 1., 1E-10 ); normal.mean( 2. ); BOOST_CHECK_SMALL( normal.mean() - 2., 1E-10 ); normal.variance( 5. ); BOOST_CHECK_SMALL( normal.variance() - 5., 1E-10 ); normal.mean( mean ); BOOST_CHECK_SMALL( normal.mean() - mean, 1E-10 ); normal.variance( variance ); BOOST_CHECK_SMALL( normal.variance() - variance, 1E-10 ); shark::check_distribution( normal, mean, variance ); boost::math::normal_distribution nd( 1., 1. ); for( double x = -5; x < 5; x += 0.1 ) BOOST_CHECK_SMALL( normal.p( x ) - boost::math::pdf( nd, x ), 1E-10 ); // Test that normal distribution implementation in Shark works correctly with different mean and variance { using namespace shark; // Calculate posterior probability of height given male const double mean = 5.855; const double variance = 3.5033e-02; const double standardDeviation = sqrt(variance); const double theX = 6.0; const double expected = 1.5789; // Construct two Normal-s boost::math::normal_distribution boostNormal( mean, standardDeviation ); Normal<> sharkNormal(Rng::globalRng, mean, variance); // Tolerances will be used in the validation const double tolerancePercentage = 0.1; const double toleranceAbsolute = 1E-10; // Test shark Normal gets expected result BOOST_CHECK_CLOSE(sharkNormal.p(theX), expected, tolerancePercentage); // Test shark and boost implementations should get same numbers in range [-10, 10] for (double x = -10.; x < 10.; x += 0.1) BOOST_CHECK_SMALL( sharkNormal.p( x ) - boost::math::pdf( boostNormal, x ), toleranceAbsolute ); } } BOOST_AUTO_TEST_CASE( Distribution_Poisson ) { shark::Rng::seed(42); shark::Poisson< shark::Rng::rng_type > poisson( shark::Rng::globalRng, 1. ); BOOST_CHECK_SMALL( poisson.mean() - 1., 1E-10 ); poisson.mean( 5 ); BOOST_CHECK_SMALL( poisson.mean() - 5., 1E-10 ); shark::check_distribution( poisson, 5., 5. ); boost::math::poisson_distribution pd( 5. 
); for( unsigned int k = 0; k < 100; k++ ) BOOST_CHECK_SMALL( poisson.p( k ) - boost::math::pdf( pd, k ), 1E-5 ); } BOOST_AUTO_TEST_CASE( Distribution_Uniform ) { shark::Rng::seed(42); shark::Uniform< shark::Rng::rng_type > uniform( shark::Rng::globalRng, 1, 5 ); BOOST_CHECK_SMALL( uniform.low() - 1, 1E-10 ); BOOST_CHECK_SMALL( uniform.high() - 5, 1E-10 ); uniform.setRange( 3, 7 ); BOOST_CHECK_SMALL( uniform.low() - 3, 1E-10 ); BOOST_CHECK_SMALL( uniform.high() - 7, 1E-10 ); uniform.setRange( 9, 2 ); BOOST_CHECK_SMALL( uniform.low() - 2, 1E-10 ); BOOST_CHECK_SMALL( uniform.high() - 9, 1E-10 ); shark::check_distribution( uniform, uniform.low() + (uniform.high() - uniform.low())/2., 1./12. * boost::math::pow<2>( uniform.high() - uniform.low() ), 1000000 ); boost::math::uniform_distribution ud( 2., 9. ); for( double x = 1.; x <= 10.; x += 0.1 ) BOOST_CHECK_SMALL( uniform.p( x ) - boost::math::pdf( ud, x ), 1E-5 ); } BOOST_AUTO_TEST_SUITE_END() Shark-3.1.4/Test/Utils.h000066400000000000000000000017111314655040000147550ustar00rootroot00000000000000#ifndef SHARK_TEST_HELPERS_UTILS_H #define SHARK_TEST_HELPERS_UTILS_H #include #include namespace shark { namespace test { /// Verify whether two Vector's are equal template boost::test_tools::predicate_result verifyVectors( const VectorType& seen, const VectorType2& expected, const double tolerance = 1e-10) { boost::test_tools::predicate_result res(true); if (seen.size() != expected.size()) { res = false; res.message() << boost::format("Size mismatch: seen:%1%, expected:%2%") % seen.size() % expected.size(); return res; } for (std::size_t i = 0; i < seen.size(); ++i) { if (std::abs(seen(i) - expected[i]) > tolerance) { res = false; res.message() << boost::format("index:%1%, seen:%2%, expected:%3%") % i % seen(i) % expected[i]; break; } } return res; } }} // namespace shark { namespace test { #endif // SHARK_TEST_HELPERS_UTILS_H Shark-3.1.4/Test/test_data/000077500000000000000000000000001314655040000154545ustar00rootroot00000000000000Shark-3.1.4/Test/test_data/testfile_for_import.h5000066400000000000000000000174141314655040000220000ustar00rootroot00000000000000‰HDF  ÿÿÿÿÿÿÿÿ ÿÿÿÿÿÿÿÿ`ˆ¨ˆ¨TREEÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿàHEAPX(Ètest1datacsccsc20HhTREEÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ8@HEAP°PHFGHIJKLMøSNOD¨ø Px˜ Hh( ?@4 4ÿ€ `Ž P 8desc( SNOD(à  0 @8 ð?@@$@@@ @&@@@"@(@( ð þ« PpPQRS8 ?@4 4ÿÿÿÿÿÿÿÿÿøi PX ?@4 4ÿ i Pxð?@@( ˆ {y Ppdata1label1nodataone_dimensionthree_dimensionwrong_label`TREEÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ HEAPX 8dataindicesindptr8( ?@4 4ÿè0ò­ PhSNOD@ð?@@@@@( (1® Pp( ÐY® Ppx˜TREEÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ  HEAPX(¸dataindicesidxptrlabel0( h Sà PpSNOD¸ˆ ä 2(<FP( ˜ ”à Pp ÈÖà P€( ôYã PpdÈ,ôXShark-3.1.4/UseShark.cmake000066400000000000000000000021701314655040000153140ustar00rootroot00000000000000# This file sets up include directories, link directories, and # compiler settings for a project to use Shark. It should not be # included directly, but rather through the SHARK_USE_FILE setting # obtained from SharkConfig.cmake. set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SHARK_REQUIRED_C_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SHARK_REQUIRED_CXX_FLAGS}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SHARK_REQUIRED_EXE_LINKER_FLAGS}") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SHARK_REQUIRED_SHARED_LINKER_FLAGS}") set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${SHARK_REQUIRED_MODULE_LINKER_FLAGS}") # Add include directories needed to use Shark. include_directories(BEFORE ${SHARK_INCLUDE_DIRS}) # Add link directories needed to use Shark. 
link_directories(${SHARK_LIBRARY_DIRS}) #add DNDEBUG flag in Release mode get_target_property(SHARK_CONFIGURATION shark IMPORTED_CONFIGURATIONS) list(APPEND COMPILE_DEFINITIONS_RELEASE NDEBUG) if (CMAKE_BUILD_TYPE STREQUAL "") set (CMAKE_BUILD_TYPE Release CACHE STRING "One of: None Debug Release RelWithDebInfo MinSizeRel." FORCE) endif ()Shark-3.1.4/appveyor.yml000066400000000000000000000023721314655040000151610ustar00rootroot00000000000000# specify platforms platform: - Win32 - x64 # configuration configuration: # - Debug - Release # set environment variables environment: BOOST_ROOT: C:\Libraries\boost #OMP_NUM_THREADS: 2 # specify branch branches: only: - master # before everything init: - cmd: cmake --version - cmd: msbuild /version #- cmd: dir %BOOST_LIBRARYDIR% # clone directory clone_folder: C:\projects\shark # scripts to run before build before_build: - cmd: cd C:\projects\shark #- cmd: dir - cmd: md build - cmd: cd build - cmd: if "%platform%"=="Win32" set CMAKE_GENERATOR_NAME=Visual Studio 12 - cmd: if "%platform%"=="x64" set CMAKE_GENERATOR_NAME=Visual Studio 12 Win64 - cmd: if "%platform%"=="Win32" set BOOST_LIBRARYDIR=%BOOST_ROOT%\lib32-msvc-12.0 - cmd: if "%platform%"=="x64" set BOOST_LIBRARYDIR=%BOOST_ROOT%\lib64-msvc-12.0 - cmd: cmake -G "%CMAKE_GENERATOR_NAME%" -DCMAKE_BUILD_TYPE=%configuration% -DBUILD_TESTING=OFF -DBUILD_SHARED_LIBS=ON -DBUILD_EXAMPLES=OFF -DBOOST_ROOT="%BOOST_ROOT%" -DBOOST_LIBRARYDIR="%BOOST_LIBRARYDIR%" .. #- cmd: type C:\projects\shark\build\shark.sln # actual build process build: project: C:\projects\shark\build\shark.sln parallel: true verbosity: minimal # compiler verbosity Shark-3.1.4/cBlasCheck.cpp000066400000000000000000000002151314655040000152510ustar00rootroot00000000000000extern "C"{ #include } int main(){ float* x; float* y; int N,strideX,strideY; float r= cblas_sdot(N, x, strideX, y, strideY); }Shark-3.1.4/cmake_uninstall.cmake.in000066400000000000000000000017631314655040000173540ustar00rootroot00000000000000if (NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") message(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") endif(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) string(REGEX REPLACE "\n" ";" files "${files}") list(REVERSE files) foreach (file ${files}) message(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") if (EXISTS "$ENV{DESTDIR}${file}") execute_process( COMMAND @CMAKE_COMMAND@ -E remove "$ENV{DESTDIR}${file}" OUTPUT_VARIABLE rm_out RESULT_VARIABLE rm_retval ) if(NOT ${rm_retval} EQUAL 0) message(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") endif (NOT ${rm_retval} EQUAL 0) else (EXISTS "$ENV{DESTDIR}${file}") message(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") endif (EXISTS "$ENV{DESTDIR}${file}") endforeach(file) Shark-3.1.4/doc/000077500000000000000000000000001314655040000133325ustar00rootroot00000000000000Shark-3.1.4/doc/CMakeLists.txt000066400000000000000000000036651314655040000161040ustar00rootroot00000000000000CMAKE_MINIMUM_REQUIRED( VERSION 2.8 ) find_package( Doxygen REQUIRED ) find_package( PythonInterp REQUIRED ) ADD_SUBDIRECTORY(tutToRst) CONFIGURE_FILE ( "${CMAKE_CURRENT_SOURCE_DIR}/Shark.dox.in" "${CMAKE_CURRENT_BINARY_DIR}/Shark.dox" ) CONFIGURE_FILE ( "${CMAKE_CURRENT_SOURCE_DIR}/sphinx_pages/conf.py.in" "${CMAKE_CURRENT_SOURCE_DIR}/sphinx_pages/conf.py" ) set( SPHINX_EXECUTABLE sphinx-build ) set( SPHINX_PARAMETERS -b html ) 
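#The doc_creation target defined below first converts every .tut tutorial
#source found under sphinx_pages into .rst via the tut2rst helper; the doc
#target then runs Doxygen on the configured Shark.dox and finally invokes
#Sphinx to build the html pages.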
add_custom_target(doc_creation) #find all .tut files file(GLOB_RECURSE TutFiles sphinx_pages *.tut) foreach(tut ${TutFiles}) GET_FILENAME_COMPONENT(tutPath ${tut} PATH) GET_FILENAME_COMPONENT(tutName ${tut} NAME_WE) add_custom_command(TARGET doc_creation POST_BUILD COMMAND tut2rst ${tutPath}/${tutName} ${PROJECT_SOURCE_DIR}/examples ) endforeach() add_dependencies(doc_creation tut2rst) add_dependencies(tut2rst tpp2cpp) # defined in examples folder add_custom_target( doc ALL COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Shark.dox COMMAND ${SPHINX_EXECUTABLE} ${SPHINX_PARAMETERS} ${CMAKE_CURRENT_SOURCE_DIR}/sphinx_pages ${CMAKE_CURRENT_BINARY_DIR}/sphinx_pages/build/html ) add_custom_command(TARGET doc PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/doxygen_pages/css/besser.css ${CMAKE_CURRENT_BINARY_DIR}/doxygen_pages/css/besser.css ) add_dependencies(doc doc_creation) OPTION( OPT_INSTALL_DOCUMENTATION "Install documentation." OFF ) IF( OPT_INSTALL_DOCUMENTATION ) INSTALL( FILES index.html DESTINATION ${SHARK_INSTALL_DOC_DIR} ) INSTALL( DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen_pages DESTINATION ${SHARK_INSTALL_DOC_DIR} PATTERN ".*" EXCLUDE ) INSTALL( DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doxygen_pages/css DESTINATION ${SHARK_INSTALL_DOC_DIR}/doxygen_pages PATTERN ".*" EXCLUDE ) INSTALL( DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sphinx_pages DESTINATION ${SHARK_INSTALL_DOC_DIR} PATTERN ".*" EXCLUDE ) ENDIF( OPT_INSTALL_DOCUMENTATION ) Shark-3.1.4/doc/DoxygenLayout.xml000066400000000000000000000127511314655040000166750ustar00rootroot00000000000000 Shark-3.1.4/doc/LinkChecker.cpp000066400000000000000000000475561314655040000162410ustar00rootroot00000000000000 // // Tool to automatically check the links in the Shark documentation. // // Author: Tobias Glasmachers // Date: 2008 // #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MSG_NOSIGNAL #define MSG_NOSIGNAL 0 #endif class Link; class Target; class Document; class String { public: String() { length = 0; buffer = (char*)malloc(1); buffer[0] = 0; } String(const char* s) { length = strlen(s); buffer = (char*)malloc(length + 1); memcpy(buffer, s, length + 1); } String(const char* s, int len) { length = len; buffer = (char*)malloc(length + 1); memcpy(buffer, s, length); buffer[length] = 0; } String(const String& s) { length = s.getLength(); buffer = (char*)malloc(length + 1); memcpy(buffer, (const char*)s, length + 1); } ~String() { // commenting this is desastrous as we waste several // megabytes of memory, but the error is deeply hidden... 
// if (buffer != NULL) free(buffer); } inline int getLength() const { return length; } inline bool isEmpty() const { return (length == 0); } inline operator char* () { return buffer; } inline operator const char* () const { return buffer; } inline char& operator [] (int index) { #ifdef DEBUG if (index > length) throw "[String::operator []] access violation"; #endif return buffer[index]; } inline const char& operator [] (int index) const { #ifdef DEBUG if (index > length) throw "[String::operator []] access violation"; #endif return buffer[index]; } void operator = (const char* s) { free(buffer); length = strlen(s); buffer = (char*)malloc(length + 1); memcpy(buffer, s, length + 1); } inline bool operator == (const char* s) const { return (strcmp(buffer, s) == 0); } inline bool operator != (const char* s) const { return (strcmp(buffer, s) != 0); } inline bool operator < (const char* s) const { return (strcmp(buffer, s) < 0); } inline bool operator <= (const char* s) const { return (strcmp(buffer, s) <= 0); } inline bool operator > (const char* s) const { return (strcmp(buffer, s) > 0); } inline bool operator >= (const char* s) const { return (strcmp(buffer, s) >= 0); } int FindFirst(const char* pattern, int start = 0) { int plen = strlen(pattern); if (plen > length) return -1; int i; for (i=start; i<=length-plen; i++) { if (memcmp(buffer + i, pattern, plen) == 0) return i; } return -1; } String Substring(int start, int len = -1) { if (len == -1) return String(buffer + start); else return String(buffer + start, len); } void Delete(int start) { if (start > length) return; buffer[start] = 0; length = start; } int CompareNoCase(const char* s) { int l1 = length; int l2 = strlen(s); int i, l = (l1 < l2) ? l1 : l2; for (i=0; i= 'A' && c1 <= 'Z') c1 += 32; if (c2 >= 'A' && c2 <= 'Z') c2 += 32; if (c1 < c2) return -1; else if (c1 > c2) return 1; } if (l1 < l2) return -1; else if (l1 > l2) return 1; else return 0; } String operator + (const char* s) { int l1 = length; int l2 = strlen(s); char* tmp = (char*)malloc(l1 + l2 + 1); memcpy(tmp, buffer, l1); memcpy(tmp + l1, s, l2); tmp[l1 + l2] = 0; String ret(tmp); delete tmp; return ret; } void operator += (const char* s) { int l1 = length; int l2 = strlen(s); char* tmp = (char*)malloc(l1 + l2 + 1); memcpy(tmp, buffer, l1); memcpy(tmp + l1, s, l2); tmp[l1 + l2] = 0; free(buffer); buffer = tmp; length = l1 + l2; } bool Read(String filename) { FILE* file = fopen((const char*)filename, "r"); if (file == NULL) return false; fseek(file, 0, SEEK_END); length = ftell(file); free(buffer); buffer = (char*)malloc(length + 1); fseek(file, 0, SEEK_SET); int num = fread(buffer, 1, length, file); if (num != length) throw "[String::Read] error reading file"; fclose(file); buffer[length] = 0; return true; } protected: int length; char* buffer; }; class StringList { public: StringList() { } ~StringList() { int i, ic = data.size(); for (i=0; i= (int)data.size()) throw "[StringList::operator []] access violation"; #endif return *data[index]; } int find(const char* s) { int i, ic = data.size(); for (i=0; iCompareNoCase(s) == 0) return i; return -1; } void push_back(const String& s) { String* ps = new String(s); data.push_back(ps); } void erase(int index) { #ifdef DEBUG if (index < 0 || index >= (int)data.size()) throw "[StringList::erase] access violation"; #endif delete data[index]; data.erase(data.begin() + index); } protected: std::vector data; }; template class MyPtrVector : protected std::vector { public: MyPtrVector() { } ~MyPtrVector() { } inline unsigned int size() 
	inline unsigned int size() const
	{ return std::vector<T*>::size(); }

	inline T* operator [] (int index)
	{ return std::vector<T*>::operator [](index); }

	inline T* operator [] (int index) const
	{ return std::vector<T*>::operator [](index); }

	inline int find(T* element) const
	{
		unsigned int index;
		if (findIndex(element, index)) return index;
		else return -1;
	}

	bool insert(T* element)
	{
		unsigned int index;
		if (findIndex(element, index)) return false;
		std::vector<T*>::insert(std::vector<T*>::begin() + index, element);
		return true;
	}

	bool erase(T* element)
	{
		unsigned int index;
		if (! findIndex(element, index)) return false;
		std::vector<T*>::erase(std::vector<T*>::begin() + index);
		return true;
	}

protected:
	bool findIndex(T* element, unsigned int& index) const
	{
		// binary search for the initial step size
		unsigned int stepsize = 0;
		unsigned int b = 16;
		for (b = 16; b > 0; b >>= 1)
		{
			if ((int)std::vector<T*>::size() >= (1 << (stepsize + b))) stepsize += b;
		}
		stepsize = (1 << stepsize);

		// check if the element is larger than all elements in the list
		if (std::vector<T*>::size() == 0 || *std::vector<T*>::operator [](std::vector<T*>::size() - 1) < *element)
		{
			index = std::vector<T*>::size();
			return false;
		}

		// binary search in the ordered list
		index = 0;
		unsigned int i;
		while (stepsize > 0)
		{
			i = index + stepsize;
			if (i < std::vector<T*>::size())
			{
				if (*std::vector<T*>::operator [](i - 1) < *element) index = i;
			}
			stepsize >>= 1;
		}
		return (!(*element < *std::vector<T*>::operator [](index)));
	}
};


// representation of an html file after scanning
class Document
{
public:
	Document(String path)
	{
		this->path = path;
	}
	~Document() { }

	bool operator < (const Document& other) const
	{
		return (path < other.path);
	}

	String path;
	StringList anchor;
};


// description of the target document of a link
class Target
{
public:
	Target(String abspath)
	{
		this->abspath = abspath;
	}
	~Target() { }

	bool operator == (const Target& other) const
	{
		return (abspath == other.abspath);
	}
	bool operator < (const Target& other) const
	{
		return (abspath < other.abspath);
	}

	void Print();

	String abspath;
	MyPtrVector<Link> link;
};


// complete description of a link
class Link
{
public:
	Link(Target* target, Document* doc, String text, String href, String anchor = "")
	{
		this->target = target;
		this->doc = doc;
		this->text = text;
		this->href = href;
		this->anchor = anchor;
	}
	~Link() { }

	bool operator == (const Link& other) const
	{
		return (target == other.target && doc == other.doc && text == other.text && href == other.href && anchor == other.anchor);
	}
	bool operator < (const Link& other) const
	{
		if (target == other.target)
		{
			if (doc == other.doc)
			{
				if (text == other.text)
				{
					if (href == other.href)
					{
						return (anchor < other.anchor);
					}
					else return (href < other.href);
				}
				else return (text < other.text);
			}
			else return ((long long)doc < (long long)other.doc);
		}
		else return ((long long)target < (long long)other.target);
	}

	void Print()
	{
		std::cout << "  doc=\"" << doc->path << "\"" << std::endl;
		std::cout << "  text=\"" << text << "\"" << std::endl;
		std::cout << "  href=\"" << href << "\"" << std::endl;
	}

	Target* target;
	Document* doc;
	String text;
	String href;
	String anchor;
};


void Target::Print()
{
	std::cout << "target=\"" << abspath << "\"" << std::endl;
	int i, ic = link.size();
	for (i=0; i<ic; i++) link[i]->Print();
}


class LinkChecker
{
public:
	LinkChecker()
	: external("")
	{ }
	~LinkChecker() { }

	void Help()
	{
		std::cout << std::endl;
		std::cout << "LinkChecker checks the links of html documents in and below the" << std::endl;
		std::cout << "current directory. It checks all local relative links including" << std::endl;
		std::cout << "anchors and all external http:// links, ignoring anchors." << std::endl;
		std::cout << "Further it reports all mailto and other links for manual checking." << std::endl;
		std::cout << std::endl;
		std::cout << "usage: LinkChecker [--quiet]" << std::endl;
		std::cout << std::endl;
		exit(0);
	}

	void Run(int argc, char** argv)
	{
		if (argc > 2) Help();
		verbose = (argc != 2);
		if (argc == 2 && strcmp(argv[1], "--quiet") != 0) Help();

		timeval start;
		gettimeofday(&start, NULL);

		numDocs = 0;
		numInternalLinks = 0;
		numExternalLinks = 0;
		numBroken = 0;
		numMailLinks = 0;
		numOtherLinks = 0;

		std::cout << std::endl;
		std::cout << "Shark documentation link checker" << std::endl;
		ScanDir("./");

		std::cout << std::endl;
		std::cout << "CHECKING INTERNAL LINKS" << std::endl;
		int i, ic;
		ic = internal.size();
		for (i=0; i<ic; i++) CheckLink(internal[i]);

		CheckExternalLinks();

		// report summary statistics
		timeval now;
		gettimeofday(&now, NULL);
		std::cout << std::endl;
		std::cout << numDocs << " documents scanned, " << numBroken << " broken links, " << (now.tv_sec - start.tv_sec) << " seconds." << std::endl;
	}

	bool CheckHttpLink(String domain, String resource)
	{
		// resolve the domain name
		hostent* host = gethostbyname((const char*)domain);
		if (host == NULL)
		{
			std::cout << "broken link (can not resolve domain):" << std::endl;
			return false;
		}

		// connect to the http port
		int sock = socket(AF_INET, SOCK_STREAM, 0);
		sockaddr_in addr;
		addr.sin_family = AF_INET;
		addr.sin_port = htons(80);
		addr.sin_addr.s_addr = ((in_addr*)host->h_addr)->s_addr;
		memset(&(addr.sin_zero), 0, 8);
		if (connect(sock, (sockaddr*) & addr, sizeof(sockaddr)) < 0)
		{
			std::cout << "broken link (can not connect):" << std::endl;
			close(sock);
			return false;
		}

		// send http request
		String request = String("GET ") + resource + " HTTP/1.1\r\nhost: " + domain + "\r\n\r\n";
		if (send(sock, (const char*)request, request.getLength(), MSG_NOSIGNAL) <= 0)
		{
			std::cout << "broken link (can not send):" << std::endl;
			close(sock);
			return false;
		}

		// wait for reply
		timeval timeout;
		timeout.tv_sec = 5;
		timeout.tv_usec = 0;
		fd_set read_socks;
		FD_ZERO(&read_socks);
		FD_SET(sock, &read_socks);
		if (select(sock + 1, &read_socks, NULL, NULL, &timeout) != 1)
		{
			std::cout << "broken link (can not receive or timeout):" << std::endl;
			close(sock);
			return false;
		}
		FD_ZERO(&read_socks);

		// receive
		char answer[256];
		int n = recv(sock, answer, 255, 0);
		close(sock);
		if (n <= 0)
		{
			std::cout << "broken link (can not receive or timeout):" << std::endl;
			return false;
		}
		answer[n] = 0;
		if (memcmp(answer, "HTTP/1.", 7) != 0)
		{
			std::cout << "broken link (no HTTP/1.x answer):" << std::endl;
			return false;
		}
		// the status code starts right after the "HTTP/1.x " prefix
		answer[12] = 0;
		int code = atoi(answer + 9);
		if (code >= 400)
		{
			std::cout << "broken link (code " << code << "):" << std::endl;
			return false;
		}
		return true;
	}

	void CheckExternalLinks()
	{
		StringList mailto;
		StringList other;
		std::cout << std::endl;
		std::cout << "CHECKING EXTERNAL LINKS (this may take some time)" << std::endl;
		int i, ic = external.link.size();
		for (i=0; i<ic; i++)
		{
			String href = external.link[i]->href;
			if (href.Substring(0, 7).CompareNoCase("http://") == 0)
			{
				int a = href.FindFirst("#");
				if (a != -1) href.Delete(a);
				int s = href.FindFirst("/", 7);
				if (s == -1)
				{
					if (! CheckHttpLink(href.Substring(7), "/"))
					{
						numBroken++;
						external.link[i]->Print();
					}
				}
				else
				{
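					// the URL contains a path: split it into domain
					// (up to the first slash) and resource part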
					if (! CheckHttpLink(href.Substring(7, s - 7), href.Substring(s)))
					{
						numBroken++;
						external.link[i]->Print();
					}
				}
			}
			else if (href.FindFirst("mailto:") != -1)
			{
				if (mailto.findNoCase(href) == -1) mailto.push_back(href);
			}
			else
			{
				if (other.findNoCase(href) == -1) other.push_back(href);
			}
		}

		std::cout << std::endl;
		std::cout << "MAIL LINKS (check manually):" << std::endl;
		ic = mailto.size();
		numMailLinks = ic;
		for (i=0; i<ic; i++) std::cout << "  " << mailto[i] << std::endl;

		std::cout << std::endl;
		std::cout << "OTHER LINKS (check manually):" << std::endl;
		ic = other.size();
		numOtherLinks = ic;
		for (i=0; i<ic; i++) std::cout << "  " << other[i] << std::endl;
	}

	// resolve a relative link against the directory it occurs in,
	// collapsing empty and "name/.." components of the combined path
	String ConcatPaths(String dir, String relative)
	{
		// split the combined path into its components
		String whole = dir + "/" + relative;
		StringList component;
		int start = 0;
		while (start < whole.getLength())
		{
			int slash = whole.FindFirst("/", start);
			if (slash == -1) slash = whole.getLength();
			String c = whole.Substring(start, slash - start);
			if (! c.isEmpty()) component.push_back(c);
			start = slash + 1;
		}

		// collapse "name/.." pairs
		int a;
		for (a=0; a<(int)component.size(); a++)
		{
			if (component[a] == "..")
			{
				if (a > 0 && component[a - 1] != "..")
				{
					component.erase(a);
					component.erase(a - 1);
					a -= 2;
				}
			}
		}
		if (component.size() == 0) return ".";
		String ret = component[0];
		for (a=1; a<(int)component.size(); a++)
		{
			ret += "/";
			ret += component[a];
		}
		return ret;
	}

	Target* getTarget(String abspath)
	{
		Target* target = new Target(abspath);
		int pos = internal.find(target);
		if (pos == -1)
		{
			internal.insert(target);
			return target;
		}
		else
		{
			delete target;
			return internal[pos];
		}
	}

	Link* getLink(Target* target, Document* doc, String text, String href, String anchor = "")
	{
		Link* link = new Link(target, doc, text, href, anchor);
		int pos = target->link.find(link);
		if (pos == -1)
		{
			target->link.insert(link);
			return link;
		}
		else
		{
			delete link;
			return target->link[pos];
		}
	}

	void ScanDir(String path)
	{
		if (path[path.getLength() - 1] != '/') path += "/";
		DIR* dir = opendir((const char*)path);
		if (dir == NULL) return;
		String s;
		dirent* entry;
		while (true)
		{
			entry = readdir(dir);
			if (entry == NULL) break;
			s = entry->d_name;
			int len = s.getLength();
			if (len == 0) break;
			if (len > 5 && s.Substring(len - 5).CompareNoCase(".html") == 0)
			{
				ScanFile(path, s);
			}
			if (s[0] == '.') continue;
			ScanDir(path + s);
		}
		closedir(dir);
	}

	void ScanFile(String path, String filename)
	{
		numDocs++;
		String content;
		int pos, end;
		if (! content.Read(path + filename))
		{
			std::cout << "failed to open " << (const char*)path << (const char*)filename << std::endl;
		}
		else
		{
			if (verbose) std::cout << "scanning " << (const char*)path << (const char*)filename << " ..." << std::flush;
			Document* doc = new Document(path + filename);
			scanned.insert(doc);

			// find links and anchors
			pos = 0;
			while (true)
			{
				pos = content.FindFirst("<a ", pos);
				if (pos == -1) break;
				end = content.FindFirst("</a>", pos + 4);
				if (end == -1) break;
				int m, n;
				String text;
				m = content.FindFirst(">", pos);
				if (m != -1 && m < end) text = content.Substring(m + 1, end - m - 1);
				m = content.FindFirst("href=\"", pos);
				if (m != -1 && m < end)
				{
					m += 6;
					n = content.FindFirst("\"", m);
					if (n != -1 && n < end)
					{
						// link
						String link = content.Substring(m, n - m);
						if (link.FindFirst("javascript:") == -1)
						{
							int a = link.FindFirst("#");
							if (a == -1) a = link.getLength();
							if ((link.FindFirst("://") == -1) && (link.FindFirst("mailto:") == -1))
							{
								Target* t = getTarget(ConcatPaths(path, link.Substring(0, a)));
								getLink(t, doc, text, link, link.Substring(a));
							}
							else
							{
								getLink(&external, doc, text, link);
							}
						}
					}
				}
				m = content.FindFirst("name=\"", pos);
				if (m != -1 && m < end)
				{
					m += 6;
					n = content.FindFirst("\"", m);
					if (n != -1 && n < end)
					{
						// anchor
						String anchor = content.Substring(m, n - m);
						doc->anchor.push_back(anchor);
					}
				}
				pos = end + 4;
			}
			if (verbose) std::cout << " done." << std::endl;
		}
	}

	void CheckLink(Target* target)
	{
		String msg;
		Document tmpdoc(target->abspath);
		int pos = scanned.find(&tmpdoc);
		if (pos == -1)
		{
			// check if the file exists
			FILE* file = fopen((const char*)target->abspath, "r");
			if (file != NULL) fclose(file);
			else
			{
				// error message
				msg = String("broken link: file '") + target->abspath + "' not found.\n";
				int i, ic = target->link.size();
				for (i=0; i<ic; i++)
				{
					Link* link = target->link[i];
					msg += String("  doc=\"") + link->doc->path + "\"\n";
					msg += String("  text=\"") + link->text + "\"\n";
					msg += String("  href=\"") + link->href + "\"\n";
				}
			}
		}
		else
		{
			// check all anchors
			bool err = false;
			Document* doc = scanned[pos];
			int i, ic = target->link.size();
			for (i=0; i<ic; i++)
			{
				Link* link = target->link[i];
				if (! link->anchor.isEmpty())
				{
					int pos = doc->anchor.find(link->anchor);
					if (pos == -1)
					{
						// error message
						if (! err)
						{
							msg = String("broken link: anchors in file '") + doc->path + "' not found.\n";
							err = true;
						}
						msg += String("  doc=\"") + link->doc->path + "\"\n";
						msg += String("  text=\"") + link->text + "\"\n";
						msg += String("  href=\"") + link->href + "\"\n";
					}
				}
			}
		}
		if (! msg.isEmpty())
		{
			numBroken++;
			std::cout << msg;
		}
	}

	// files already scanned
	MyPtrVector<Document> scanned;

	// links to test
	MyPtrVector<Target> internal;

	// external links to report
	Target external;

	// statistics
	int numDocs;
	int numInternalLinks;
	int numExternalLinks;
	int numBroken;
	int numMailLinks;
	int numOtherLinks;

	bool verbose;
};


int main(int argc, char** argv)
{
	try
	{
		LinkChecker theApp;
		theApp.Run(argc, argv);
	}
	catch (const char* exception)
	{
		std::cout << std::endl;
		std::cout << "EXCEPTION: " << exception << std::endl;
		std::cout << std::endl;
	}
}
Shark-3.1.4/doc/README.txt000066400000000000000000000062761314655040000150410ustar00rootroot00000000000000
This file provides basic assistance for getting the Shark documentation
installed. Note that there is a somewhat more detailed tutorial as part
of the online documentation.


DOCUMENTATION INSTALLATION GUIDE
--------------------------------

What you need to have:

- A working Doxygen installation

- EITHER a working GraphViz installation (more specifically, the dot
  tool), OR a manually edited doc/shark.dox(.in) that does not use the
  dot tool (set HAVE_DOT = NO)

- A working Python installation, ideally set up so that it does not
  conflict with other installed versions of itself

- Either an internet connection, or a completely functional dependency
  tree for the Sphinx and Doxylink documentation tools

What you need to do:

- Make sure that both Sphinx and Doxylink are installed on your system
  (if you install Doxylink via some Python package manager, e.g., via
  "pip install --install-option="--prefix=/home/user/path/to/your/pip_python_packages" sphinxcontrib_doxylink"
  or similar, Sphinx will automatically be installed as a dependency).
  Then make sure that $PATH is set so that the Sphinx executable
  "sphinx-build" is found, and that the Doxylink package is in the
  $PYTHONPATH (e.g., from the example above,
  /home/user/path/to/your/pip_python_packages/lib/python3.3/site-packages).

- Configure/populate the CMake build system for the documentation via
  "ccmake ." in /doc. Note that you can build the documentation both
  in- and out-of-source. We recommend not building the documentation
  together with the (possibly several) versions of the Shark library,
  but separately, either in- or out-of-source.

- Issue "make doc" in the doc/ subdirectory. You know that you are done
  when "make doc" exits with "build succeeded. Built target doc", and
  when you can successfully view the page $SHARKHOME/doc/index.html.
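For example, the in-source variant of these steps boils down to the
following minimal sketch (paths depend on your setup):

  cd $SHARKHOME/doc
  ccmake .      # configure/populate the build system, then generate
  make doc      # runs Doxygen and Sphinx
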
Common problems can be:

- You have to configure the build with "ccmake ." first.

- The sphinx-build executable is not found. In such a case, consider
  * installing Sphinx through your distribution again;
  * examining $PATH, maybe adding a manual alias from "sphinx-build" to
    the correct executable of your Sphinx installation, and/or examining
    your $PYTHONPATH;
  * issuing "make doc" with a user-controlled PYTHONPATH, e.g. as in
    "PYTHONPATH=/usr/lib/python2.7/site-packages/:/path/to/your/Shark3/contrib/doxylink/build/lib/python make doc";
  * in extreme cases, looking into the www.virtualenv.org tool for
    managing concurrent Python installations.

Further troubleshooting:

- In general, if you run into trouble, you should try to make sure that
  all dependencies are installed and accessible. The most relevant
  dependencies are Sphinx and Doxylink, which in turn rely on a number
  of tools, e.g., Docutils, Jinja2, Pygments, and Pyparsing; these,
  however, should be taken care of automatically by the parent packages.
  Then it usually boils down to either installing what is missing or
  making the paths known in the correct manner.

Good luck!
Shark-3.1.4/doc/Shark.dox.in000066400000000000000000002270721314655040000155330ustar00rootroot00000000000000
# Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = Shark # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 3.0.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@/doxygen_pages # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, # Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, # Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, # and Ukrainian. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. 
STRIP_FROM_PATH = @CMAKE_CURRENT_SOURCE_DIR@/.. # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = @CMAKE_CURRENT_SOURCE_DIR@/../include # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. 
For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). 
Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or # internal scope will be included in the documentation. EXTRACT_PACKAGE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. 
EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. 
SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. 
SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = "svnversion @CMAKE_CURRENT_SOURCE_DIR@/.." # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text.
Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @CMAKE_CURRENT_SOURCE_DIR@/../include \ @CMAKE_CURRENT_SOURCE_DIR@/../examples \ @CMAKE_CURRENT_BINARY_DIR@/../examples # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.h \ *.inl \ *.hpp \ *.cpp # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* # # MT: THIS WORKS AROUND UGLY PROBLEMS WITH MULTI-DECLARED DEFINES AND # FUNCTION NAMES IN THE GLPK SOLVER # EXCLUDE_PATTERNS = */.svn/* */Test/* *boost* */Impl/* */detail/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. 
Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = boost::noncopyable *type* shark::detail shark::blas::detail shark::blas::bindings shark::tag # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = @CMAKE_CURRENT_SOURCE_DIR@/../examples # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = *.cpp # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = YES # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # none of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible.
STRIP_CODE_COMMENTS = NO # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = NO # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. # mt: this produces strange artifacts with our setup. may be fixable # by merging the current css (or fixing ours in general), but we # prefer simply setting this to no. SOURCE_TOOLTIPS = NO # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 2 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). 
If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = @CMAKE_CURRENT_SOURCE_DIR@/doxygen_pages/templates/header.html #HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = @CMAKE_CURRENT_SOURCE_DIR@/doxygen_pages/templates/footer.html #HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. 
HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = YES # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). 
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
# of the generated HTML documentation.

GENERATE_HTMLHELP      = NO

# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
# be used to specify the file name of the resulting .chm file. You
# can add a path in front of the file if the result should not be
# written to the html output directory.

CHM_FILE               =

# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
# be used to specify the location (absolute path including file name) of
# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
# the HTML help compiler on the generated index.hhp.

HHC_LOCATION           =

# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
# controls whether a separate .chi index file is generated (YES) or whether
# it should be included in the master .chm file (NO).

GENERATE_CHI           = NO

# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
# is used to encode HtmlHelp index (hhk), content (hhc) and project file
# content.

CHM_INDEX_ENCODING     =

# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
# controls whether a binary table of contents is generated (YES) or a
# normal table of contents (NO) in the .chm file.

BINARY_TOC             = NO

# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the contents of the HTML help documentation and to the tree view.

TOC_EXPAND             = NO

# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
# that can be used as input for Qt's qhelpgenerator to generate a
# Qt Compressed Help (.qch) of the generated HTML documentation.

GENERATE_QHP           = NO

# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
# be used to specify the file name of the resulting .qch file.
# The path specified is relative to the HTML output folder.

QCH_FILE               =

# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#namespace

QHP_NAMESPACE          = org.doxygen.Project

# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when
# generating Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#virtual-folders

QHP_VIRTUAL_FOLDER     = doc

# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
# add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters

QHP_CUST_FILTER_NAME   =

# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see
# Qt Help Project / Custom Filters.

QHP_CUST_FILTER_ATTRS  =

# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. For more information please see
# Qt Help Project / Filter Attributes.

QHP_SECT_FILTER_ATTRS  =

# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
# be used to specify the location of Qt's qhelpgenerator.
# If non-empty doxygen will try to run qhelpgenerator on the generated
# .qhp file.

QHG_LOCATION           =
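# A complete, enabled Qt Help setup would look roughly like the following
# sketch (namespace and file names are illustrative only, not part of this
# configuration):
# GENERATE_QHP       = YES
# QCH_FILE           = shark.qch
# QHP_NAMESPACE      = org.example.shark
# QHP_VIRTUAL_FOLDER = doc
# QHG_LOCATION       = qhelpgenerator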
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
# will be generated, which together with the HTML files, form an Eclipse help
# plugin. To install this plugin and make it available under the help contents
# menu in Eclipse, the contents of the directory containing the HTML and XML
# files needs to be copied into the plugins directory of eclipse. The name of
# the directory within the plugins directory should be the same as
# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
# the help appears.

GENERATE_ECLIPSEHELP   = NO

# A unique identifier for the eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have
# this name.

ECLIPSE_DOC_ID         = org.doxygen.Project

# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
# at top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it. Since the tabs have the same information as the
# navigation tree you can set this option to YES if you already set
# GENERATE_TREEVIEW to YES.

DISABLE_INDEX          = NO

# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.
# Since the tree basically has the same information as the tab index you
# could consider setting DISABLE_INDEX to YES when enabling this option.

GENERATE_TREEVIEW      = NO

# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
# (range [0,1..20]) that doxygen will group on one line in the generated HTML
# documentation. Note that a value of 0 will completely suppress the enum
# values from appearing in the overview section.

ENUM_VALUES_PER_LINE   = 4

# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.

TREEVIEW_WIDTH         = 250

# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
# links to external symbols imported via tag files in a separate window.

EXT_LINKS_IN_WINDOW    = NO

# Use this tag to change the font size of LaTeX formulas included
# as images in the HTML documentation. The default is 10. Note that
# when you change the font size after a successful doxygen run you need
# to manually remove any form_*.png images from the HTML output directory
# to force them to be regenerated.

FORMULA_FONTSIZE       = 12

# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are
# not supported properly for IE 6.0, but are supported on all modern browsers.
# Note that when changing this option you need to delete any form_*.png files
# in the HTML output before the changes have effect.

FORMULA_TRANSPARENT    = YES

# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
# (see http://www.mathjax.org) which uses client side Javascript for the
# rendering instead of using prerendered bitmaps. Use this if you do not
# have LaTeX installed or if you want the formulas to look prettier in the
# HTML output. When enabled you may also need to install MathJax separately
# and configure the path to it using the MATHJAX_RELPATH option.

USE_MATHJAX            = YES

# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script. For instance, if the mathjax
# directory is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to
# the MathJax Content Delivery Network so you can quickly see the result
# without installing MathJax. However, it is strongly recommended to install
# a local copy of MathJax from http://www.mathjax.org before deployment.

MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML

# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering.

MATHJAX_EXTENSIONS     =
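# As the note above explains, a deployment with a local MathJax installation
# would replace the CDN URL by a relative path; a sketch, assuming the copy
# sits next to the HTML output directory:
# MATHJAX_RELPATH = ../mathjax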
# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
# and DHTML and should work on any modern browser. Note that when using
# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
# (GENERATE_DOCSET) there is already a search function so this one should
# typically be disabled. For large projects the javascript based search
# engine can be slow, in which case enabling SERVER_BASED_SEARCH may provide
# a better solution.

SEARCHENGINE           = NO

# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a PHP enabled web server instead of at the web client
# using Javascript. Doxygen will generate the search PHP script and index
# file to put on the web server. The advantage of the server
# based approach is that it scales better to large projects and allows
# full text search. The disadvantages are that it is more difficult to set
# up and does not have live searching capabilities.

SERVER_BASED_SEARCH    = NO

#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------

# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate LaTeX output.

GENERATE_LATEX         = NO

# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.

LATEX_OUTPUT           = latex

# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
# Note that when enabling USE_PDFLATEX this option is only used for
# generating bitmaps for formulas in the HTML output, but not in the
# Makefile that is written to the output directory.

LATEX_CMD_NAME         = latex

# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.

MAKEINDEX_CMD_NAME     = makeindex

# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.

COMPACT_LATEX          = NO

# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer. Possible values are: a4, letter, legal and
# executive. If left blank a4wide will be used.

PAPER_TYPE             =

# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.

EXTRA_PACKAGES         = amsmath
EXTRA_PACKAGES        += amsfonts
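# Further packages could be appended in the same way; for example, adding
# amssymb (not enabled for this project) would read:
# EXTRA_PACKAGES += amssymb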
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!

LATEX_HEADER           =

# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
# the generated latex document. The footer should contain everything after
# the last chapter. If it is left blank doxygen will generate a
# standard footer. Notice: only use this tag if you know what you are doing!

LATEX_FOOTER           =

# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using a pdf viewer.

PDF_HYPERLINKS         = YES

# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.

USE_PDFLATEX           = YES

# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.

LATEX_BATCHMODE        = NO

# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.

LATEX_HIDE_INDICES     = NO

# If LATEX_SOURCE_CODE is set to YES then doxygen will include
# source code with syntax highlighting in the LaTeX output.
# Note that which sources are shown also depends on other settings
# such as SOURCE_BROWSER.

LATEX_SOURCE_CODE      = NO

# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
# http://en.wikipedia.org/wiki/BibTeX for more info.

LATEX_BIB_STYLE        = plain

#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------

# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
# The RTF output is optimized for Word 97 and may not look very pretty with
# other RTF readers or editors.

GENERATE_RTF           = NO

# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `rtf' will be used as the default path.

RTF_OUTPUT             = rtf

# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
# RTF documents. This may be useful for small projects and may help to
# save some trees in general.

COMPACT_RTF            = NO

# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
# will contain hyperlink fields. The RTF file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using WORD or other
# programs which support those fields.
# Note: wordpad (write) and others do not support links.

RTF_HYPERLINKS         = NO

# Load stylesheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.

RTF_STYLESHEET_FILE    =

# Set optional variables used in the generation of an rtf document.
# Syntax is similar to doxygen's config file.

RTF_EXTENSIONS_FILE    =

#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------

# If the GENERATE_MAN tag is set to YES Doxygen will generate man pages.

GENERATE_MAN           = NO

# The MAN_OUTPUT tag is used to specify where the man pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `man' will be used as the default path.

MAN_OUTPUT             = man

# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3).

MAN_EXTENSION          = .3
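# Man page output is switched off above; an enabled variant for section 3
# (library calls) would simply be the pair:
# GENERATE_MAN  = YES
# MAN_EXTENSION = .3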
# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.

MAN_LINKS              = NO

#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------

# If the GENERATE_XML tag is set to YES Doxygen will
# generate an XML file that captures the structure of
# the code including all documentation.

GENERATE_XML           = NO

# The XML_OUTPUT tag is used to specify where the XML pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `xml' will be used as the default path.

XML_OUTPUT             = xml

# The XML_SCHEMA tag can be used to specify an XML schema,
# which can be used by a validating XML parser to check the
# syntax of the XML files.

XML_SCHEMA             =

# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.

XML_DTD                =

# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
# enabling this will significantly increase the size of the XML output.

XML_PROGRAMLISTING     = YES

#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------

# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
# generate an AutoGen Definitions (see autogen.sf.net) file
# that captures the structure of the code including all
# documentation. Note that this feature is still experimental
# and incomplete at the moment.

GENERATE_AUTOGEN_DEF   = NO

#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------

# If the GENERATE_PERLMOD tag is set to YES Doxygen will
# generate a Perl module file that captures the structure of
# the code including all documentation. Note that this
# feature is still experimental and incomplete at the
# moment.

GENERATE_PERLMOD       = NO

# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
# the necessary Makefile rules, Perl scripts and LaTeX code to be able
# to generate PDF and DVI output from the Perl module output.

PERLMOD_LATEX          = NO

# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
# nicely formatted so it can be parsed by a human reader. This is useful
# if you want to understand what is going on. On the other hand, if this
# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.

PERLMOD_PRETTY         = YES

# The names of the make variables in the generated doxyrules.make file
# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
# This is useful so different doxyrules.make files included by the same
# Makefile don't overwrite each other's variables.

PERLMOD_MAKEVAR_PREFIX =

#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------

# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.

ENABLE_PREPROCESSING   = YES
# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.

MACRO_EXPANSION        = YES

# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.

EXPAND_ONLY_PREDEF     = YES

# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
# pointed to by INCLUDE_PATH will be searched when a #include is found.

SEARCH_INCLUDES        = YES

# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.

INCLUDE_PATH           = @CMAKE_CURRENT_SOURCE_DIR@/../include

# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.

INCLUDE_FILE_PATTERNS  = *.h

# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.

PREDEFINED             = DOXYGEN_SHOULD_SKIP_THIS

# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be
# expanded. The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition that
# overrules the definition found in the source code.

EXPAND_AS_DEFINED      = SHARK_FEATURE_INTERFACE

# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all references to function-like macros
# that are alone on a line, have an all uppercase name, and do not end with a
# semicolon, because these will confuse the parser if not removed.

SKIP_FUNCTION_MACROS   = YES

#---------------------------------------------------------------------------
# Configuration::additions related to external references
#---------------------------------------------------------------------------

# The TAGFILES option can be used to specify one or more tagfiles. For each
# tag file the location of the external documentation should be added. The
# format of a tag file without this location is as follows:
#   TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
#   TAGFILES = file1=loc1 "file2 = loc2" ...
# where "loc1" and "loc2" can be relative or absolute paths
# or URLs. Note that each tag file must have a unique name (where the name does
# NOT include the path). If a tag file is not located in the directory in which
# doxygen is run, you must also specify the path to the tagfile here.

TAGFILES               =

# When a file name is specified after GENERATE_TAGFILE, doxygen will create
# a tag file that is based on the input files it reads.

GENERATE_TAGFILE       = @CMAKE_CURRENT_SOURCE_DIR@/doxygen_pages/tag_files/all.tag
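# As an illustration only (the tag file and URL are hypothetical), pulling in
# an external project's documentation would look like:
# TAGFILES = ../external/other.tag=http://example.org/otherdocs/html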
# If the ALLEXTERNALS tag is set to YES all external classes will be listed
# in the class index. If set to NO only the inherited external classes
# will be listed.

ALLEXTERNALS           = NO

# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will
# be listed.

EXTERNAL_GROUPS        = YES

# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of `which perl').

PERL_PATH              = /usr/bin/perl

#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------

# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with
# base or super classes. Setting the tag to NO turns the diagrams off. Note
# that this option also works with HAVE_DOT disabled, but it is recommended to
# install and use dot, since it yields more powerful graphs.

CLASS_DIAGRAMS         = YES

# You can define message sequence charts within doxygen comments using the
# \msc command. Doxygen will then run the mscgen tool (see
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.

MSCGEN_PATH            =

# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
# or is not a class.

HIDE_UNDOC_RELATIONS   = YES

# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz, a graph visualization
# toolkit from AT&T and Lucent Bell Labs. The other options in this section
# have no effect if this option is set to NO (the default).

HAVE_DOT               = YES

# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
# allowed to run in parallel. When set to 0 (the default) doxygen will
# base this on the number of processors available in the system. You can set
# it explicitly to a value larger than 0 to get control over the balance
# between CPU load and processing speed.

DOT_NUM_THREADS        = 0

# By default doxygen will use the Helvetica font for all dot files that
# doxygen generates. When you want a differently looking font you can specify
# the font name using DOT_FONTNAME. You need to make sure dot is able to find
# the font, which can be done by putting it in a standard location or by
# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to
# the directory containing the font.
# mt: disabled because of "Warning: doxygen no longer ships with the FreeSans font."
# in doxygen output
# DOT_FONTNAME = FreeSans

# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.

DOT_FONTSIZE           = 10

# By default doxygen will tell dot to use the Helvetica font.
# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH
# to set the path where dot can find it.
# mt: disabled because of "Warning: doxygen no longer ships with the FreeSans font."
# in doxygen output
# DOT_FONTPATH =
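# Were a custom font wanted despite the warnings above, the pair of settings
# would look like this sketch (the values are illustrative, not tested here):
# DOT_FONTNAME = Helvetica
# DOT_FONTPATH = /usr/share/fonts/truetype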
# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
# CLASS_DIAGRAMS tag to NO.

CLASS_GRAPH            = YES

# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect implementation dependencies (inheritance, containment, and
# class reference variables) of the class with other documented classes.

#COLLABORATION_GRAPH = YES
COLLABORATION_GRAPH    = NO

# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for groups, showing the direct groups dependencies.

GROUP_GRAPHS           = YES

# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.

UML_LOOK               = NO

# If the UML_LOOK tag is enabled, the fields and methods are shown inside
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
# manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.

UML_LIMIT_NUM_FIELDS   = 10

# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.

#TEMPLATE_RELATIONS = YES
TEMPLATE_RELATIONS     = NO

# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
# tags are set to YES then doxygen will generate a graph for each documented
# file showing the direct and indirect include dependencies of the file with
# other documented files.

INCLUDE_GRAPH          = NO

# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
# documented header file showing the documented files that directly or
# indirectly include this file.

INCLUDED_BY_GRAPH      = NO

# If the CALL_GRAPH and HAVE_DOT options are set to YES then
# doxygen will generate a call dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable call graphs
# for selected functions only using the \callgraph command.

CALL_GRAPH             = NO

# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
# doxygen will generate a caller dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable caller
# graphs for selected functions only using the \callergraph command.

CALLER_GRAPH           = NO

# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
# will generate a graphical hierarchy of all classes instead of a textual one.

GRAPHICAL_HIERARCHY    = YES

# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
# then doxygen will show the dependencies a directory has on other directories
# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.

DIRECTORY_GRAPH        = NO

# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. Possible values are svg, png, jpg, or gif.
# If left blank png will be used. If you choose svg you need to set
# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible in IE 9+ (other browsers do not have this requirement).

DOT_IMAGE_FORMAT       = png
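# Per the note above, switching to scalable graphs would require the
# combination (not used for this project):
# DOT_IMAGE_FORMAT    = svg
# HTML_FILE_EXTENSION = xhtml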
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
# Note that this requires a modern browser other than Internet Explorer.
# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible. Older versions of IE do not have SVG support.

INTERACTIVE_SVG        = NO

# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.

DOT_PATH               =

# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the
# \dotfile command).

DOTFILE_DIRS           =

# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the
# \mscfile command).

MSCFILE_DIRS           =

# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
# nodes that will be shown in the graph. If the number of nodes in a graph
# becomes larger than this value, doxygen will truncate the graph, which is
# visualized by representing a node as a red box. Note that if the
# number of direct children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.

DOT_GRAPH_MAX_NODES    = 200

# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
# graphs generated by dot. A depth value of 3 means that only nodes reachable
# from the root by following a path via at most 3 edges will be shown. Nodes
# that lie further from the root node will be omitted. Note that setting this
# option to 1 or 2 may greatly reduce the computation time needed for large
# code bases. Also note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.

MAX_DOT_GRAPH_DEPTH    = 1
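# The two limits above keep dot runs cheap for a large code base; a one-off
# build of complete graphs would instead lift both restrictions, e.g. (the
# node count is an illustrative value):
# MAX_DOT_GRAPH_DEPTH = 0
# DOT_GRAPH_MAX_NODES = 10000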
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not
# seem to support this out of the box. Warning: Depending on the platform
# used, enabling this option may lead to badly anti-aliased labels on the
# edges of a graph (i.e. they become hard to read).

DOT_TRANSPARENT        = YES

# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple
# output files in one run (i.e. multiple -o and -T options on the command
# line). This makes dot run faster, but since only newer versions of dot
# (>1.8.10) support this, this feature is disabled by default.

DOT_MULTI_TARGETS      = NO

# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
# generate a legend page explaining the meaning of the various boxes and
# arrows in the dot generated graphs.

GENERATE_LEGEND        = YES

# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
# remove the intermediate dot files that are used to generate
# the various graphs.

DOT_CLEANUP            = YES
Shark-3.1.4/doc/SharkLogo.png000066400000000000000000000341711314655040000157370ustar00rootroot00000000000000[binary PNG image data omitted]
Shark-3.1.4/doc/doxygen_pages/000077500000000000000000000000001314655040000161665ustar00rootroot00000000000000Shark-3.1.4/doc/doxygen_pages/css/000077500000000000000000000000001314655040000167565ustar00rootroot00000000000000Shark-3.1.4/doc/doxygen_pages/css/besser.css000066400000000000000000000740451314655040000207630ustar00rootroot00000000000000/* --------------------------------------------------------------------------------*/
/* --------------------------- START CSS RESET ------------------------------------*/
/* --------------------------------------------------------------------------------*/

/* http://meyerweb.com/eric/tools/css/reset/
   v2.0 | 20110126
   License: none (public domain)
*/

html, body, div, span, applet, object, iframe,
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
a, abbr, acronym, address, big, cite, code,
del, dfn, em, img, ins, kbd, q, s, samp,
small, strike, strong, sub, sup, tt, var,
b, u, i, center,
dl, dt, dd, ol, ul, li,
fieldset, form, label, legend,
table, caption, tbody, tfoot, thead, tr, th, td,
article, side, canvas, details, embed,
figure, figcaption, footer, header, hgroup,
menu, nav, output, ruby, section, summary,
time, mark, audio, video {
	margin: 0;
	padding: 0;
	border: 0;
	font-size: 100%;
	font: inherit;
	vertical-align: baseline;
}
/* HTML5 display-role reset for older browsers */
article, aside, details, figcaption, figure,
footer, header, hgroup, menu, nav, section {
	display: block;
}
body {
	line-height: 1;
}
ol, ul {
	list-style: none;
}
blockquote, q {
	quotes: none;
}
blockquote:before, blockquote:after,
q:before, q:after {
	content: '';
	content: none;
}
table {
	border-collapse: collapse;
	border-spacing: 0;
}

/* --------------------------------------------------------------------------------*/
/* ----------------------------- END CSS RESET ------------------------------------*/
/* --------------------------------------------------------------------------------*/

/* --------------------------------------------------------------------------------*/
/* ------------------------ START MT HACKED CSS -----------------------------------*/
/* --------------------------------------------------------------------------------*/

html {
	background-color: #BFD1D4;
	margin: 0 80px;
}
body {
	line-height: 150%;
	color: black;
	padding: 0;
	background-color: white;
	border-width: 1px;
	border-color: #aaa;
	border-style: none solid solid solid;
}
a {
	text-decoration: none;
}
#doxywrapper {
	padding: 25px;
	font-family: "DejaVu Sans", "Trebuchet MS", Arial, Verdana, Helvetica, Sans-serif;
	text-align: left;
	background-image: url("../../sphinx_pages/build/html/_static/contents.png");
	background-repeat: repeat-x;
	font-size: 0.9em;
}
/*
#global_doxytitle {
	float: left;
	margin-top: 10px;
	margin-right: 80px;
	margin-bottom: 10px;
}
*/
/* HERE BE DRAGONS: ON THE CPP SOURCE FILES, !!ALL!! CONTENT IS WITHIN THIS
   WRAPPER. IN ALL/MOST OTHER FILES, THIS IS ONLY THE MENU, AND ALL OTHER
   CONTENT IS IN SEPARATE DIVS :( THIS MUST EITHER BE A DOXYGEN BUG OR
   SOMETHING I DON'T UNDERSTAND */
#navrow_wrapper {
	/*
	width: 400px;
	margin-left: auto;
	margin-right: auto;
	*/
}
#doxywrapper .contents {
	/* font-size: 90%; */
	line-height: 150%;
}
/* SUPPRESS "GO TO PUBLIC TYPES" and "GO TO PUBLIC MEMBER FUNCTIONS"!!!
*/ .summary, .tabs, .tabs2, .navpath{ display: none; } div.header { background-color: #F9FAFC; border-bottom: 1px solid #C4CFE5; padding: 25px 0px 5px 15px; margin: -10px 0px 40px 0px; } div.header div.headertitle { font-size: 1.8em; font-weight: bold; color: #D37900; line-height: 140%; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } p { margin: 0.8em 0 0.5em 0; } /* --------------------------------------------------------------------------------*/ /* ------------------------- STOP MT HACKED CSS------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* ------------------------ START GENERAL PAGE ------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* this is bad for people who rely on this accessibility feature, but they can use user style sheets */ a:focus { outline: 0px; } /* this css style is derived from the mollio templates * available at http://mollio.org/. Mollio is triple-licensed * under CC-BY, GPL, and CPL. */ a:link, a:visited { text-decoration: none; color: #892900; } a:hover, a:active { text-decoration: none; color: #530E00; } /* More layout and styles */ h1 { font-size: 1.6em; font-weight: bold; color: #D37900; margin-top: 30px; } h2 { font-size: 1.2em; font-weight: italic; color: #0c3762; margin-top: 30px; } h3 { font-size: 1.0em; font-weight: bold; font-style: italic; color: #0c3762; margin-top: 30px; } h4 { font-size: 0.9em; font-weight: normal; font-style: italic; color: #0c3762; margin-top: 25px; } div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { color: black; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } /* --------------------------------------------------------------------------------*/ /* -------------------------- END GENERAL PAGE ------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* ------------------------ START SHARK MENU --------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* this css style is derived from the mollio templates * available at http://mollio.org/. Mollio is triple-licensed * under CC-BY, GPL, and CPL. 
*/ #shark_old {margin:0;} body #shark_old { padding: 0 0 0px; color: #333; font: 68% arial,tahoma,verdana,sans-serif; /* margin: 0 80px 0; */ } #shark_old ul {margin: 0 0 1.0em} #shark_old ul {margin: .3em 0 1.5em 0;list-style-type:square;} #shark_old ul.related {margin-top: -1em} #shark_old ol {margin: .5em .5em 1.5em} #shark_old ol li {margin-left: 1.4em;padding-left: 0;background: none; list-style-type: decimal} #shark_old li {line-height: 1.4em;} #shark_old li.doc {background-position: 3px -500px} #shark_old ul.nomarker li {background:none;padding-left:0} img #shark_old {border:none} /* LAYOUT - HEADER */ #shark_old #header {background: url("../../sphinx_pages/build/html/_static/sphinx_sprites_aqua.png") repeat-x 0 100%;margin: 0 0 0;padding: 2px 0 6px} /* MT: site name props !COLOR1:SITE NAME */ #shark_old #header #site-name {text-align:left; font: 285% arial;letter-spacing: +0.0em;margin:10px 0 8px 40px;padding:0px 0;color:#D2D2D2;border:none} #shark_old #header #site-name a, #shark_old #header #site-name a:hover, #shark_old #header #site-name a:active, #shark_old #header #site-name a:visited{color:#D2D2D2} /* NAV - top horizontal nav */ #shark_old #nav, #shark_old #nav ul {padding: 0;margin: 0;list-style: none} /* MT: height means height of the overall title box */ #shark_old #nav {font-weight:bold;height:2.75em;font: bold 96% arial;margin: 0 0 0 40px} #shark_old #nav a {font-weight:bold;} /* !COLOR2:COLUMN-HEADER-BG !COLOR3+4:SHADOWS-BOTTOM-AND-RIGHT*/ #shark_old #nav li {position:relative;background: #999;float: left;width: 10em;display:block;margin: 0;border-bottom: 3px solid #666;border-right: 3px solid #252525;padding:0} /* MT: size of the menu column headers !COLOR5: COLUMN-HEADER-MENU-FONTS */ #shark_old #nav a, #shark_old #nav a:link, #shark_old #nav a:visited, #shark_old #nav a:hover, #shark_old #nav a:active {font-size:130%; text-decoration:none;cursor:pointer;color:#fff;display: block;padding: 6px 10px 4px} /* MT: !COLOR6: mouse-over-column-headers */ #shark_old #nav a:hover {color:#FFC500} /* MT: font sizes _inside_ roll-out boxes here !COLOR7: left margin of roll-out */ #shark_old #nav li ul {margin-top:6px;padding-bottom:6px;border-left: 1px solid #00606D;background: url("../../sphinx_pages/build/html/_static/sphinx_sprites_aqua_box_bg.png") no-repeat 100% 100%;width:19em;font-size:85%;letter-spacing: +0.1em; text-align:left;margin-top:3px;position: absolute;font-weight:bold;left: -999em} #shark_old #nav li:hover ul, #shark_old #nav li.sfhover ul {left: 0;z-index:99999} /* !COLOR8+9: between-item-horizontal-lines-top-and-bottom */ #shark_old #nav li li {background:none;float:none;border:none;border: 1px solid #999;border-top:1px solid #fff;border-right:none;border-left:none;padding-left:0} #shark_old #nav li li.first {border-top:none} #shark_old #nav li li.last {border-bottom:none} /* !COLOR10: fonts in collapsible menu. 
MT: CHANGED THE 2ND TO LAST PX FROM 10 TO 8 TO MAKE RED HOVER LIST ITEMS NARROWER */ #shark_old #nav li li a, #shark_old #nav li li a:link, #shark_old #nav li li a:visited, #shark_old #nav li li a:hover {color:#D2D2D2;padding: 5px 10px 5px;width:12.89em} /* !COLOR11+12: mouse over fonts and BG in collapsible menu */ #shark_old #nav li li a:hover {color:#FFC500;} /* !COLOR13+14: active tab (current location) and shadow*/ #shark_old #nav li.active {background: #D37900;border-bottom: 3px solid #D37900} #shark_old #nav li.active a {color:#FFC500} #shark_old #nav li.active a:hover {color:#FFC500} /* hide from IE mac \*/ #shark_old #nav li {width:auto} /* end hiding from IE5 mac */ /* POWERED BY - Shark logo in this case */ #shark_old #poweredby { width:120px; position:absolute; top:10px; right:0 } /* LAYOUT - main body of page */ #shark_old #wrap {min-width:450px;margin: 0 auto;position:relative} /* --------------------------------------------------------------------------------*/ /* ------------------------- END SHARK MENU ---------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* --------------------------------------------------------------------------------*/ /* ----------------------- START STANDARD DOXYGEN CSS -----------------------------*/ /* --------------------------------------------------------------------------------*/ /* The standard CSS for doxygen */ body, table, div, p, dl { /* mt */ /* font-family: Lucida Grande, Verdana, Geneva, Arial, sans-serif; font-size: 13px; line-height: 1.3; */ } /* @group Heading Levels */ h1 { font-size: 150%; } /* mt */ /* .title { font-size: 150%; font-weight: bold; margin: 10px 2px; } */ h2 { font-size: 120%; } h3 { font-size: 100%; } h1, h2, h3, h4, h5, h6 { -webkit-transition: text-shadow 0.5s linear; -moz-transition: text-shadow 0.5s linear; -ms-transition: text-shadow 0.5s linear; -o-transition: text-shadow 0.5s linear; transition: text-shadow 0.5s linear; margin-right: 15px; } h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { text-shadow: 0 0 15px cyan; } dt { font-weight: bold; } div.multicol { -moz-column-gap: 1em; -webkit-column-gap: 1em; -moz-column-count: 3; -webkit-column-count: 3; } p.startli, p.startdd, p.starttd { margin-top: 2px; } p.endli { margin-bottom: 0px; } p.enddd { margin-bottom: 4px; } p.endtd { margin-bottom: 2px; } /* @end */ caption { font-weight: bold; } span.legend { font-size: 70%; text-align: center; } h3.version { font-size: 90%; text-align: center; } div.qindex, div.navtab{ background-color: #EBEFF6; border: 1px solid #A3B4D7; text-align: center; } /* mt */ div.qindex { margin: 10px 0px; } div.qindex, div.navpath { width: 100%; line-height: 140%; } div.navtab { margin-right: 15px; } /* @group Link Styling */ a { color: #3D578C; font-weight: normal; text-decoration: none; } a.qindex { font-weight: bold; } a.qindexHL { font-weight: bold; background-color: #9CAFD4; color: #ffffff; border: 1px double #869DCA; } a.el { font-weight: bold; font-size: 13px; } a.elRef { } /* @end */ dl.el { margin-left: -1cm; } pre.fragment { border: 1px solid #C4CFE5; background-color: #FBFCFD; padding: 4px 6px; margin: 4px 8px 4px 2px; overflow: auto; word-wrap: break-word; font-size: 9pt; line-height: 125%; font-family: monospace, fixed; font-size: 105%; } div.fragment { padding: 4px; margin: 4px; margin-top: 30px; background-color: #FBFCFD; border: 1px solid #C4CFE5; } div.line { font-family: monospace, fixed; font-size: 13px; min-height: 13px; line-height: 
1.0; text-wrap: unrestricted; white-space: -moz-pre-wrap; /* Moz */ white-space: -pre-wrap; /* Opera 4-6 */ white-space: -o-pre-wrap; /* Opera 7 */ white-space: pre-wrap; /* CSS3 */ word-wrap: break-word; /* IE 5.5+ */ text-indent: -53px; padding-left: 53px; padding-bottom: 0px; margin: 0px; -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } div.line.glow { background-color: cyan; box-shadow: 0 0 10px cyan; } span.lineno { padding-right: 4px; text-align: right; border-right: 2px solid #0F0; background-color: #E8E8E8; white-space: pre; } span.lineno a { background-color: #D8D8D8; } div.ah { font-weight: bold; color: #D37900; margin-bottom: 15px; margin-top: 30px; font-size: 180%; border-style: outset; border-width: 2px; padding: 2px; } div.groupHeader { margin-left: 16px; margin-top: 12px; font-weight: bold; } div.groupText { margin-left: 16px; font-style: italic; } body { background-color: white; color: black; margin: 0; } div.contents { margin-top: 10px; margin-left: 12px; margin-right: 8px; } td.indexkey { background-color: #EBEFF6; font-weight: bold; border: 1px solid #C4CFE5; margin: 2px 0px 2px 0; padding: 2px 10px; white-space: nowrap; vertical-align: top; } td.indexvalue { background-color: #EBEFF6; border: 1px solid #C4CFE5; padding: 2px 10px; margin: 2px 0px; } tr.memlist { background-color: #EEF1F7; } p.formulaDsp { text-align: center; } img.formulaDsp { } img.formulaInl { vertical-align: middle; } div.center { text-align: center; margin-top: 0px; margin-bottom: 0px; padding: 0px; } div.center img { border: 0px; } address.footer { text-align: right; padding-right: 12px; } img.footer { border: 0px; vertical-align: middle; } /* @group Code Colorization */ span.keyword { color: #008000 } span.keywordtype { color: #604020 } span.keywordflow { color: #e08000 } span.comment { color: #800000 } span.preprocessor { color: #806020 } span.stringliteral { color: #002080 } span.charliteral { color: #008080 } span.vhdldigit { color: #ff00ff } span.vhdlchar { color: #000000 } span.vhdlkeyword { color: #700070 } span.vhdllogic { color: #ff0000 } blockquote { background-color: #F7F8FB; border-left: 2px solid #9CAFD4; margin: 0 24px 0 4px; padding: 0 12px 0 16px; } /* @end */ /* .search { color: #003399; font-weight: bold; } form.search { margin-bottom: 0px; margin-top: 0px; } input.search { font-size: 75%; color: #000080; font-weight: normal; background-color: #e8eef2; } */ td.tiny { font-size: 75%; } .dirtab { padding: 4px; border-collapse: collapse; border: 1px solid #A3B4D7; } th.dirtab { background: #EBEFF6; font-weight: bold; } hr { height: 0px; border: none; border-top: 1px solid #4A6AAA; } hr.footer { height: 1px; } /* @group Member Descriptions */ table.memberdecls { border-spacing: 0px; padding: 0px; } .memberdecls td { -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, 
box-shadow; transition-duration: 0.5s; } .memberdecls td.glow { background-color: cyan; box-shadow: 0 0 15px cyan; } .mdescLeft, .mdescRight, .memItemLeft, .memItemRight, .memTemplItemLeft, .memTemplItemRight, .memTemplParams { background-color: #F9FAFC; border: none; margin: 4px; padding: 1px 0 0 8px; } .mdescLeft, .mdescRight { padding: 0px 8px 4px 8px; color: #555; } .memItemLeft, .memItemRight, .memTemplParams { border-top: 1px solid #C4CFE5; } .memItemLeft, .memTemplItemLeft { white-space: nowrap; } .memItemRight { width: 100%; } .memTemplParams { color: #4665A2; white-space: nowrap; } /* @end */ /* @group Member Details */ /* Styles for detailed member documentation */ .memtemplate { font-size: 80%; color: #4665A2; font-weight: normal; margin-left: 9px; } .memnav { background-color: #EBEFF6; border: 1px solid #A3B4D7; text-align: center; margin: 2px; margin-right: 15px; padding: 2px; } .mempage { width: 100%; } .memitem { padding: 0; margin-bottom: 10px; margin-right: 5px; -webkit-transition: box-shadow 0.5s linear; -moz-transition: box-shadow 0.5s linear; -ms-transition: box-shadow 0.5s linear; -o-transition: box-shadow 0.5s linear; transition: box-shadow 0.5s linear; display: table !important; width: 100%; } .memitem.glow { box-shadow: 0 0 15px cyan; } .memname { font-weight: bold; margin-left: 6px; } .memname td { vertical-align: bottom; } .memproto, dl.reflist dt { border-top: 1px solid #A8B8D9; border-left: 1px solid #A8B8D9; border-right: 1px solid #A8B8D9; padding: 6px 0px 6px 0px; color: #253555; font-weight: bold; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E2E8F2; /* opera specific markup */ box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); border-top-right-radius: 4px; border-top-left-radius: 4px; /* firefox specific markup */ -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; -moz-border-radius-topright: 4px; -moz-border-radius-topleft: 4px; /* webkit specific markup */ -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); -webkit-border-top-right-radius: 4px; -webkit-border-top-left-radius: 4px; } .memdoc, dl.reflist dd { border-bottom: 1px solid #A8B8D9; border-left: 1px solid #A8B8D9; border-right: 1px solid #A8B8D9; padding: 6px 10px 2px 10px; background-color: #FBFCFD; border-top-width: 0; background-image:url('nav_g.png'); background-repeat:repeat-x; background-color: #FFFFFF; /* opera specific markup */ border-bottom-left-radius: 4px; border-bottom-right-radius: 4px; box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); /* firefox specific markup */ -moz-border-radius-bottomleft: 4px; -moz-border-radius-bottomright: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; /* webkit specific markup */ -webkit-border-bottom-left-radius: 4px; -webkit-border-bottom-right-radius: 4px; -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); } dl.reflist dt { padding: 5px; } dl.reflist dd { margin: 0px 0px 10px 0px; padding: 5px; } .paramkey { text-align: right; } .paramtype { white-space: nowrap; } .paramname { color: #602020; white-space: nowrap; } .paramname em { font-style: normal; } .paramname code { line-height: 14px; } .params, .retval, .exception, .tparams { margin-left: 0px; padding-left: 0px; } .params .paramname, .retval .paramname { font-weight: bold; vertical-align: top; } .params .paramtype { font-style: italic; vertical-align: top; } .params .paramdir { font-family: "courier new",courier,monospace; vertical-align: top; } table.mlabels { border-spacing: 0px; } td.mlabels-left { width: 100%; 
td.mlabels-left { width: 100%; padding: 0px; }
td.mlabels-right { vertical-align: bottom; padding: 0px; white-space: nowrap; }
span.mlabels { margin-left: 8px; }
span.mlabel {
  background-color: #728DC1;
  border-top: 1px solid #5373B4;
  border-left: 1px solid #5373B4;
  border-right: 1px solid #C4CFE5;
  border-bottom: 1px solid #C4CFE5;
  text-shadow: none;
  color: white;
  margin-right: 4px;
  padding: 2px 3px;
  border-radius: 3px;
  font-size: 7pt;
  white-space: nowrap;
}
/* @end */

/* these are for tree view when not used as main index */
div.directory { margin: 10px 0px; border-top: 1px solid #A8B8D9; border-bottom: 1px solid #A8B8D9; width: 100%; }
.directory table { border-collapse: collapse; }
.directory td { margin: 0px; padding: 0px; vertical-align: top; }
.directory td.entry { white-space: nowrap; padding-right: 6px; }
.directory td.entry a { outline: none; }
.directory td.entry a img { border: none; }
.directory td.desc { width: 100%; padding-left: 6px; padding-right: 6px; border-left: 1px solid rgba(0,0,0,0.05); }
.directory tr.even { padding-left: 6px; background-color: #F7F8FB; }
.directory img { vertical-align: -30%; }
.directory .levels { white-space: nowrap; width: 100%; text-align: right; font-size: 9pt; }
.directory .levels span { cursor: pointer; padding-left: 2px; padding-right: 2px; color: #3D578C; }
div.dynheader {
  margin-top: 8px;
  -webkit-touch-callout: none;
  -webkit-user-select: none;
  -khtml-user-select: none;
  -moz-user-select: none;
  -ms-user-select: none;
  user-select: none;
}
address { font-style: normal; color: #2A3D61; }
table.doxtable { border-collapse: collapse; margin-top: 4px; margin-bottom: 4px; }
table.doxtable td, table.doxtable th { border: 1px solid #2D4068; padding: 3px 7px 2px; }
table.doxtable th { background-color: #374F7F; color: #FFFFFF; font-size: 110%; padding-bottom: 4px; padding-top: 5px; }
table.fieldtable {
  width: 100%;
  margin-bottom: 10px;
  border: 1px solid #A8B8D9;
  border-spacing: 0px;
  -moz-border-radius: 4px;
  -webkit-border-radius: 4px;
  border-radius: 4px;
  -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
  -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
  box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
}
.fieldtable td, .fieldtable th { padding: 3px 7px 2px; }
.fieldtable td.fieldtype, .fieldtable td.fieldname { white-space: nowrap; border-right: 1px solid #A8B8D9; border-bottom: 1px solid #A8B8D9; vertical-align: top; }
.fieldtable td.fielddoc { border-bottom: 1px solid #A8B8D9; width: 100%; }
.fieldtable tr:last-child td { border-bottom: none; }
.fieldtable th {
  background-image: url('nav_f.png');
  background-repeat: repeat-x;
  background-color: #E2E8F2;
  font-size: 90%;
  color: #253555;
  padding-bottom: 4px;
  padding-top: 5px;
  text-align: left;
  -moz-border-radius-topleft: 4px;
  -moz-border-radius-topright: 4px;
  -webkit-border-top-left-radius: 4px;
  -webkit-border-top-right-radius: 4px;
  border-top-left-radius: 4px;
  border-top-right-radius: 4px;
  border-bottom: 1px solid #A8B8D9;
}
.tabsearch { top: 0px; left: 10px; height: 36px; background-image: url('tab_b.png'); z-index: 101; overflow: hidden; font-size: 13px; }
.navpath ul {
  font-size: 11px;
  background-image: url('tab_b.png');
  background-repeat: repeat-x;
  height: 30px;
  line-height: 30px;
  color: #8AA0CC;
  border: solid 1px #C2CDE4;
  overflow: hidden;
  margin: 0px;
  padding: 0px;
}
.navpath li {
  list-style-type: none;
  float: left;
  padding-left: 10px;
  padding-right: 15px;
  background-image: url('bc_s.png');
  background-repeat: no-repeat;
  background-position: right;
  color: #364D7C;
}
.navpath li.navelem a { height: 32px; display: block; text-decoration: none; outline: none; }
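/*
 * Orientation note: .navpath styles the breadcrumb bar. The markup sketched
 * here is the usual Doxygen output and is an assumption, not taken from this
 * repository:
 *
 *   <div class="navpath"><ul>
 *     <li class="navelem"><a href="...">shark</a></li>
 *     <li class="navelem"><a href="...">ClassName</a></li>
 *   </ul></div>
 *
 * bc_s.png is rendered at the right edge of each item as the separator.
 */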
.navpath li.footer {
  list-style-type: none;
  float: right;
  padding-left: 10px;
  padding-right: 15px;
  background-image: none;
  background-repeat: no-repeat;
  background-position: right;
  color: #364D7C;
  font-size: 8pt;
}
div.summary { float: right; font-size: 8pt; padding-right: 5px; width: 50%; text-align: right; }
div.summary a { white-space: nowrap; }
div.ingroups { font-size: 8pt; width: 50%; text-align: left; }
div.ingroups a { white-space: nowrap; }
dl { padding: 0 0 0 10px; }
/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */
dl.section { margin-left: 0px; padding-left: 0px; }
dl.note { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #D0C000; }
dl.warning, dl.attention { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #FF0000; }
dl.pre, dl.post, dl.invariant { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #00D000; }
dl.deprecated { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #505050; }
dl.todo { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #00C0E0; }
dl.test { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #3030E0; }
dl.bug { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #C08050; }
dl.section dd { margin-bottom: 6px; }
#projectlogo { text-align: center; vertical-align: bottom; border-collapse: separate; }
#projectlogo img { border: 0px none; }
#projectname { font: 300% Tahoma, Arial, sans-serif; margin: 0px; padding: 2px 0px; }
#projectbrief { font: 120% Tahoma, Arial, sans-serif; margin: 0px; padding: 0px; }
#projectnumber { font: 50% Tahoma, Arial, sans-serif; margin: 0px; padding: 0px; }
#titlearea { padding: 0px; margin: 0px; width: 100%; border-bottom: 1px solid #5373B4; }
.image { text-align: center; }
.dotgraph { text-align: center; }
.mscgraph { text-align: center; }
.caption { font-weight: bold; }
div.zoom { border: 1px solid #90A5CE; }
dl.citelist { margin-bottom: 50px; }
dl.citelist dt { color: #334975; float: left; font-weight: bold; margin-right: 10px; padding: 5px; }
dl.citelist dd { margin: 2px 0; padding: 5px 0; }
div.toc {
  padding: 14px 25px;
  background-color: #F4F6FA;
  border: 1px solid #D8DFEE;
  border-radius: 7px 7px 7px 7px;
  float: right;
  height: auto;
  margin: 0 20px 10px 10px;
  width: 200px;
}
div.toc li { background: url("bdwn.png") no-repeat scroll 0 5px transparent; font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; margin-top: 5px; padding-left: 10px; padding-top: 2px; }
div.toc h3 { font: bold 12px/1.2 Arial,FreeSans,sans-serif; color: #4665A2; border-bottom: 0 none; margin: 0; }
div.toc ul { list-style: none outside none; border: medium none; padding: 0px; }
div.toc li.level1 { margin-left: 0px; }
div.toc li.level2 { margin-left: 15px; }
div.toc li.level3 { margin-left: 30px; }
div.toc li.level4 { margin-left: 45px; }
.inherit_header {
  font-weight: bold;
  color: gray;
  cursor: pointer;
  -webkit-touch-callout: none;
  -webkit-user-select: none;
  -khtml-user-select: none;
  -moz-user-select: none;
  -ms-user-select: none;
  user-select: none;
}
.inherit_header td { padding: 6px 0px 2px 5px; }
.inherit { display: none; }
tr.heading h2 { margin-top: 12px; margin-bottom: 4px; }
@media print {
  #top { display: none; }
  #side-nav { display: none; }
  #nav-path { display: none; }
  body { overflow: visible; }
  h1, h2, h3, h4, h5, h6 { page-break-after: avoid; }
  .summary { display: none; }
  .memitem { page-break-inside: avoid; }
  #doc-content { margin-left: 0 !important; height: auto !important; width: auto !important; overflow: inherit; display: inline; }
}
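/*
 * Note on the print rules above: navigation chrome (#top, #side-nav,
 * #nav-path) and the summary links are hidden, and .memitem blocks are kept
 * on a single page where possible, so printouts contain only the
 * documentation body.
 */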
/* mt: merges of new doxygen.css features/styles */
.ttc { position: absolute; display: none; }
/* --------------------------------------------------------------------------------*/
/* ------------------------ END STANDARD DOXYGEN CSS ------------------------------*/
/* --------------------------------------------------------------------------------*/
Shark-3.1.4/doc/doxygen_pages/templates/000077500000000000000000000000001314655040000201645ustar00rootroot00000000000000
Shark-3.1.4/doc/doxygen_pages/templates/footer.html000066400000000000000000000000271314655040000223470ustar00rootroot00000000000000
Shark-3.1.4/doc/doxygen_pages/templates/header.html000066400000000000000000000112141314655040000223010ustar00rootroot00000000000000
$title