==== libcds-2.3.3/.gitignore ====

/doc
/sandbox
*.o
*.d
/bin
/obj
/projects/Win/vc14/cds.sdf
/projects/Win/vc14/cds.v14.suo
/projects/Win/vc14/*.user
/projects/Win/vc14/*.opensdf
/projects/Win/vc14/.vs/
/projects/Win/vc141/.vs/
/projects/Win/vc141/*.user
*.log
/.project
/projects/Win/vc14/*.opendb
/test/stress/data/dictionary.txt
/projects/Win/vc14/cds.VC.db
/.cproject
/.settings/
/tools/change_license.pl

==== libcds-2.3.3/.travis.yml ====

language: cpp

install:
  - chmod +x ./build/CI/travis-ci/install.sh
  - ./build/CI/travis-ci/install.sh

script:
  - chmod +x ./build/CI/travis-ci/run.sh
  - ./build/CI/travis-ci/run.sh

linux: &linux_gcc
  os: linux
  addons:
    apt:
      sources:
        - ubuntu-toolchain-r-test
      packages:
        - g++-6
  compiler:
    - g++-6
  before_install:
    - eval "CC=gcc-6 && CXX=g++-6"

linux: &linux_clang
  os: linux
  addons:
    apt:
      sources:
        - ubuntu-toolchain-r-test
        - llvm-toolchain-trusty-4.0
      packages:
        - clang-4.0
  compiler:
    - clang-4.0
  before_install:
    - eval "CC=clang-4.0 && CXX=clang++-4.0"

osx: &osx
  os: osx
  osx_image: xcode8.3
  compiler:
    - clang
  before_install:
    - eval "CC=clang && CXX=clang++"

matrix:
  include:
    ## BUILD_TYPE=Release CXX_COMPILER=g++-6
    - <<: *linux_gcc
      env: TARGET=unit-deque BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-ilist BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-list BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-map BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-misc BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-pqueue BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-queue BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-feldman BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-michael-michael BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-michael-lazy BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-michael-iterable BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-skip BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-split-michael BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-split-lazy BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-iset-split-iterable BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-set BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-striped-set BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-stack BUILD_TYPE=Release
    - <<: *linux_gcc
      env: TARGET=unit-tree BUILD_TYPE=Release

    ## BUILD_TYPE=Debug CXX_COMPILER=g++-6
    - <<: *linux_gcc
      env: TARGET=unit-deque BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-ilist BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-list BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-map BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-misc BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-pqueue BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-queue BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-iset BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-set BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-striped-set BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-stack BUILD_TYPE=Debug
    - <<: *linux_gcc
      env: TARGET=unit-tree BUILD_TYPE=Debug

    ## BUILD_TYPE=Release CXX_COMPILER=clang-4.0
    - <<: *linux_clang
      env: TARGET=unit-deque BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-ilist BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-list BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-misc BUILD_TYPE=Release LINKER_FLAGS=-latomic
    - <<: *linux_clang
      env: TARGET=unit-pqueue BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-queue BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-feldman BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-michael-michael BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-michael-iterable BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-michael-lazy BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-skip BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-split-iterable BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-split-michael BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-set-split-lazy BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-striped-set BUILD_TYPE=Release
    - <<: *linux_clang
      env: TARGET=unit-stack BUILD_TYPE=Release
    # FIXME: building too long. Travis-ci will stop building.
    # - BUILD_TYPE=Release TARGET=unit-map
    # - BUILD_TYPE=Release TARGET=unit-iset
    # - BUILD_TYPE=Release TARGET=unit-tree

    ## BUILD_TYPE=Debug CXX_COMPILER=clang-4.0
    - <<: *linux_clang
      env: TARGET=unit-deque BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-ilist BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-list BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-map BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-misc BUILD_TYPE=Debug LINKER_FLAGS=-latomic
    - <<: *linux_clang
      env: TARGET=unit-pqueue BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-queue BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-iset BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-set BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-striped-set BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-stack BUILD_TYPE=Debug
    - <<: *linux_clang
      env: TARGET=unit-tree BUILD_TYPE=Debug

    # RELEASE
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-deque
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-ilist
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-list
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-misc
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-pqueue
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-queue
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-feldman
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-michael-michael
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-michael-lazy
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-michael-iterable
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-skip
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-split-michael
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-split-lazy
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-iset-split-iterable
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-striped-set
    - <<: *osx
      env: BUILD_TYPE=Release TARGET=unit-stack
    # FIXME: building too long. Travis-ci will stop building.
    # - <<: *osx
    #   env: BUILD_TYPE=Release TARGET=unit-map
    # - <<: *osx
    #   env: BUILD_TYPE=Release TARGET=unit-set
    # - <<: *osx
    #   env: BUILD_TYPE=Release TARGET=unit-tree

    # DEBUG
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-deque
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-ilist
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-list
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-map
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-misc
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-pqueue
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-queue
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-feldman
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-michael-michael
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-michael-lazy
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-michael-iterable
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-skip
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-split-michael
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-split-lazy
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-iset-split-iterable
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-set
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-striped-set
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-stack
    - <<: *osx
      env: BUILD_TYPE=Debug TARGET=unit-tree
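The matrix above fans each gtest target out into its own job so that no single job hits the Travis time limit (hence the FIXME-commented targets). A single matrix entry can be replayed locally; a minimal sketch, assuming a Linux box with gcc-6, pip, and conan available:

```sh
# Hypothetical local replay of one matrix entry. run.sh reads TARGET,
# BUILD_TYPE and LINKER_FLAGS from the environment, as Travis exports them.
export CC=gcc-6 CXX=g++-6
export TARGET=unit-queue BUILD_TYPE=Release
export TRAVIS_BUILD_DIR=$PWD          # run.sh uses it to locate the conan deps dir
./build/CI/travis-ci/install.sh       # installs conan (see the script below)
./build/CI/travis-ci/run.sh           # conan install + cmake + ctest -R $TARGET
```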
==== libcds-2.3.3/CMakeLists.txt ====

cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)
cmake_policy(SET CMP0016 NEW)
if(POLICY CMP0042)
    cmake_policy(SET CMP0042 NEW)
endif()

set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/build/cmake ${CMAKE_MODULE_PATH})
include(TargetArch)
include(CheckIncludeFileCXX)

project(cds)
set(PROJECT_VERSION 2.3.3)

# Options
option(WITH_TESTS "Build unit tests" OFF)
option(WITH_TESTS_COVERAGE "Analyze test coverage using gcov (only for gcc)" OFF)
option(WITH_BOOST_ATOMIC "Use boost atomics (only for boost >= 1.54)" OFF)
option(WITH_ASAN "Build ASan+UBSan instrumented code" OFF)
option(WITH_TSAN "Build TSan instrumented code" OFF)
option(ENABLE_UNIT_TEST "Enable unit test" ON)
option(ENABLE_STRESS_TEST "Enable stress test" ON)
set(CMAKE_TARGET_ARCHITECTURE "" CACHE string "Target build architecture")

find_package(Threads)

if(NOT CMAKE_TARGET_ARCHITECTURE)
    target_architecture(CMAKE_TARGET_ARCHITECTURE)
endif()

if(APPLE)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_DARWIN_C_SOURCE")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_DARWIN_C_SOURCE")
endif()

if(WITH_BOOST_ATOMIC)
    if(TARGET boost::atomic)
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCDS_USE_BOOST_ATOMIC")
        link_libraries(boost::atomic)
    else()
        find_package(Boost 1.53 COMPONENTS atomic)
        if(Boost_FOUND)
            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCDS_USE_BOOST_ATOMIC")
            message(STATUS "Boost version allows using of boost.atomic: activated")
        endif()
    endif()
endif(WITH_BOOST_ATOMIC)

if(WITH_ASAN)
    if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
        set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG")
        set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=address,undefined -g -DCDS_ADDRESS_SANITIZER_ENABLED -fno-omit-frame-pointer -fno-optimize-sibling-calls")
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -fsanitize=address,undefined -g -DCDS_ASAN_ENABLED -fno-omit-frame-pointer -fno-optimize-sibling-calls")
        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address,undefined -pie")
    else()
        message(WARNING "Compiler does not support AddressSanitizer")
    endif()
endif(WITH_ASAN)

if(WITH_TSAN)
    if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
        set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG")
        set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=thread -g -DCDS_THREAD_SANITIZER_ENABLED -fno-omit-frame-pointer")
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -fPIC -fsanitize=thread -g -DCDS_THREAD_SANITIZER_ENABLED -fno-omit-frame-pointer")
        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie")
    else()
        message(WARNING "Compiler does not support ThreadSanitizer")
    endif()
endif(WITH_TSAN)

if(WITH_TESTS_COVERAGE)
    if(CMAKE_COMPILER_IS_GNUCXX)
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage")
        message(STATUS "Test coverage analysis: activated")
    else()
        message(WARNING "Compiler is not GNU gcc! Test coverage couldn't be analyzed")
    endif()
endif(WITH_TESTS_COVERAGE)

set(CDS_SHARED_LIBRARY ${PROJECT_NAME})
set(CDS_STATIC_LIBRARY ${PROJECT_NAME}-s)

set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
set(CMAKE_INCLUDE_CURRENT_DIR ON)

if(CDS_BIN_DIR)
    set(EXECUTABLE_OUTPUT_PATH ${CDS_BIN_DIR})
    set(LIBRARY_OUTPUT_PATH ${CDS_BIN_DIR})
else()
    set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
    set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
endif()
message(STATUS "Binary output path: ${EXECUTABLE_OUTPUT_PATH}")

if(NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Debug CACHE STRING "Default build type to Debug" FORCE)
endif()

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
    # the trailing dummy argument keeps the command well-formed when CMAKE_CXX_FLAGS is empty
    string(REGEX MATCHALL "-std=[^ ]+" cxx_std_found ${CMAKE_CXX_FLAGS} " dummy@rg")
    if(cxx_std_found)
        message("C++ std: ${cxx_std_found}")
    else()
        list(APPEND LIBCDS_PUBLIC_CXX_FLAGS "-std=c++11")
        message("C++ std: -std=c++11 (default)")
    endif()

    list(APPEND LIBCDS_PRIVATE_CXX_FLAGS "-Wall" "-Wextra" "-pedantic")

    if(CMAKE_TARGET_ARCHITECTURE STREQUAL "x86_64")
        list(APPEND LIBCDS_PUBLIC_CXX_FLAGS "-mcx16")
        set(LIB_SUFFIX "64")

        # GCC-7: 128-bit atomics support is implemented via libatomic on amd64
        # see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html
        # Maybe, it will be changed in future
        if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "7.0.0" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.0.0")
            set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -latomic")
        endif()
    endif()
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
    set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> -q -c ${CMAKE_STATIC_LINKER_FLAGS} -o <TARGET> <OBJECTS>")
    list(APPEND LIBCDS_PRIVATE_CXX_FLAGS "-Wl,-G")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-brtl")
endif()

set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG")

CHECK_INCLUDE_FILE_CXX(linux/membarrier.h CDS_HAVE_LINUX_MEMBARRIER_H CMAKE_CXX_FLAGS)

message("Build type -- ${CMAKE_BUILD_TYPE}")
message("Compiler version: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}")
message("System: ${CMAKE_SYSTEM_NAME} version: ${CMAKE_SYSTEM_VERSION}")
message("Target architecture: ${CMAKE_TARGET_ARCHITECTURE}")
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
    message("Compiler flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG} ${LIBCDS_PUBLIC_CXX_FLAGS} ${LIBCDS_PRIVATE_CXX_FLAGS}")
else()
    message("Compiler flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE} ${LIBCDS_PUBLIC_CXX_FLAGS} ${LIBCDS_PRIVATE_CXX_FLAGS}")
endif()
message("Exe flags: ${CMAKE_EXE_LINKER_FLAGS}")

# Component names for separate distribution in rpms, debs etc.
set(LIBRARIES_COMPONENT lib)
set(HEADERS_COMPONENT devel)

set(SOURCES
    src/init.cpp
    src/hp.cpp
    src/dhp.cpp
    src/urcu_gp.cpp
    src/urcu_sh.cpp
    src/thread_data.cpp
    src/topology_hpux.cpp
    src/topology_linux.cpp
    src/topology_osx.cpp
    src/dllmain.cpp)

add_library(${CDS_SHARED_LIBRARY} SHARED ${SOURCES})
set_target_properties(${CDS_SHARED_LIBRARY} PROPERTIES VERSION ${PROJECT_VERSION} DEBUG_POSTFIX "_d")
if(MINGW)
    set_target_properties(${CDS_SHARED_LIBRARY} PROPERTIES DEFINE_SYMBOL CDS_BUILD_LIB)
endif()

add_library(${CDS_STATIC_LIBRARY} STATIC ${SOURCES})
set_target_properties(${CDS_STATIC_LIBRARY} PROPERTIES DEBUG_POSTFIX "_d")
if(MINGW)
    target_compile_definitions(${CDS_STATIC_LIBRARY} PRIVATE CDS_BUILD_STATIC_LIB)
endif()

target_link_libraries(${CDS_SHARED_LIBRARY} PRIVATE ${CMAKE_THREAD_LIBS_INIT})
target_link_libraries(${CDS_STATIC_LIBRARY} PRIVATE ${CMAKE_THREAD_LIBS_INIT})

target_include_directories(${CDS_SHARED_LIBRARY} INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>" $<INSTALL_INTERFACE:include>)
target_include_directories(${CDS_STATIC_LIBRARY} INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>" $<INSTALL_INTERFACE:include>)

target_compile_options(${CDS_SHARED_LIBRARY} PUBLIC "${LIBCDS_PUBLIC_CXX_FLAGS}")
target_compile_options(${CDS_STATIC_LIBRARY} PUBLIC "${LIBCDS_PUBLIC_CXX_FLAGS}")
target_compile_options(${CDS_SHARED_LIBRARY} PRIVATE "${LIBCDS_PRIVATE_CXX_FLAGS}")
target_compile_options(${CDS_STATIC_LIBRARY} PRIVATE "${LIBCDS_PRIVATE_CXX_FLAGS}")

install(TARGETS ${CDS_SHARED_LIBRARY} EXPORT LibCDSConfig LIBRARY DESTINATION lib${LIB_SUFFIX} COMPONENT ${LIBRARIES_COMPONENT} NAMELINK_SKIP RUNTIME DESTINATION lib${LIB_SUFFIX})
install(TARGETS ${CDS_SHARED_LIBRARY} EXPORT LibCDSConfig LIBRARY DESTINATION lib${LIB_SUFFIX} COMPONENT ${HEADERS_COMPONENT} NAMELINK_ONLY)
install(TARGETS ${CDS_STATIC_LIBRARY} EXPORT LibCDSConfig DESTINATION lib${LIB_SUFFIX} COMPONENT ${LIBRARIES_COMPONENT})
install(EXPORT LibCDSConfig FILE LibCDSConfig.cmake NAMESPACE LibCDS:: DESTINATION lib/cmake/LibCDS)
install(DIRECTORY ${PROJECT_SOURCE_DIR}/cds DESTINATION include COMPONENT ${HEADERS_COMPONENT})

if(WITH_TESTS)
    enable_testing()
    add_subdirectory(${PROJECT_SOURCE_DIR}/test)
    message(STATUS "Build tests: activated")
endif(WITH_TESTS)

### FOR PACKAGING in RPM, TGZ, DEB, NSIS... ###########################################################################
set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION})
set(CPACK_PACKAGE_NAME ${PROJECT_NAME})
set(CPACK_PACKAGE_CONTACT "Max Khizhinsky <libcds.dev@gmail.com>")
set(CPACK_PACKAGE_RELEASE 1)
set(CPACK_PACKAGE_INSTALL_DIRECTORY "cds")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${PROJECT_SOURCE_DIR}/build/cmake/description.txt")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Library of concurrent data structures")
set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}")
set(DEPLOY_PACKAGE_FILE_NAME "${CPACK_PACKAGE_FILE_NAME}")

# TGZ specific
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)

# RPM specific
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_PACKAGE_RELEASE ${CPACK_PACKAGE_RELEASE})
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh")
set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh")
set(CPACK_RPM_PACKAGE_URL https://github.com/khizmax/libcds)
set(CPACK_RPM_PACKAGE_LICENSE GPL)
set(CPACK_RPM_PACKAGE_GROUP "System Environment/Base")
set(CPACK_RPM_PACKAGE_REQUIRES "boost >= 1.50")
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION ${CPACK_PACKAGING_INSTALL_PREFIX})
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION /usr/local)
set(CPACK_RPM_devel_PACKAGE_REQUIRES "boost >= 1.50, cds-lib = ${PROJECT_VERSION}")

# DEB specific
set(CPACK_DEB_COMPONENT_INSTALL ON)
set(CPACK_DEBIAN_PACKAGE_DEPENDS "boost (>= 1.50)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/khizmax/libcds")
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh;;${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh;")

# NSIS specific
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_PACKAGE_NAME}")
set(CPACK_NSIS_DISPLAY_NAME "${CPACK_PACKAGE_NAME}")
set(CPACK_NSIS_CONTACT ${CPACK_PACKAGE_CONTACT})
set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL ON)
set(CPACK_NSIS_MODIFY_PATH ON)

# Components grouping for Mac OS X and Windows installers
set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_GROUP "Runtime")
set(CPACK_COMPONENT_${HEADERS_COMPONENT}_GROUP "Development")
set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_DISPLAY_NAME "Libraries")
set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DISPLAY_NAME "C++ Headers")
set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DEPENDS ${LIBRARIES_COMPONENT})
set(CPACK_COMPONENT_GROUP_DEVELOPMENT_DESCRIPTION "All of the tools you'll ever need to develop lock-free oriented software with libcds")
set(CPACK_COMPONENT_GROUP_RUNTIME_DESCRIPTION "Only libcds library for runtime")

include(CPack)
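The `install(EXPORT LibCDSConfig ...)` call above publishes the libraries as an imported-target package, so a downstream project can consume an installed libcds without hand-written include or library paths. A minimal sketch of such a consumer (the `cds_consumer` project and `my_app` target are hypothetical):

```cmake
cmake_minimum_required(VERSION 3.0.2)
project(cds_consumer)

# LibCDSConfig.cmake is installed to lib/cmake/LibCDS by the install(EXPORT) above
find_package(LibCDS CONFIG REQUIRED)

add_executable(my_app main.cpp)
# targets are exported under the LibCDS:: namespace; "cds" is the shared library
target_link_libraries(my_app PRIVATE LibCDS::cds)
```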
${PROJECT_VERSION}") # DEB specific set(CPACK_DEB_COMPONENT_INSTALL ON) set(CPACK_DEBIAN_PACKAGE_DEPENDS "boost (>= 1.50)") set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/khizmax/libcds") set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh;;${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh;") # NSYS specific set(CPACK_NSIS_PACKAGE_NAME "${CPACK_PACKAGE_NAME}") set(CPACK_NSIS_DISPLAY_NAME "${CPACK_PACKAGE_NAME}") set(CPACK_NSIS_CONTACT ${CPACK_PACKAGE_CONTACT}) set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL ON) set(CPACK_NSIS_MODIFY_PATH ON) # Components grouping for Mac OS X and Windows installers set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_GROUP "Runtime") set(CPACK_COMPONENT_${HEADERS_COMPONENT}_GROUP "Development") set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_DISPLAY_NAME "Libraries") set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DISPLAY_NAME "C++ Headers") set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DEPENDS ${LIBRARIES_COMPONENT}) set(CPACK_COMPONENT_GROUP_DEVELOPMENT_DESCRIPTION "All of the tools you'll ever need to develop lock-free oriented software with libcds") set(CPACK_COMPONENT_GROUP_RUNTIME_DESCRIPTION "Only libcds library for runtime") include(CPack) libcds-2.3.3/LICENSE000066400000000000000000000024711341244201700137660ustar00rootroot00000000000000Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.libcds-2.3.3/appveyor.yml000066400000000000000000000064301341244201700153500ustar00rootroot00000000000000build: false shallow_clone: true # (git clone --depth 1) image: - Visual Studio 2017 install: - sed -i "/boost/d" conanfile.txt # delete boost from conanfile. Conan renamed name of boost libs so we'll use appveyour's boost - cmd: echo "Downloading conan..." - cmd: set PATH=%PATH%;%PYTHON%/Scripts/ - cmd: pip.exe install conan - cmd: conan user # Create the conan data directory - cmd: conan --version - cmd: conan install --build=missing -s arch=x86 -s build_type=%configuration% . 
#- cmd: conan install --build=missing -s arch=x86_64 -s build_type=%configuration% conanfileWin.txt - cmd: echo ======================= - cmd: echo %configuration% - cmd: echo %platform% - cmd: echo ======================= - cmd: set GTEST_ROOT=C:/projects/libcds/deps - cmd: set BOOST_PATH=C:\Libraries\boost_1_66_0 - cmd: set GTEST_LIB32=C:\projects\libcds\deps\lib;C:\Libraries\boost_1_66_0\lib32-msvc-14.1 - cmd: dir %GTEST_LIB32% - cmd: echo ======================= # - cmd: set GTEST_LIB64=C:\projects\libcds\deps\lib environment: matrix: # require a library with name libboost_thread-vc141-mt-x32-1_66.lib, - TARGET: gtest-stack - TARGET: gtest-deque - TARGET: gtest-pqueue - TARGET: gtest-queue - TARGET: gtest-map-skip - TARGET: gtest-map-split-michael - TARGET: gtest-set-split-michael - TARGET: gtest-misc - TARGET: gtest-ilist-iterable - TARGET: gtest-ilist-lazy - TARGET: gtest-ilist-michael - TARGET: gtest-iset-feldman - TARGET: gtest-iset-michael - TARGET: gtest-iset-michael-iterable - TARGET: gtest-iset-michael-lazy - TARGET: gtest-iset-skip - TARGET: gtest-iset-split-iterable - TARGET: gtest-iset-split-lazy - TARGET: gtest-iset-split-michael - TARGET: gtest-list-iterable - TARGET: gtest-list-lazy - TARGET: gtest-list-michael - TARGET: gtest-map-feldman - TARGET: gtest-map-michael - TARGET: gtest-map-michael-iterable - TARGET: gtest-map-michael-lazy - TARGET: gtest-map-split-iterable - TARGET: gtest-map-split-lazy - TARGET: gtest-set-feldman - TARGET: gtest-set-michael - TARGET: gtest-set-michael-iterable - TARGET: gtest-set-michael-lazy - TARGET: gtest-set-skip - TARGET: gtest-set-split-iterable - TARGET: gtest-set-split-lazy - TARGET: gtest-striped-map-boost - TARGET: gtest-striped-map-cuckoo - TARGET: gtest-striped-map-std - TARGET: gtest-striped-set-boost - TARGET: gtest-striped-set-cuckoo - TARGET: gtest-striped-set-std - TARGET: gtest-tree-bronson - TARGET: gtest-tree-ellen configuration: - Release platform: - Win32 build_script: - msbuild projects/Win/vc141/cds.vcxproj /p:Configuration=%configuration% /p:PlatformTarget="Win32" - msbuild projects/Win/vc141/%TARGET%.vcxproj /p:Configuration=%configuration% /p:PlatformTarget="Win32" #- msbuild projects/Win/vc141/cds.vcxproj /p:Configuration=%configuration% /p:PlatformTarget=x64 #- msbuild projects/Win/vc141/%TARGET%.vcxproj /p:Configuration=%configuration% /p:PlatformTarget=x64 test_script: - cmd: set path=%path%;%GTEST_LIB32%;C:\projects\libcds\bin\vc.v141\%platform%-release\ - cmd: C:\projects\libcds\bin\vc.v141\%platform%-release\%TARGET%.exe libcds-2.3.3/build/000077500000000000000000000000001341244201700140545ustar00rootroot00000000000000libcds-2.3.3/build/CI/000077500000000000000000000000001341244201700143475ustar00rootroot00000000000000libcds-2.3.3/build/CI/VASEx-CI-2/000077500000000000000000000000001341244201700157255ustar00rootroot00000000000000libcds-2.3.3/build/CI/VASEx-CI-2/cds-libs000066400000000000000000000177601341244201700173630ustar00rootroot00000000000000######################################### # Generic parameters workspace: $WORKSPACE libcds-source: source make-job: 10 gtest-include: $GTEST_ROOT/googletest/include ######################################### #GCC-4.8 gcc-4.8-root: $GCC48_ROOT/bin gcc-4.8-cxx: g++-4.8 gcc-4.8-cc: gcc-4.8 gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64 gcc-4.8-extlib: rt gcc-4.8-boost: $BOOST_ROOT gcc-4.8-64-boost-lib: stage64-gcc4.8/lib gcc-4.8-gtest: $GTEST_ROOT gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a ######################################## 
==== libcds-2.3.3/build/CI/VASEx-CI-2/cds-libs ====

#########################################
# Generic parameters
workspace: $WORKSPACE
libcds-source: source
make-job: 10
gtest-include: $GTEST_ROOT/googletest/include

#########################################
#GCC-4.8
gcc-4.8-root: $GCC48_ROOT/bin
gcc-4.8-cxx: g++-4.8
gcc-4.8-cc: gcc-4.8
gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64
gcc-4.8-extlib: rt
gcc-4.8-boost: $BOOST_ROOT
gcc-4.8-64-boost-lib: stage64-gcc4.8/lib
gcc-4.8-gtest: $GTEST_ROOT
gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a

########################################
#GCC-4.9
gcc-4.9-root: $GCC49_ROOT/bin
gcc-4.9-cxx: g++-4.9
gcc-4.9-cc: gcc-4.9
gcc-4.9-exe-ldflags: -Wl,-rpath=$GCC49_ROOT/lib64
gcc-4.9-extlib: rt
gcc-4.9-boost: $BOOST_ROOT
gcc-4.9-64-boost-lib: stage64-gcc4.9/lib
gcc-4.9-gtest: $GTEST_ROOT
gcc-4.9-64-gtest-lib: $GTEST_ROOT/lib-gcc4.9/libgtest.a

########################################
#GCC-5
gcc-5-root: $GCC5_ROOT/bin
gcc-5-cxx: g++-5
gcc-5-cc: gcc-5
gcc-5-boost: $BOOST_ROOT
gcc-5-exe-ldflags: -Wl,-rpath=$GCC5_ROOT/lib64
gcc-5-extlib: rt
gcc-5-64-boost-lib: stage64-gcc5/lib
gcc-5-64-asan-boost-lib: stage64-gcc5-asan/lib
gcc-5-64-tsan-boost-lib: stage64-gcc5-tsan/lib
gcc-5-gtest: $GTEST_ROOT
gcc-5-64-gtest-lib: $GTEST_ROOT/lib-gcc5/libgtest.a

########################################
#GCC-6
gcc-6-root: $GCC6_ROOT/bin
gcc-6-cxx: g++-6
gcc-6-cc: gcc-6
gcc-6-boost: $BOOST_ROOT
gcc-6-cxxflags: -march=native -std=c++14
gcc-6-exe-ldflags: -Wl,-rpath=$GCC6_ROOT/lib64
gcc-6-extlib: rt
gcc-6-64-boost-lib: stage64-gcc6/lib
gcc-6-64-asan-boost-lib: stage64-gcc6-asan/lib
gcc-6-64-tsan-boost-lib: stage64-gcc6-tsan/lib
gcc-6-gtest: $GTEST_ROOT
gcc-6-64-gtest-lib: $GTEST_ROOT/lib-gcc6/libgtest.a

########################################
#GCC-7
gcc-7-root: $GCC7_ROOT/bin
gcc-7-cxx: g++-7
gcc-7-cc: gcc-7
gcc-7-boost: $BOOST_ROOT
gcc-7-cxxflags: -march=native -std=c++1z
gcc-7-exe-ldflags: -Wl,-rpath=$GCC7_ROOT/lib64
gcc-7-extlib: rt
gcc-7-64-boost-lib: stage64-gcc7/lib
gcc-7-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-7-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-7-gtest: $GTEST_ROOT
gcc-7-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a

########################################
#GCC-8
gcc-8-root: $GCC8_ROOT/bin
gcc-8-cxx: g++-8
gcc-8-cc: gcc-8
gcc-8-boost: $BOOST_ROOT
gcc-8-cxxflags: -march=native -std=c++17 -Wmultistatement-macros
gcc-8-exe-ldflags: -Wl,-rpath=$GCC8_ROOT/lib64
gcc-8-extlib: rt
gcc-8-path: $DEVTOOLSET6_BIN
gcc-8-64-boost-lib: stage64-gcc7/lib
gcc-8-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-8-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-8-gtest: $GTEST_ROOT
gcc-8-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a

########################################
# clang-3.6
clang-3.6-root: $CLANG36_ROOT/bin
clang-3.6-ld-lib-path: $GCC5_ROOT/lib64
clang-3.6-cxx: clang++
clang-3.6-cc: clang
clang-3.6-cxxflags: -Wdocumentation
clang-3.6-exe-ldflags: -L$GCC5_ROOT/lib64 -latomic -Wl,-rpath=$GCC5_ROOT/lib64
clang-3.6-extlib: rt
clang-3.6-boost: $BOOST_ROOT
clang-3.6-64-boost-lib: stage64-clang3.6/lib
clang-3.6-gtest: $GTEST_ROOT
clang-3.6-64-gtest-lib: $GTEST_ROOT/lib-clang3.6/libgtest.a

########################################
# clang-3.7
clang-3.7-root: $CLANG37_ROOT/bin
clang-3.7-ld-lib-path: $GCC6_ROOT/lib64
clang-3.7-cxx: clang++
clang-3.7-cc: clang
clang-3.7-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.7-exe-ldflags: -L$CLANG37_ROOT/lib -Wl,-rpath=$CLANG37_ROOT/lib -lc++abi
clang-3.7-extlib: rt
clang-3.7-boost: $BOOST_ROOT
clang-3.7-64-boost-lib: stage64-clang3.7/lib
clang-3.7-gtest: $GTEST_ROOT
clang-3.7-64-gtest-lib: $GTEST_ROOT/lib-clang3.7/libgtest.a
clang-3.7-cmake-flags: -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1

########################################
# clang-3.8
clang-3.8-root: $CLANG38_ROOT/bin
clang-3.8-ld-lib-path: $GCC6_ROOT/lib64
clang-3.8-cxx: clang++
clang-3.8-cc: clang
clang-3.8-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.8-exe-ldflags: -L$CLANG38_ROOT/lib -Wl,-rpath=$CLANG38_ROOT/lib
clang-3.8-extlib: rt
clang-3.8-boost: $BOOST_ROOT
clang-3.8-64-boost-lib: stage64-clang3.8/lib
clang-3.8-gtest: $GTEST_ROOT
clang-3.8-64-gtest-lib: $GTEST_ROOT/lib-clang3.8/libgtest.a

########################################
# clang-3.9
clang-3.9-root: $CLANG39_ROOT/bin
clang-3.9-ld-lib-path: $GCC6_ROOT/lib64
clang-3.9-cxx: clang++
clang-3.9-cc: clang
clang-3.9-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.9-exe-ldflags: -L$CLANG39_ROOT/lib -Wl,-rpath=$CLANG39_ROOT/lib
clang-3.9-extlib: rt
clang-3.9-boost: $BOOST_ROOT
clang-3.9-64-boost-lib: stage64-clang3.9/lib
clang-3.9-64-asan-boost-lib: stage64-clang3.9-asan/lib
clang-3.9-64-tsan-boost-lib: stage64-clang3.9-tsan/lib
clang-3.9-gtest: $GTEST_ROOT
clang-3.9-64-gtest-lib: $GTEST_ROOT/lib-clang3.9/libgtest.a

########################################
# clang-4
clang-4-root: $CLANG4_ROOT/bin
clang-4-cxx: clang++
clang-4-cc: clang
clang-4-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++14
clang-4-exe-ldflags: -L$CLANG4_ROOT/lib -Wl,-rpath=$CLANG4_ROOT/lib
clang-4-extlib: rt
clang-4-boost: $BOOST_ROOT
clang-4-64-boost-lib: stage64-clang4/lib
clang-4-64-asan-boost-lib: stage64-clang4-asan/lib
clang-4-64-tsan-boost-lib: stage64-clang4-tsan/lib
clang-4-gtest: $GTEST_ROOT
clang-4-64-gtest-lib: $GTEST_ROOT/lib-clang4/libgtest.a

########################################
# clang-5
clang-5-root: $CLANG5_ROOT/bin
clang-5-cxx: clang++
clang-5-cc: clang
clang-5-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++1z
clang-5-exe-ldflags: -L$CLANG5_ROOT/lib -Wl,-rpath=$CLANG5_ROOT/lib
clang-5-extlib: rt
clang-5-path: $DEVTOOLSET6_BIN
clang-5-boost: $LIB_ROOT/boost_1_65_1
clang-5-64-boost-lib: stage64-clang5-std17/lib
clang-5-64-asan-boost-lib: stage64-clang5-asan/lib
clang-5-64-tsan-boost-lib: stage64-clang5-tsan/lib
clang-5-gtest: $GTEST_ROOT
clang-5-64-gtest-lib: $GTEST_ROOT/lib-clang5/libgtest.a

########################################
# clang-6
clang-6-root: $CLANG6_ROOT/bin
clang-6-cxx: clang++
clang-6-cc: clang
clang-6-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++17
clang-6-exe-ldflags: -L$CLANG6_ROOT/lib -Wl,-rpath=$CLANG6_ROOT/lib
clang-6-extlib: rt
clang-6-path: $DEVTOOLSET6_BIN
clang-6-boost: $LIB_ROOT/boost_1_65_1
clang-6-64-boost-lib: stage64-clang6-std17/lib
clang-6-64-asan-boost-lib: stage64-clang6-asan/lib
clang-6-64-tsan-boost-lib: stage64-clang6-tsan/lib
clang-6-gtest: $GTEST_ROOT
clang-6-64-gtest-lib: $GTEST_ROOT/lib-clang6/libgtest.a

########################################
# clang-7
clang-7-root: $CLANG7_ROOT/bin
clang-7-cxx: clang++
clang-7-cc: clang
clang-7-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++17
clang-7-exe-ldflags: -fuse-ld=lld -L$CLANG7_ROOT/lib -Wl,-rpath=$CLANG7_ROOT/lib
clang-7-extlib: rt
clang-7-path: $DEVTOOLSET6_BIN
clang-7-boost: $LIB_ROOT/boost_1_65_1
clang-7-64-boost-lib: stage64-clang7-std17/lib
clang-7-64-asan-boost-lib: stage64-clang7-asan/lib
clang-7-64-tsan-boost-lib: stage64-clang7-tsan/lib
clang-7-gtest: $GTEST_ROOT
clang-7-64-gtest-lib: $GTEST_ROOT/lib-clang6/libgtest.a

==== libcds-2.3.3/build/CI/VASEx-CI-2/ci-build ====

#! /bin/bash
# Useful envvars:
#  CI_SCRIPT_PATH - path where to find scripts
#  TOOLSET        - toolset: x64-gcc-5, x64-clang-3.9 and so on
#  BUILD_TYPE     - build type: 'dbg', 'rel', 'asan', 'tsan'
#  WORKSPACE      - path where to build

env|sort

case "$TOOLSET" in
"x64-gcc-4.8")
    echo "GCC-4.8 '$BUILD_TYPE', toolset root: $GCC48_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.8-64 $*
    EXIT_CODE=$?
    ;;
"x64-gcc-4.9")
    echo "GCC-4.9 '$BUILD_TYPE', toolset root: $GCC49_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.9-64 $*
    EXIT_CODE=$?
    ;;
"x64-gcc-5")
    echo "GCC-5 '$BUILD_TYPE', toolset root: $GCC5_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-5-64 $*
    EXIT_CODE=$?
    ;;
"x64-gcc-6")
    echo "GCC-6 '$BUILD_TYPE', toolset root: $GCC6_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-6-64 $*
    EXIT_CODE=$?
    ;;
"x64-gcc-7")
    echo "GCC-7 '$BUILD_TYPE', toolset root: $GCC7_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-7-64 $*
    EXIT_CODE=$?
    ;;
"x64-gcc-8")
    echo "GCC-8 '$BUILD_TYPE', toolset root: $GCC8_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-8-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-3.6")
    echo "clang-3.6 '$BUILD_TYPE', toolset root: $CLANG36_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.6-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-3.7")
    echo "clang-3.7 '$BUILD_TYPE', toolset root: $CLANG37_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.7-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-3.8")
    echo "clang-3.8 '$BUILD_TYPE', toolset root: $CLANG38_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.8-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-3.9")
    echo "clang-3.9 '$BUILD_TYPE', toolset root: $CLANG39_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.9-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-4")
    echo "clang-4 '$BUILD_TYPE', toolset root: $CLANG4_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-4-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-5")
    echo "clang-5 '$BUILD_TYPE', toolset root: $CLANG5_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-5-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-6")
    echo "clang-6 '$BUILD_TYPE', toolset root: $CLANG6_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-6-64 $*
    EXIT_CODE=$?
    ;;
"x64-clang-7")
    echo "clang-7 '$BUILD_TYPE', toolset root: $CLANG7_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-7-64 $*
    EXIT_CODE=$?
    ;;
* )
    echo "Undefined toolset '$TOOLSET'"
    exit 1
    ;;
esac
exit $EXIT_CODE

==== libcds-2.3.3/build/CI/VASEx-CI-2/ci-env ====

#CMAKE_2_8_12=/home/libcds-ci/bin/cmake/cmake-2.8.12/bin
CMAKE_3_6_2=/home/libcds-ci/bin/cmake-3.6/bin
CMAKE3=$CMAKE_3_6_2
PATH=$CMAKE3:$PATH:$HOME/.local/bin:$HOME/bin
DEVTOOLSET6_BIN=/opt/rh/devtoolset-6/root/usr/bin

TOOLSET_ROOT=$HOME/bin
GCC48_ROOT=$TOOLSET_ROOT/gcc-4.8
GCC49_ROOT=$TOOLSET_ROOT/gcc-4.9
GCC5_ROOT=$TOOLSET_ROOT/gcc-5
GCC6_ROOT=$TOOLSET_ROOT/gcc-6
GCC7_ROOT=$TOOLSET_ROOT/gcc-7
GCC8_ROOT=$TOOLSET_ROOT/gcc-8

CLANG36_ROOT=$TOOLSET_ROOT/clang-3.6
CLANG37_ROOT=$TOOLSET_ROOT/clang-3.7
CLANG38_ROOT=$TOOLSET_ROOT/clang-3.8
CLANG39_ROOT=$TOOLSET_ROOT/clang-3.9
CLANG4_ROOT=$TOOLSET_ROOT/clang-4
CLANG5_ROOT=$TOOLSET_ROOT/clang-5
CLANG6_ROOT=$TOOLSET_ROOT/clang-6
CLANG7_ROOT=$TOOLSET_ROOT/clang-7

CLANG_STDLIB="-stdlib=libc++"
CLANG37_CXXFLAGS=$CLANG_STDLIB
CLANG38_CXXFLAGS=$CLANG_STDLIB
CLANG39_CXXFLAGS=$CLANG_STDLIB
CLANG4_CXXFLAGS=$CLANG_STDLIB
CLANG5_CXXFLAGS=$CLANG_STDLIB
CLANG6_CXXFLAGS=$CLANG_STDLIB
CLANG7_CXXFLAGS="$CLANG_STDLIB -fuse-ld=lld"

LIB_ROOT=$HOME/lib
BOOST_ROOT=$LIB_ROOT/boost
GTEST_ROOT=$LIB_ROOT/gtest
==== libcds-2.3.3/build/CI/VASEx-CI/cds-libs ====

#########################################
# Generic parameters
workspace: $WORKSPACE
libcds-source: source
make-job: 10
gtest-include: $GTEST_ROOT/googletest/include

#########################################
#GCC-4.8
gcc-4.8-root: $GCC48_ROOT/bin
gcc-4.8-cxx: g++-4.8
gcc-4.8-cc: gcc-4.8
gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64
gcc-4.8-boost: $BOOST_ROOT
gcc-4.8-64-boost-lib: stage64-gcc4.8/lib
gcc-4.8-gtest: $GTEST_ROOT
gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a

########################################
#GCC-4.9
gcc-4.9-root: $GCC49_ROOT/bin
gcc-4.9-cxx: g++-4.9
gcc-4.9-cc: gcc-4.9
gcc-4.9-exe-ldflags: -Wl,-rpath=$GCC49_ROOT/lib64
gcc-4.9-boost: $BOOST_ROOT
gcc-4.9-64-boost-lib: stage64-gcc4.9/lib
gcc-4.9-gtest: $GTEST_ROOT
gcc-4.9-64-gtest-lib: $GTEST_ROOT/lib-gcc4.9/libgtest.a

########################################
#GCC-5
gcc-5-root: $GCC5_ROOT/bin
gcc-5-cxx: g++-5
gcc-5-cc: gcc-5
gcc-5-boost: $BOOST_ROOT
gcc-5-exe-ldflags: -Wl,-rpath=$GCC5_ROOT/lib64
gcc-5-64-boost-lib: stage64-gcc5/lib
gcc-5-64-asan-boost-lib: stage64-gcc5-asan/lib
gcc-5-64-tsan-boost-lib: stage64-gcc5-tsan/lib
gcc-5-gtest: $GTEST_ROOT
gcc-5-64-gtest-lib: $GTEST_ROOT/lib-gcc5/libgtest.a

########################################
#GCC-6
gcc-6-root: $GCC6_ROOT/bin
gcc-6-cxx: g++-6
gcc-6-cc: gcc-6
gcc-6-boost: $BOOST_ROOT
gcc-6-cxxflags: -march=native -std=c++14
gcc-6-exe-ldflags: -Wl,-rpath=$GCC6_ROOT/lib64
gcc-6-64-boost-lib: stage64-gcc6/lib
gcc-6-64-asan-boost-lib: stage64-gcc6-asan/lib
gcc-6-64-tsan-boost-lib: stage64-gcc6-tsan/lib
gcc-6-gtest: $GTEST_ROOT
gcc-6-64-gtest-lib: $GTEST_ROOT/lib-gcc6/libgtest.a

########################################
#GCC-7
gcc-7-root: $GCC7_ROOT/bin
gcc-7-cxx: g++-7
gcc-7-cc: gcc-7
gcc-7-boost: $BOOST_ROOT
gcc-7-cxxflags: -march=native -std=c++1z
gcc-7-exe-ldflags: -Wl,-rpath=$GCC7_ROOT/lib64
gcc-7-64-boost-lib: stage64-gcc7/lib
gcc-7-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-7-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-7-gtest: $GTEST_ROOT
gcc-7-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a

########################################
#GCC-8
gcc-8-root: $GCC8_ROOT/bin
gcc-8-cxx: g++-8
gcc-8-cc: gcc-8
gcc-8-boost: $BOOST_ROOT
gcc-8-cxxflags: -march=native -std=c++17 -Wmultistatement-macros
gcc-8-exe-ldflags: -Wl,-rpath=$GCC8_ROOT/lib64
gcc-8-extlib: rt
gcc-8-64-boost-lib: stage64-gcc7/lib
gcc-8-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-8-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-8-gtest: $GTEST_ROOT
gcc-8-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a

########################################
# clang-3.6
clang-3.6-root: $CLANG36_ROOT/bin
clang-3.6-ld-lib-path: $GCC6_ROOT/lib64
clang-3.6-cxx: clang++
clang-3.6-cc: clang
clang-3.6-cxxflags: -Wdocumentation
clang-3.6-exe-ldflags: -L$GCC5_ROOT/lib64 -latomic -Wl,-rpath=$GCC5_ROOT/lib64
clang-3.6-boost: $BOOST_ROOT
clang-3.6-64-boost-lib: stage64-clang3.6/lib
clang-3.6-gtest: $GTEST_ROOT
clang-3.6-64-gtest-lib: $GTEST_ROOT/lib-clang3.6/libgtest.a

########################################
# clang-3.7
clang-3.7-root: $CLANG37_ROOT/bin
clang-3.7-cxx: clang++
clang-3.7-cc: clang
clang-3.7-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.7-exe-ldflags: -L$CLANG37_ROOT/lib -Wl,-rpath=$CLANG37_ROOT/lib -lc++abi
clang-3.7-boost: $BOOST_ROOT
clang-3.7-64-boost-lib: stage64-clang3.7/lib
clang-3.7-gtest: $GTEST_ROOT
clang-3.7-64-gtest-lib: $GTEST_ROOT/lib-clang3.7/libgtest.a
clang-3.7-cmake-flags: -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1

########################################
# clang-3.8
clang-3.8-root: $CLANG38_ROOT/bin
clang-3.8-cxx: clang++
clang-3.8-cc: clang
clang-3.8-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.8-exe-ldflags: -L$CLANG38_ROOT/lib -Wl,-rpath=$CLANG38_ROOT/lib
clang-3.8-boost: $BOOST_ROOT
clang-3.8-64-boost-lib: stage64-clang3.8/lib
clang-3.8-gtest: $GTEST_ROOT
clang-3.8-64-gtest-lib: $GTEST_ROOT/lib-clang3.8/libgtest.a

########################################
# clang-3.9
clang-3.9-root: $CLANG39_ROOT/bin
clang-3.9-cxx: clang++
clang-3.9-cc: clang
clang-3.9-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.9-exe-ldflags: -L$CLANG39_ROOT/lib -Wl,-rpath=$CLANG39_ROOT/lib
clang-3.9-boost: $BOOST_ROOT
clang-3.9-64-boost-lib: stage64-clang3.9/lib
clang-3.9-64-asan-boost-lib: stage64-clang3.9-asan/lib
clang-3.9-64-tsan-boost-lib: stage64-clang3.9-tsan/lib
clang-3.9-gtest: $GTEST_ROOT
clang-3.9-64-gtest-lib: $GTEST_ROOT/lib-clang3.9/libgtest.a

########################################
# clang-4
clang-4-root: $CLANG4_ROOT/bin
clang-4-cxx: clang++
clang-4-cc: clang
clang-4-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++14
clang-4-exe-ldflags: -L$CLANG4_ROOT/lib -Wl,-rpath=$CLANG4_ROOT/lib
clang-4-boost: $BOOST_ROOT
clang-4-64-boost-lib: stage64-clang4/lib
clang-4-64-asan-boost-lib: stage64-clang4-asan/lib
clang-4-64-tsan-boost-lib: stage64-clang4-tsan/lib
clang-4-gtest: $GTEST_ROOT
clang-4-64-gtest-lib: $GTEST_ROOT/lib-clang4/libgtest.a

########################################
# clang-5
clang-5-root: $CLANG5_ROOT/bin
clang-5-cxx: clang++
clang-5-cc: clang
clang-5-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++1z
clang-5-exe-ldflags: -L$CLANG5_ROOT/lib -Wl,-rpath=$CLANG5_ROOT/lib
clang-5-boost: $LIB_ROOT/boost_1_65_1
clang-5-64-boost-lib: stage64-clang5-std17/lib
clang-5-64-asan-boost-lib: stage64-clang5-asan/lib
clang-5-64-tsan-boost-lib: stage64-clang5-tsan/lib
clang-5-gtest: $GTEST_ROOT
clang-5-64-gtest-lib: $GTEST_ROOT/lib-clang5/libgtest.a

########################################
# clang-6
clang-6-root: $CLANG6_ROOT/bin
clang-6-cxx: clang++
clang-6-cc: clang
clang-6-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++17
clang-6-exe-ldflags: -L$CLANG6_ROOT/lib -Wl,-rpath=$CLANG6_ROOT/lib
clang-6-boost: $LIB_ROOT/boost_1_65_1
clang-6-64-boost-lib: stage64-clang6-std17/lib
clang-6-64-asan-boost-lib: stage64-clang6-asan/lib
clang-6-64-tsan-boost-lib: stage64-clang6-tsan/lib
clang-6-gtest: $GTEST_ROOT
clang-6-64-gtest-lib: $GTEST_ROOT/lib-clang6/libgtest.a

==== libcds-2.3.3/build/CI/VASEx-CI/ci-build ====

#! /bin/bash
# Useful envvars:
#  CI_SCRIPT_PATH - path where to find scripts
#  TOOLSET        - toolset: x64-gcc-5, x64-clang-3.9 and so on
#  BUILD_TYPE     - build type: 'dbg', 'rel', 'asan', 'tsan'
#  WORKSPACE      - path where to build

env|sort

case "$TOOLSET" in
"x64-gcc-4.8")
    echo "GCC-4.8 '$BUILD_TYPE', toolset root: $GCC48_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.8-64 $*
    exit $?
    ;;
"x64-gcc-4.9")
    echo "GCC-4.9 '$BUILD_TYPE', toolset root: $GCC49_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.9-64 $*
    exit $?
    ;;
"x64-gcc-5")
    echo "GCC-5 '$BUILD_TYPE', toolset root: $GCC5_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-5-64 $*
    exit $?
    ;;
"x64-gcc-6")
    echo "GCC-6 '$BUILD_TYPE', toolset root: $GCC6_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-6-64 $*
    exit $?
    ;;
"x64-gcc-7")
    echo "GCC-7 '$BUILD_TYPE', toolset root: $GCC7_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-7-64 $*
    exit $?
    ;;
"x64-gcc-8")
    echo "GCC-8 '$BUILD_TYPE', toolset root: $GCC8_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-8-64 $*
    exit $?
    ;;
"x64-clang-3.6")
    echo "clang-3.6 '$BUILD_TYPE', toolset root: $CLANG36_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.6-64 $*
    exit $?
    ;;
"x64-clang-3.7")
    echo "clang-3.7 '$BUILD_TYPE', toolset root: $CLANG37_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.7-64 $*
    exit $?
    ;;
"x64-clang-3.8")
    echo "clang-3.8 '$BUILD_TYPE', toolset root: $CLANG38_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.8-64 $*
    exit $?
    ;;
"x64-clang-3.9")
    echo "clang-3.9 '$BUILD_TYPE', toolset root: $CLANG39_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.9-64 $*
    exit $?
    ;;
"x64-clang-4")
    echo "clang-4 '$BUILD_TYPE', toolset root: $CLANG4_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-4-64 $*
    exit $?
    ;;
"x64-clang-5")
    echo "clang-5 '$BUILD_TYPE', toolset root: $CLANG5_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-5-64 $*
    exit $?
    ;;
"x64-clang-6")
    echo "clang-6 '$BUILD_TYPE', toolset root: $CLANG6_ROOT"
    $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-6-64 $*
    exit $?
    ;;
* )
    echo "Undefined toolset '$TOOLSET'"
    exit 1
    ;;
esac

==== libcds-2.3.3/build/CI/VASEx-CI/ci-env ====

CMAKE_2_8_12=/home/libcds-ci/bin/cmake/cmake-2.8.12/bin
CMAKE_3_6_2=/home/libcds-ci/bin/cmake/cmake-3.6.2/bin
CMAKE3=$CMAKE_3_6_2
PATH=$CMAKE3:$PATH:$HOME/.local/bin:$HOME/bin

TOOLSET_ROOT=$HOME/bin
GCC48_ROOT=$TOOLSET_ROOT/gcc-4.8
GCC49_ROOT=$TOOLSET_ROOT/gcc-4.9
GCC5_ROOT=$TOOLSET_ROOT/gcc-5
GCC6_ROOT=$TOOLSET_ROOT/gcc-6
GCC7_ROOT=$TOOLSET_ROOT/gcc-7
GCC8_ROOT=$TOOLSET_ROOT/gcc-8

CLANG35_ROOT=$TOOLSET_ROOT/clang-3.5
CLANG36_ROOT=$TOOLSET_ROOT/clang-3.6
CLANG37_ROOT=$TOOLSET_ROOT/clang-3.7
CLANG38_ROOT=$TOOLSET_ROOT/clang-3.8
CLANG39_ROOT=$TOOLSET_ROOT/clang-3.9
CLANG4_ROOT=$TOOLSET_ROOT/clang-4
CLANG5_ROOT=$TOOLSET_ROOT/clang-5
CLANG6_ROOT=$TOOLSET_ROOT/clang-6

CLANG_STDLIB="-stdlib=libc++"
CLANG37_CXXFLAGS=$CLANG_STDLIB
CLANG38_CXXFLAGS=$CLANG_STDLIB
CLANG39_CXXFLAGS=$CLANG_STDLIB
CLANG4_CXXFLAGS=$CLANG_STDLIB
CLANG5_CXXFLAGS=$CLANG_STDLIB
CLANG6_CXXFLAGS=$CLANG_STDLIB

LIB_ROOT=$HOME/lib
BOOST_ROOT=$LIB_ROOT/boost
GTEST_ROOT=$LIB_ROOT/gtest
/bin/sh\n\n"; print $out "root=$workspace\n"; print $out "CDS_SOURCE=\$root/$cds_source\n"; print $out "OBJ_ROOT=\$root/obj\n"; print $out "BIN_ROOT=\$root/bin\n"; print $out "GTEST_ROOT=$gtest\n" if $gtest; print $out "export PATH=$sys_path:\$PATH\n" if $sys_path; print $out "\n"; print $out "rm -fr \$OBJ_ROOT\n"; print $out "rm -fr \$BIN_ROOT\n"; print $out "mkdir -p \$OBJ_ROOT\n"; print $out "#cp -f run-ctest-rel \$OBJ_ROOT/run-ctest\n" if $build eq 'rel'; print $out "#cp -f run-ctest-dbg \$OBJ_ROOT/run-ctest\n" unless $build eq 'rel'; print $out "cd \$OBJ_ROOT\n"; print $out "\n"; print $out "LD_LIBRARY_PATH=$ld_lib_path:\$LD_LIBRARY_PATH \\\n" if $ld_lib_path; print $out "LDFLAGS=\"$ldflags\" \\\n" if $ldflags; print $out "cmake -G \"Unix Makefiles\" \\\n"; print $out " -DCMAKE_BUILD_TYPE=$cmake_build \\\n"; print $out " -DCMAKE_C_COMPILER=$comp_root/$cc \\\n"; print $out " -DCMAKE_CXX_COMPILER=$comp_root/$cxx \\\n"; print $out " -DCMAKE_CXX_FLAGS=\"$cxxflags\" \\\n" if $cxxflags; print $out " -DCMAKE_EXE_LINKER_FLAGS=\"$cmake_exe_ldflags\" \\\n" if $cmake_exe_ldflags; print $out " -DCDS_BIN_DIR=\$BIN_ROOT \\\n"; print $out " -DWITH_TESTS=ON \\\n"; print $out " -DWITH_ASAN=ON \\\n" if $build eq 'asan'; print $out " -DWITH_TSAN=ON \\\n" if $build eq 'tsan'; print $out " -DBOOST_ROOT=$boost \\\n"; print $out " -DBOOST_LIBRARYDIR=$boost/$boost_libs \\\n" if $boost_libs; print $out " -DGTEST_INCLUDE_DIRS=$gtest_inc \\\n" if $gtest_inc; print $out " -DGTEST_LIBRARIES=$gtest_lib \\\n" if $gtest_lib; print $out " -DEXTERNAL_SYSTEM_LIBS=\"$ext_lib\" \\\n" if $ext_lib; print $out " $cmake_flags \\\n" if $cmake_flags; print $out " \$CDS_SOURCE && \\\n"; print $out "make -j $make_jobs \$* \n"; close $out; chmod 0755, $filename; sub get_prop($@) { my $what=shift; my $key="$compiler-$bitness-$build-$what:"; my $grep = `grep -P $key $cds_libs`; if ( $grep ) { my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; return $ret[0] if @ret; } $key = "$compiler-$bitness-$what:"; my $grep = `grep -P $key $cds_libs`; if ( $grep ) { my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; return $ret[0] if @ret; } $key = "$compiler-$what:"; my $grep = `grep -P $key $cds_libs`; if ( $grep ) { my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; return $ret[0] if @ret; } } sub get_gen_prop($@) { my $key=shift; $key = "$key:"; my $grep = `grep -P $key $cds_libs`; if ( $grep ) { my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; return $ret[0] if @ret; } }libcds-2.3.3/build/CI/gen-all000077500000000000000000000023531341244201700156170ustar00rootroot00000000000000#! 
==== libcds-2.3.3/build/CI/gen-all ====

#! /bin/sh

./cmake-gen gcc-4.8 64 dbg
./cmake-gen gcc-4.8 64 rel
./cmake-gen gcc-4.9 64 dbg
./cmake-gen gcc-4.9 64 rel
./cmake-gen gcc-5 64 dbg
./cmake-gen gcc-5 64 rel
./cmake-gen gcc-5 64 tsan
./cmake-gen gcc-5 64 asan
./cmake-gen gcc-6 64 dbg
./cmake-gen gcc-6 64 rel
./cmake-gen gcc-6 64 tsan
./cmake-gen gcc-6 64 asan
./cmake-gen gcc-7 64 dbg
./cmake-gen gcc-7 64 rel
./cmake-gen gcc-7 64 tsan
./cmake-gen gcc-7 64 asan
./cmake-gen gcc-8 64 dbg
./cmake-gen gcc-8 64 rel
./cmake-gen gcc-8 64 tsan
./cmake-gen gcc-8 64 asan
./cmake-gen clang-3.6 64 dbg
./cmake-gen clang-3.6 64 rel
./cmake-gen clang-3.7 64 dbg
./cmake-gen clang-3.7 64 rel
./cmake-gen clang-3.8 64 dbg
./cmake-gen clang-3.8 64 rel
./cmake-gen clang-3.9 64 dbg
./cmake-gen clang-3.9 64 rel
./cmake-gen clang-3.9 64 asan
./cmake-gen clang-3.9 64 tsan
./cmake-gen clang-4 64 dbg
./cmake-gen clang-4 64 rel
./cmake-gen clang-4 64 asan
./cmake-gen clang-4 64 tsan
./cmake-gen clang-5 64 dbg
./cmake-gen clang-5 64 rel
./cmake-gen clang-5 64 asan
./cmake-gen clang-5 64 tsan
./cmake-gen clang-6 64 dbg
./cmake-gen clang-6 64 rel
./cmake-gen clang-6 64 asan
./cmake-gen clang-6 64 tsan
./cmake-gen clang-7 64 dbg
./cmake-gen clang-7 64 rel
./cmake-gen clang-7 64 asan
./cmake-gen clang-7 64 tsan

==== libcds-2.3.3/build/CI/travis-ci/install.sh ====

#!/bin/bash
set -e
set -x

if [[ "$(uname -s)" == 'Darwin' ]]; then
    brew update || brew update
    brew outdated pyenv || brew upgrade pyenv
    brew install pyenv-virtualenv
    brew install cmake || true

    if which pyenv > /dev/null; then
        eval "$(pyenv init -)"
    fi

    pyenv install 2.7.10
    pyenv virtualenv 2.7.10 conan
    pyenv rehash
    pyenv activate conan

    pip install conan --upgrade
    pip install conan_package_tools
    conan user
    exit 0
fi

pip install --user conan --upgrade
pip install --user conan_package_tools
conan user

==== libcds-2.3.3/build/CI/travis-ci/run.sh ====

#!/bin/bash
set -e
set -x

CONAN_INSTALL_FLAGS="-s compiler.libcxx=libstdc++11"

if [[ "$(uname -s)" == 'Darwin' ]]; then
    if which pyenv > /dev/null; then
        eval "$(pyenv init -)"
    fi
    pyenv activate conan
    CONAN_INSTALL_FLAGS=""
fi

#export CXX=$CXX_COMPILER
#export CC=$C_COMPILER

mkdir build-test && cd build-test
conan install --build $CONAN_INSTALL_FLAGS -s build_type=$BUILD_TYPE ..
cmake -DCMAKE_PREFIX_PATH="$TRAVIS_BUILD_DIR/build-test/deps" -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_EXE_LINKER_FLAGS=$LINKER_FLAGS -DWITH_TESTS=ON ..
cmake --build . -- -j2 $TARGET

if [[ "$(uname -s)" == 'Darwin' ]]; then
    export DYLD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/build-test/deps/lib
fi

ctest -VV -R $TARGET

==== libcds-2.3.3/build/cmake/TargetArch.cmake ====

# Source: https://github.com/axr/solar-cmake
# Based on the Qt 5 processor detection code, so should be very accurate
# https://qt.gitorious.org/qt/qtbase/blobs/master/src/corelib/global/qprocessordetection.h
# Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64)

# Regarding POWER/PowerPC, just as is noted in the Qt source,
# "There are many more known variants/revisions that we do not handle/detect."

set(archdetect_c_code "
#if defined(__arm__) || defined(__TARGET_ARCH_ARM)
    #if defined(__ARM_ARCH_7__) \\
        || defined(__ARM_ARCH_7A__) \\
        || defined(__ARM_ARCH_7R__) \\
        || defined(__ARM_ARCH_7M__) \\
        || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 7)
        #error cmake_ARCH armv7
    #elif defined(__ARM_ARCH_6__) \\
        || defined(__ARM_ARCH_6J__) \\
        || defined(__ARM_ARCH_6T2__) \\
        || defined(__ARM_ARCH_6Z__) \\
        || defined(__ARM_ARCH_6K__) \\
        || defined(__ARM_ARCH_6ZK__) \\
        || defined(__ARM_ARCH_6M__) \\
        || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 6)
        #error cmake_ARCH armv6
    #elif defined(__ARM_ARCH_5TEJ__) \\
        || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 5)
        #error cmake_ARCH armv5
    #else
        #error cmake_ARCH arm
    #endif
#elif defined(__aarch64__)
    #if defined(__ARM_ARCH) && __ARM_ARCH == 8
        #error cmake_ARCH armv8
    #else
        #error cmake_ARCH arm64
    #endif
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
    #error cmake_ARCH i386
#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64)
    #error cmake_ARCH x86_64
#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
    #error cmake_ARCH ia64
#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \\
    || defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \\
    || defined(_M_MPPC) || defined(_M_PPC)
    #if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
        #error cmake_ARCH ppc64
    #else
        #error cmake_ARCH ppc
    #endif
#endif

#error cmake_ARCH unknown
")

# Set ppc_support to TRUE before including this file or ppc and ppc64
# will be treated as invalid architectures since they are no longer supported by Apple
function(target_architecture output_var)
    if(APPLE AND CMAKE_OSX_ARCHITECTURES)
        # On OS X we use CMAKE_OSX_ARCHITECTURES *if* it was set
        # First let's normalize the order of the values

        # Note that it's not possible to compile PowerPC applications if you are using
        # the OS X SDK version 10.6 or later - you'll need 10.4/10.5 for that, so we
        # disable it by default
        # See this page for more information:
        # http://stackoverflow.com/questions/5333490/how-can-we-restore-ppc-ppc64-as-well-as-full-10-4-10-5-sdk-support-to-xcode-4

        # Architecture defaults to i386 or ppc on OS X 10.5 and earlier, depending on the CPU type detected at runtime.
        # On OS X 10.6+ the default is x86_64 if the CPU supports it, i386 otherwise.

        foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES})
            if("${osx_arch}" STREQUAL "ppc" AND ppc_support)
                set(osx_arch_ppc TRUE)
            elseif("${osx_arch}" STREQUAL "i386")
                set(osx_arch_i386 TRUE)
            elseif("${osx_arch}" STREQUAL "x86_64")
                set(osx_arch_x86_64 TRUE)
            elseif("${osx_arch}" STREQUAL "ppc64" AND ppc_support)
                set(osx_arch_ppc64 TRUE)
            else()
                message(FATAL_ERROR "Invalid OS X arch name: ${osx_arch}")
            endif()
        endforeach()

        # Now add all the architectures in our normalized order
        if(osx_arch_ppc)
            list(APPEND ARCH ppc)
        endif()

        if(osx_arch_i386)
            list(APPEND ARCH i386)
        endif()

        if(osx_arch_x86_64)
            list(APPEND ARCH x86_64)
        endif()

        if(osx_arch_ppc64)
            list(APPEND ARCH ppc64)
        endif()
    else()
        file(WRITE "${CMAKE_BINARY_DIR}/arch.c" "${archdetect_c_code}")

        enable_language(C)

        # Detect the architecture in a rather creative way...
        # This compiles a small C program which is a series of ifdefs that selects a
        # particular #error preprocessor directive whose message string contains the
        # target architecture. The program will always fail to compile (both because
        # the file is not a valid C program, and obviously because of the presence of
        # the #error preprocessor directives... but by exploiting the preprocessor in this
        # way, we can detect the correct target architecture even when cross-compiling,
        # since the program itself never needs to be run (only the compiler/preprocessor)
        try_run(
            run_result_unused
            compile_result_unused
            "${CMAKE_BINARY_DIR}"
            "${CMAKE_BINARY_DIR}/arch.c"
            COMPILE_OUTPUT_VARIABLE ARCH
            CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES}
        )

        # Parse the architecture name from the compiler output
        string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}")

        # Get rid of the value marker leaving just the architecture name
        string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}")

        # If we are compiling with an unknown architecture this variable should
        # already be set to "unknown" but in the case that it's empty (i.e. due
        # to a typo in the code), then set it to unknown
        if (NOT ARCH)
            set(ARCH unknown)
        endif()
    endif()

    set(${output_var} "${ARCH}" PARENT_SCOPE)
endfunction()
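Because the probe program is only preprocessed and never run, the trick also works when cross-compiling. A minimal usage sketch in a hypothetical CMakeLists, mirroring how the top-level CMakeLists.txt calls it:

```cmake
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/build/cmake")
include(TargetArch)

# ARCH is filled with one of: i386, x86_64, ia64, armv5/6/7/8, arm64, ppc, ppc64, unknown
target_architecture(ARCH)
message(STATUS "Target architecture: ${ARCH}")
```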
but by exploiting the preprocessor in this # way, we can detect the correct target architecture even when cross-compiling, # since the program itself never needs to be run (only the compiler/preprocessor) try_run( run_result_unused compile_result_unused "${CMAKE_BINARY_DIR}" "${CMAKE_BINARY_DIR}/arch.c" COMPILE_OUTPUT_VARIABLE ARCH CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES} ) # Parse the architecture name from the compiler output string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}") # Get rid of the value marker leaving just the architecture name string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}") # If we are compiling with an unknown architecture this variable should # already be set to "unknown" but in the case that it's empty (i.e. due # to a typo in the code), then set it to unknown if (NOT ARCH) set(ARCH unknown) endif() endif() set(${output_var} "${ARCH}" PARENT_SCOPE) endfunction() libcds-2.3.3/build/cmake/description.txt000066400000000000000000000000561341244201700202210ustar00rootroot00000000000000libcds - Concurrent Data Structure C++ librarylibcds-2.3.3/build/cmake/post_install_script.sh000077500000000000000000000000101341244201700215610ustar00rootroot00000000000000ldconfiglibcds-2.3.3/build/cmake/post_uninstall_script.sh000077500000000000000000000000101341244201700221240ustar00rootroot00000000000000ldconfiglibcds-2.3.3/build/cmake/readme.md000066400000000000000000000100711341244201700167120ustar00rootroot00000000000000Building library with CMake =============== CDS suports both in-source and out-of-source cmake build types. Now project uses: - CMake: general cross-platform building - CTest: all unit tests can be run in a standard way by *ctest* command - CPack: for making rpm/deb/nsys etc. packages Compiling and testing ---------- **Building out-of-source in "RELEASE" mode ("DEBUG" is default)** - Wherever create empty directory for building, for instance `libcds-debug` - Prepare: `cmake -DCMAKE_BUILD_TYPE=RELEASE ` - Compile: `make -j4` - As a result you'll see shared and static cds libraries in the build directory **Warning**: We strongly recommend not to use static cds library. Static library is not tested and not maintained. You can use it on own risk. After using command cmake -L one can see some additional variables, that can activate additional features: - `WITH_TESTS:BOOL=OFF`: if you want to build library with unit testing support use *-DWITH_TESTS=ON* on prepare step. Be careful with this flag, because compile time will dramatically increase - `WITH_TESTS_COVERAGE:BOOL=OFF`: Analyze test coverage using gcov (only for gcc) - `WITH_BOOST_ATOMIC:BOOL=OFF`: Use boost atomics (only for boost >= 1.54) - `WITH_ASAN:BOOL=OFF`: compile libcds with AddressSanitizer instrumentation - `WITH_TSAN:BOOL=OFF`: compile libcds with ThreadSanitizer instrumentation Additional gtest hints (for unit and stress tests only): - `GTEST_INCLUDE_DIRS=path`: gives full `path` to gtest include dir. - `GTEST_LIBRARY=path`: gives full `path` to `libgtest.a`. Packaging ---------- In order to package library *CPack* is used, command *cpack -G * should create correspondent packages for particular operating system. 
Now the project supports building the following package types: - *RPM*: RedHat-based Linux distributions - *DEB*: Debian-based Linux distributions - *TGZ*: simple "*tgz*" archive with library and headers - *NSIS*: Windows installer package (NSIS should be installed) "Live" building and packaging example ---------- - `git clone https://github.com/khizmax/libcds.git` - `mkdir libcds-release` - `cd libcds-release` - `cmake -DWITH_TESTS=ON -DCMAKE_BUILD_TYPE=RELEASE ../libcds` ``` -- The C compiler identification is GNU 4.8.3 -- The CXX compiler identification is GNU 4.8.3 ... -- Found Threads: TRUE -- Boost version: 1.54.0 -- Found the following Boost libraries: -- system -- thread Build type -- RELEASE -- Configuring done -- Generating done -- Build files have been written to: <...>/libcds-release ``` - `make -j4` ``` Scanning dependencies of target cds Scanning dependencies of target test-common Scanning dependencies of target cds-s Scanning dependencies of target test-hdr-offsetof [ 1%] Building CXX object CMakeFiles/cds-s.dir/src/hp_gc.cpp.o ... [100%] Built target test-hdr ``` - `ctest` ``` Test project /home/kel/projects_cds/libcds-debug Start 1: test-hdr 1/7 Test #1: test-hdr ......................... Passed 1352.24 sec Start 2: cdsu-misc 2/7 Test #2: cdsu-misc ........................ Passed 0.00 sec Start 3: cdsu-map ... ``` - `cpack -G RPM` ``` CPack: Create package using RPM CPack: Install projects CPack: - Run preinstall target for: cds CPack: - Install project: cds CPack: - Install component: devel CPack: - Install component: lib CPack: Create package CPackRPM:Debug: Adding /usr/local to builtin omit list. CPackRPM: Will use GENERATED spec file: /home/kel/projects_cds/libcds-debug/_CPack_Packages/Linux/RPM/SPECS/cds-devel.spec CPackRPM: Will use GENERATED spec file: /home/kel/projects_cds/libcds-debug/_CPack_Packages/Linux/RPM/SPECS/cds-lib.spec CPack: - package: /home/kel/projects_cds/libcds-debug/cds-2.1.0-1-devel.rpm generated. CPack: - package: /home/kel/projects_cds/libcds-debug/cds-2.1.0-1-lib.rpm generated. ``` Future development ---------- - CDash: use CI systemlibcds-2.3.3/cds/000077500000000000000000000000001341244201700135265ustar00rootroot00000000000000libcds-2.3.3/cds/algo/000077500000000000000000000000001341244201700144505ustar00rootroot00000000000000libcds-2.3.3/cds/algo/atomic.h000066400000000000000000000406421341244201700161030ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CXX11_ATOMIC_H #define CDSLIB_CXX11_ATOMIC_H #include #include namespace cds { /// C++11 Atomic library support /** @anchor cds_cxx11_atomic \p libcds can use the following implementations of the atomics: - STL \p <atomic>. This is used by default - \p boost.atomic for boost 1.54 and above. To use it you should define \p CDS_USE_BOOST_ATOMIC for your compiler invocation, for example, for gcc specify \p -DCDS_USE_BOOST_ATOMIC in the command line - \p libcds implementation of atomic operations according to the C++11 standard as specified in N3242, p.29. The \p libcds implementation is not fully standard-compliant: it provides only the C++ part of the standard; for example, \p libcds has no static initialization of the atomic variables and some other C features. However, that implementation is enough for the library purposes. Supported architectures: x86, amd64, ia64 (Itanium) 64bit, 64bit Sparc.
To use \p libcds atomic you should define \p CDS_USE_LIBCDS_ATOMIC in the compiler command line (\p -DCDS_USE_LIBCDS_ATOMIC for gcc/clang). @note For the Clang compiler \p libcds doesn't use the native \p libc++ \p <atomic> due to some problems. Instead, \p libcds atomic is used by default, or you can try to use \p boost.atomic. The library defines the \p atomics alias for the atomic namespace: - namespace atomics = std for STL - namespace atomics = boost for \p boost.atomic - namespace atomics = cds::cxx11_atomic for library-provided atomic implementation */ namespace cxx11_atomic { }} // namespace cds::cxx11_atomic //@cond #if defined(CDS_USE_BOOST_ATOMIC) // boost atomic # include <boost/version.hpp> # if BOOST_VERSION >= 105400 # include <boost/atomic.hpp> namespace atomics = boost; # define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost { # define CDS_CXX11_ATOMIC_END_NAMESPACE } # else # error "Boost version 1.54 or above is needed for boost.atomic" # endif #elif defined(CDS_USE_LIBCDS_ATOMIC) // libcds atomic # include <cds/compiler/cxx11_atomic.h> namespace atomics = cds::cxx11_atomic; # define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomic { # define CDS_CXX11_ATOMIC_END_NAMESPACE }} #else // Compiler provided C++11 atomic # include <atomic> namespace atomics = std; # define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std { # define CDS_CXX11_ATOMIC_END_NAMESPACE } #endif //@endcond namespace cds { /// Atomic primitives /** This namespace contains useful primitives derived from std::atomic. */ namespace atomicity { /// Atomic event counter. /** This class is based on std::atomic_size_t. It uses relaxed memory ordering \p memory_order_relaxed and may be used as a statistic counter. */ class event_counter { //@cond atomics::atomic_size_t m_counter; //@endcond public: typedef size_t value_type ; ///< Type of counter public: // Initializes event counter with zero event_counter() noexcept : m_counter(size_t(0)) {} /// Assignment operator /** Returns \p n. */ value_type operator =( value_type n ///< new value of the counter ) noexcept { m_counter.exchange( n, atomics::memory_order_relaxed ); return n; } /// Addition /** Returns new value of the atomic counter. */ size_t operator +=( size_t n ///< addendum ) noexcept { return m_counter.fetch_add( n, atomics::memory_order_relaxed ) + n; } /// Subtraction /** Returns new value of the atomic counter. */ size_t operator -=( size_t n ///< subtrahend ) noexcept { return m_counter.fetch_sub( n, atomics::memory_order_relaxed ) - n; } /// Get current value of the counter operator size_t () const noexcept { return m_counter.load( atomics::memory_order_relaxed ); } /// Preincrement size_t operator ++() noexcept { return m_counter.fetch_add( 1, atomics::memory_order_relaxed ) + 1; } /// Postincrement size_t operator ++(int) noexcept { return m_counter.fetch_add( 1, atomics::memory_order_relaxed ); } /// Predecrement size_t operator --() noexcept { return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ) - 1; } /// Postdecrement size_t operator --(int) noexcept { return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ); } /// Get current value of the counter size_t get() const noexcept { return m_counter.load( atomics::memory_order_relaxed ); } /// Resets the counter to 0 void reset() noexcept { m_counter.store( 0, atomics::memory_order_release ); } }; /// Atomic item counter /** This class is a simplified interface around \p std::atomic_size_t. The class supports getting the current value of the counter and incrementing/decrementing it.
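For example (an added usage sketch; \p %item_counter is normally embedded in a container, but it can be used standalone):
\code
cds::atomicity::item_counter nCount;
++nCount;              // preincrement, returns the new value (1)
nCount += 5;           // add 5, returns the new value (6)
size_t n = nCount;     // relaxed load of the current value
--nCount;              // predecrement
nCount.reset();        // stores 0
\endcode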
See also: improved version that eliminates false sharing - \p cache_friendly_item_counter. */ class item_counter { public: typedef atomics::atomic_size_t atomic_type; ///< atomic type used typedef size_t counter_type; ///< Integral item counter type (size_t) private: //@cond atomic_type m_Counter; ///< Atomic item counter //@endcond public: /// Default ctor initializes the counter to zero. item_counter() : m_Counter(counter_type(0)) {} /// Returns current value of the counter counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const { return m_Counter.load( order ); } /// Same as \ref value() with relaxed memory ordering operator counter_type() const { return value(); } /// Returns underlying atomic interface atomic_type& getAtomic() { return m_Counter; } /// Returns underlying atomic interface (const) const atomic_type& getAtomic() const { return m_Counter; } /// Increments the counter. Semantics: postincrement counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_add( 1, order ); } /// Increments the counter. Semantics: postincrement counter_type inc( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_add( count, order ); } /// Decrements the counter. Semantics: postdecrement counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed) { return m_Counter.fetch_sub( 1, order ); } /// Decrements the counter. Semantics: postdecrement counter_type dec( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_sub( count, order ); } /// Preincrement counter_type operator ++() { return inc() + 1; } /// Postincrement counter_type operator ++(int) { return inc(); } /// Predecrement counter_type operator --() { return dec() - 1; } /// Postdecrement counter_type operator --(int) { return dec(); } /// Increment by \p count counter_type operator +=( counter_type count ) { return inc( count ) + count; } /// Decrement by \p count counter_type operator -=( counter_type count ) { return dec( count ) - count; } /// Resets count to 0 void reset(atomics::memory_order order = atomics::memory_order_relaxed) { m_Counter.store( 0, order ); } }; #if CDS_COMPILER == CDS_COMPILER_CLANG // CLang unhappy: pad1_ and pad2_ - unused private field warning # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wunused-private-field" #endif /// Atomic cache-friendly item counter /** Atomic item counter with cache-line padding to avoid false sharing. Adding cache-line padding before and after atomic counter eliminates the contention in read path of many containers and can notably improve search operations in sets/maps. */ class cache_friendly_item_counter { public: typedef atomics::atomic_size_t atomic_type; ///< atomic type used typedef size_t counter_type; ///< Integral item counter type (size_t) private: //@cond char pad1_[cds::c_nCacheLineSize]; atomic_type m_Counter; ///< Atomic item counter char pad2_[cds::c_nCacheLineSize - sizeof( atomic_type )]; //@endcond public: /// Default ctor initializes the counter to zero. 
cache_friendly_item_counter() : m_Counter(counter_type(0)) {} /// Returns current value of the counter counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const { return m_Counter.load( order ); } /// Same as \ref value() with relaxed memory ordering operator counter_type() const { return value(); } /// Returns underlying atomic interface atomic_type& getAtomic() { return m_Counter; } /// Returns underlying atomic interface (const) const atomic_type& getAtomic() const { return m_Counter; } /// Increments the counter. Semantics: postincrement counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_add( 1, order ); } /// Increments the counter. Semantics: postincrement counter_type inc( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_add( count, order ); } /// Decrements the counter. Semantics: postdecrement counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed) { return m_Counter.fetch_sub( 1, order ); } /// Decrements the counter. Semantics: postdecrement counter_type dec( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_sub( count, order ); } /// Preincrement counter_type operator ++() { return inc() + 1; } /// Postincrement counter_type operator ++(int) { return inc(); } /// Predecrement counter_type operator --() { return dec() - 1; } /// Postdecrement counter_type operator --(int) { return dec(); } /// Increment by \p count counter_type operator +=( counter_type count ) { return inc( count ) + count; } /// Decrement by \p count counter_type operator -=( counter_type count ) { return dec( count ) - count; } /// Resets count to 0 void reset(atomics::memory_order order = atomics::memory_order_relaxed) { m_Counter.store( 0, order ); } }; #if CDS_COMPILER == CDS_COMPILER_CLANG # pragma GCC diagnostic pop #endif /// Empty item counter /** This class may be used instead of \ref item_counter when you do not need full \ref item_counter interface. All methods of the class is empty and returns 0. The object of this class should not be used in data structure that behavior significantly depends on item counting (for example, in many hash map implementation). */ class empty_item_counter { public: typedef size_t counter_type ; ///< Counter type public: /// Returns 0 static counter_type value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) { return 0; } /// Same as \ref value(), always returns 0. operator counter_type() const { return value(); } /// Dummy increment. Always returns 0 static counter_type inc(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) { return 0; } /// Dummy increment. Always returns 0 static counter_type inc( counter_type /*count*/, atomics::memory_order /*order*/ = atomics::memory_order_relaxed ) { return 0; } /// Dummy increment. Always returns 0 static counter_type dec(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) { return 0; } /// Dummy increment. Always returns 0 static counter_type dec( counter_type /*count*/, atomics::memory_order /*order*/ = atomics::memory_order_relaxed ) { return 0; } /// Dummy pre-increment. Always returns 0 counter_type operator ++() const { return 0; } /// Dummy post-increment. Always returns 0 counter_type operator ++(int) const { return 0; } /// Dummy pre-decrement. Always returns 0 counter_type operator --() const { return 0; } /// Dummy post-decrement. 
Always returns 0 counter_type operator --(int) const { return 0; } /// Dummy increment by \p count, always returns 0 counter_type operator +=( counter_type count ) { CDS_UNUSED( count ); return 0; } /// Dummy decrement by \p count, always returns 0 counter_type operator -=( counter_type count ) { CDS_UNUSED( count ); return 0; } /// Dummy function static void reset(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) {} }; } // namespace atomicity } // namespace cds #endif // #ifndef CDSLIB_CXX11_ATOMIC_H libcds-2.3.3/cds/algo/backoff_strategy.h000066400000000000000000000370121341244201700201410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_BACKOFF_STRATEGY_H #define CDSLIB_BACKOFF_STRATEGY_H /* Filename: backoff_strategy.h Created 2007.03.01 by Maxim Khiszinsky Description: Generic back-off strategies Editions: 2007.03.01 Maxim Khiszinsky Created 2008.10.02 Maxim Khiszinsky Backoff action transfers from contructor to operator() for all backoff schemas 2009.09.10 Maxim Khiszinsky reset() function added */ #include // declval #include #include #include namespace cds { /// Different backoff schemes /** Back-off schema may be used in lock-free algorithms when the algorithm cannot perform some action because a conflict with the other concurrent operation is encountered. In this case current thread can do another work or can call processor's performance hint. The interface of back-off strategy is following: \code struct backoff_strategy { void operator()(); template bool operator()( Predicate pr ); void reset(); }; \endcode \p operator() operator calls back-off strategy's action. It is main part of back-off strategy. Interruptible back-off template < typename Predicate > bool operator()( Predicate pr ) allows to interrupt back-off spinning if \p pr predicate returns \p true. \p Predicate is a functor with the following interface: \code struct predicate { bool operator()(); }; \endcode \p reset() function resets internal state of back-off strategy to initial state. It is required for some back-off strategies, for example, exponential back-off. */ namespace backoff { /// Empty backoff strategy. Do nothing struct empty { //@cond void operator ()() const noexcept {} template bool operator()(Predicate pr) const noexcept( noexcept(std::declval()())) { return pr(); } static void reset() noexcept {} //@endcond }; /// Switch to another thread (yield). Good for thread preemption architecture. struct yield { //@cond void operator ()() const noexcept { std::this_thread::yield(); } template bool operator()(Predicate pr) const noexcept( noexcept(std::declval()())) { if ( pr()) return true; operator()(); return false; } static void reset() noexcept {} //@endcond }; /// Random pause /** This back-off strategy calls processor-specific pause hint instruction if one is available for the processor architecture. */ struct pause { //@cond void operator ()() const noexcept { # ifdef CDS_backoff_hint_defined platform::backoff_hint(); # endif } template bool operator()(Predicate pr) const noexcept( noexcept(std::declval()())) { if ( pr()) return true; operator()(); return false; } static void reset() noexcept {} //@endcond }; /// Processor hint back-off /** This back-off schema calls performance hint instruction if it is available for current processor. Otherwise, it calls \p nop. 
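For example, a typical spin-wait loop can be built on top of any of these strategies (an added sketch; \p spin_wait and \p ready are illustrative names only, not part of the library):
\code
template <class Backoff>
void spin_wait( atomics::atomic<bool>& ready )
{
    Backoff bkoff;
    while ( !ready.load( atomics::memory_order_acquire ))
        bkoff();    // pause / yield / sleep, depending on the strategy
    bkoff.reset();
}
\endcode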
*/ struct hint { //@cond void operator ()() const noexcept { # if defined(CDS_backoff_hint_defined) platform::backoff_hint(); # elif defined(CDS_backoff_nop_defined) platform::backoff_nop(); # endif } template bool operator()(Predicate pr) const noexcept(noexcept(std::declval()())) { if ( pr()) return true; operator()(); return false; } static void reset() noexcept {} //@endcond }; /// \p backoff::exponential const traits struct exponential_const_traits { typedef hint fast_path_backoff; ///< Fast-path back-off strategy typedef yield slow_path_backoff; ///< Slow-path back-off strategy enum: size_t { lower_bound = 16, ///< Minimum spinning limit upper_bound = 16 * 1024 ///< Maximum spinning limit }; }; /// \p nackoff::exponential runtime traits struct exponential_runtime_traits { typedef hint fast_path_backoff; ///< Fast-path back-off strategy typedef yield slow_path_backoff; ///< Slow-path back-off strategy static size_t lower_bound; ///< Minimum spinning limit, default is 16 static size_t upper_bound; ///< Maximum spinning limit, default is 16*1024 }; /// Exponential back-off /** This back-off strategy is composite. It consists of \p SpinBkoff and \p YieldBkoff back-off strategy. In first, the strategy tries to apply repeatedly \p SpinBkoff (spinning phase) until internal counter of failed attempts reaches its maximum spinning value. Then, the strategy transits to high-contention phase where it applies \p YieldBkoff until \p reset() is called. On each spinning iteration the internal spinning counter is doubled. Selecting the best value for maximum spinning limit is platform and application specific task. The limits are described by \p Traits template parameter. There are two types of \p Traits: - constant traits \p exponential_const_traits - specifies the lower and upper limits as a compile-time constants; to change the limits you should recompile your application - runtime traits \p exponential_runtime_traits - specifies the limits as \p s_nExpMin and \p s_nExpMax variables which can be changed at runtime to tune back-off strategy. The traits class must declare two data member: - \p lower_bound - the lower bound of spinning loop - \p upper_bound - the upper boudn of spinning loop You may use \p Traits template parameter to separate back-off implementations. 
For example, you may define two \p exponential back-offs that is the best for your task A and B: \code #include namespace bkoff = cds::backoff; // the best bounds for task A struct traits_A: public bkoff::exponential_const_traits { static size_t lower_bound; static size_t upper_bound; }; size_t traits_A::lower_bound = 1024; size_t traits_A::upper_bound = 8 * 1024; // the best bounds for task B struct traits_B: public bkoff::exponential_const_traits { static size_t lower_bound; static size_t upper_bound; }; size_t traits_A::lower_bound = 16; size_t traits_A::upper_bound = 1024; // // define your back-off specialization typedef bkoff::exponential expBackOffA; typedef bkoff::exponential expBackOffB; \endcode */ template class exponential { public: typedef Traits traits; ///< Traits typedef typename traits::fast_path_backoff spin_backoff ; ///< spin (fast-path) back-off strategy typedef typename traits::slow_path_backoff yield_backoff ; ///< yield (slow-path) back-off strategy protected: size_t m_nExpCur ; ///< Current spin counter in range [traits::s_nExpMin, traits::s_nExpMax] spin_backoff m_bkSpin ; ///< Spinning (fast-path) phase back-off strategy yield_backoff m_bkYield ; ///< Yield phase back-off strategy public: /// Default ctor exponential() noexcept : m_nExpCur( traits::lower_bound ) {} //@cond void operator ()() noexcept(noexcept(std::declval()()) && noexcept(std::declval()())) { if ( m_nExpCur <= traits::upper_bound ) { for ( size_t n = 0; n < m_nExpCur; ++n ) m_bkSpin(); m_nExpCur *= 2; } else m_bkYield(); } template bool operator()( Predicate pr ) noexcept( noexcept(std::declval()()) && noexcept(std::declval()()) && noexcept(std::declval()())) { if ( m_nExpCur <= traits::upper_bound ) { for ( size_t n = 0; n < m_nExpCur; ++n ) { if ( m_bkSpin(pr)) return true; } m_nExpCur *= 2; } else return m_bkYield(pr); return false; } void reset() noexcept( noexcept( std::declval().reset()) && noexcept( std::declval().reset())) { m_nExpCur = traits::lower_bound; m_bkSpin.reset(); m_bkYield.reset(); } //@endcond }; //@cond template struct make_exponential { struct traits: public exponential_const_traits { typedef FastPathBkOff fast_path_backoff; typedef SlowPathBkOff slow_path_backoff; }; typedef exponential type; }; template using make_exponential_t = typename make_exponential::type; //@endcond /// Constant traits for \ref delay back-off strategy struct delay_const_traits { typedef std::chrono::milliseconds duration_type; ///< Timeout type enum: unsigned { timeout = 5 ///< Delay timeout }; }; /// Runtime traits for \ref delay back-off strategy struct delay_runtime_traits { typedef std::chrono::milliseconds duration_type; ///< Timeout type static unsigned timeout; ///< Delay timeout, default 5 }; /// Delay back-off strategy /** Template arguments: - \p Duration - duration type, default is \p std::chrono::milliseconds - \p Traits - a class that defines default timeout. Choosing the best value for th timeout is platform and application specific task. The default values for timeout is provided by \p Traits class that should \p timeout data member. There are two predefined \p Traits implementation: - \p delay_const_traits - defines \p timeout as a constant (enum). To change timeout you should recompile your application. - \p delay_runtime_traits - specifies timeout as static data member that can be changed at runtime to tune the back-off strategy. You may use \p Traits template parameter to separate back-off implementations. 
For example, you may define two \p delay back-offs for 5 and 10 ms timeout: \code #include namespace bkoff = cds::backoff; // 5ms delay struct ms5 { typedef std::chrono::milliseconds duration_type; enum: unsigned { timeout = 5 }; }; // 10ms delay, runtime support struct ms10 { typedef std::chrono::milliseconds duration_type; static unsigned timeout; }; unsigned ms10::timeout = 10; // define your back-off specialization typedef bkoff::delay delay5; typedef bkoff::delay delay10; \endcode */ template class delay { public: typedef Traits traits; ///< Traits typedef typename Traits::duration_type duration_type; ///< Duration type (default \p std::chrono::milliseconds) protected: ///@cond duration_type const timeout; ///@endcond public: /// Default ctor takes the timeout from \p traits::timeout delay() noexcept : timeout( traits::timeout ) {} /// Initializes timeout from \p nTimeout constexpr explicit delay( unsigned int nTimeout ) noexcept : timeout( nTimeout ) {} //@cond void operator()() const { std::this_thread::sleep_for( timeout ); } template bool operator()(Predicate pr) const { for ( unsigned int i = 0; i < traits::timeout; i += 2 ) { if ( pr()) return true; std::this_thread::sleep_for( duration_type( 2 )); } return false; } static void reset() noexcept {} //@endcond }; //@cond template struct make_delay_of { struct traits { typedef Duration duration_type; enum: unsigned { timeout = Timeout }; }; typedef delay type; }; //@endcond /// Delay back-off strategy, template version /** This is a simplified version of \p backoff::delay class. Template parameter \p Timeout sets a delay timeout of \p Duration unit. */ template using delay_of = typename make_delay_of< Timeout, Duration >::type; /// Default backoff strategy typedef exponential Default; /// Default back-off strategy for lock primitives typedef exponential LockDefault; } // namespace backoff } // namespace cds #endif // #ifndef CDSLIB_BACKOFF_STRATEGY_H libcds-2.3.3/cds/algo/base.h000066400000000000000000000007221341244201700155340ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_BASE_H #define CDSLIB_ALGO_BASE_H #include namespace cds { /// Different approaches and techniques for supporting high-concurrent data structure namespace algo {} } // namespace cds #endif // #ifndef CDSLIB_ALGO_BASE_H libcds-2.3.3/cds/algo/bit_reversal.h000066400000000000000000000200371341244201700173040ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_BIT_REVERSAL_H #define CDSLIB_ALGO_BIT_REVERSAL_H #include // Source: http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c namespace cds { namespace algo { /// Bit reversal algorithms namespace bit_reversal { /// SWAR algorithm (source: http://aggregate.org/MAGIC/#Bit%20Reversal) struct swar { /// 32bit uint32_t operator()( uint32_t x ) const { x = ( ( ( x & 0xaaaaaaaa ) >> 1 ) | ( ( x & 0x55555555 ) << 1 )); x = ( ( ( x & 0xcccccccc ) >> 2 ) | ( ( x & 0x33333333 ) << 2 )); x = ( ( ( x & 0xf0f0f0f0 ) >> 4 ) | ( ( x & 0x0f0f0f0f ) << 4 )); x = ( ( ( x & 0xff00ff00 ) >> 8 ) | ( ( x & 0x00ff00ff ) << 8 )); return( ( x >> 16 ) | ( x << 16 )); } /// 64bit uint64_t operator()( uint64_t x ) const { return ( static_cast( operator()( static_cast( x ))) << 32 ) // low 32bit | ( static_cast( operator()( static_cast( x >> 32 )))); // high 32bit } }; /// Lookup table algorithm struct lookup { /// 32bit uint32_t operator()( uint32_t x ) const { static uint8_t const table[] = { 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF }; static_assert( sizeof( table ) / sizeof( table[0] ) == 256, "Table size mismatch" ); return ( static_cast( table[x & 0xff] ) << 24 ) | ( static_cast( table[( x >> 8 ) & 0xff] ) << 16 ) | ( static_cast( table[( x >> 16 ) & 0xff] ) << 8 ) | ( static_cast( table[( x >> 24 ) & 0xff] )); } /// 64bit uint64_t operator()( uint64_t x ) const { return ( static_cast( operator()( static_cast( x ))) << 32 ) | static_cast( operator()( static_cast( x >> 32 ))); } }; /// Mul-Div algorithm for 32bit architectire /// Mul-Div algorithm struct muldiv { //@cond static uint8_t muldiv32_byte( uint8_t b ) { return static_cast( ( ( b * 0x0802LU & 0x22110LU ) | ( b * 0x8020LU & 0x88440LU )) * 0x10101LU >> 16 ); } static uint8_t muldiv64_byte( uint8_t b ) { return static_cast( ( b * 0x0202020202ULL & 0x010884422010ULL ) % 1023 ); } // for 32bit architecture static uint32_t muldiv32( uint32_t x ) { 
return static_cast( muldiv32_byte( static_cast( x >> 24 ))) | ( static_cast( muldiv32_byte( static_cast( x >> 16 ))) << 8 ) | ( static_cast( muldiv32_byte( static_cast( x >> 8 ))) << 16 ) | ( static_cast( muldiv32_byte( static_cast( x ))) << 24 ); } static uint64_t muldiv32( uint64_t x ) { return static_cast( muldiv32_byte( static_cast( x >> 56 ))) | ( static_cast( muldiv32_byte( static_cast( x >> 48 ))) << 8 ) | ( static_cast( muldiv32_byte( static_cast( x >> 40 ))) << 16 ) | ( static_cast( muldiv32_byte( static_cast( x >> 32 ))) << 24 ) | ( static_cast( muldiv32_byte( static_cast( x >> 24 ))) << 32 ) | ( static_cast( muldiv32_byte( static_cast( x >> 16 ))) << 40 ) | ( static_cast( muldiv32_byte( static_cast( x >> 8 ))) << 48 ) | ( static_cast( muldiv32_byte( static_cast( x ))) << 56 ); } /// for 64bit architectire static uint32_t muldiv64( uint32_t x ) { return static_cast( muldiv64_byte( static_cast( x >> 24 ))) | ( static_cast( muldiv64_byte( static_cast( x >> 16 ))) << 8 ) | ( static_cast( muldiv64_byte( static_cast( x >> 8 ))) << 16 ) | ( static_cast( muldiv64_byte( static_cast( x ))) << 24 ); } static uint64_t muldiv64( uint64_t x ) { return static_cast( muldiv64_byte( static_cast( x >> 56 ))) | ( static_cast( muldiv64_byte( static_cast( x >> 48 ))) << 8 ) | ( static_cast( muldiv64_byte( static_cast( x >> 40 ))) << 16 ) | ( static_cast( muldiv64_byte( static_cast( x >> 32 ))) << 24 ) | ( static_cast( muldiv64_byte( static_cast( x >> 24 ))) << 32 ) | ( static_cast( muldiv64_byte( static_cast( x >> 16 ))) << 40 ) | ( static_cast( muldiv64_byte( static_cast( x >> 8 ))) << 48 ) | ( static_cast( muldiv64_byte( static_cast( x ))) << 56 ); } //@endcond /// 32bit uint32_t operator()( uint32_t x ) const { # if CDS_BUILD_BITS == 32 return muldiv32( x ); # else return muldiv64( x ); # endif } /// 64bit uint64_t operator()( uint64_t x ) const { # if CDS_BUILD_BITS == 32 return muldiv32( x ); # else return muldiv64( x ); # endif } }; } // namespace bit_reversal }} // namespace cds::algo #endif // #ifndef CDSLIB_ALGO_BIT_REVERSAL_H libcds-2.3.3/cds/algo/bitop.h000066400000000000000000000124651341244201700157460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_BITOP_H #define CDSLIB_BITOP_H /* Different bit algorithms: LSB get least significant bit number MSB get most significant bit number bswap swap byte order of word RBO reverse bit order of word Editions: 2007.10.08 Maxim.Khiszinsky Created */ #include #include namespace cds { /// Bit operations namespace bitop { ///@cond none namespace details { template struct BitOps; // 32-bit bit ops template <> struct BitOps<4> { typedef uint32_t TUInt; static int MSB( TUInt x ) { return bitop::platform::msb32( x ); } static int LSB( TUInt x ) { return bitop::platform::lsb32( x ); } static int MSBnz( TUInt x ) { return bitop::platform::msb32nz( x ); } static int LSBnz( TUInt x ) { return bitop::platform::lsb32nz( x ); } static int SBC( TUInt x ) { return bitop::platform::sbc32( x ) ; } static int ZBC( TUInt x ) { return bitop::platform::zbc32( x ) ; } static TUInt RBO( TUInt x ) { return bitop::platform::rbo32( x ); } static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement32( &x, nBit ); } static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift32(x); } }; // 64-bit bit ops template <> struct BitOps<8> { typedef uint64_t TUInt; static int MSB( TUInt x ) { return bitop::platform::msb64( x ); } static int LSB( TUInt x ) { return bitop::platform::lsb64( x ); } static int MSBnz( TUInt x ) { return bitop::platform::msb64nz( x ); } static int LSBnz( TUInt x ) { return bitop::platform::lsb64nz( x ); } static int SBC( TUInt x ) { return bitop::platform::sbc64( x ) ; } static int ZBC( TUInt x ) { return bitop::platform::zbc64( x ) ; } static TUInt RBO( TUInt x ) { return bitop::platform::rbo64( x ); } static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement64( &x, nBit ); } static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift64(x); } }; } // namespace details //@endcond /// Get least significant bit (LSB) number (1..32/64), 0 if nArg == 0 template static inline int LSB( T nArg ) { return details::BitOps< sizeof(T) >::LSB( (typename details::BitOps::TUInt) nArg ); } /// Get least significant bit (LSB) number (0..31/63) /** Precondition: nArg != 0 */ template static inline int LSBnz( T nArg ) { assert( nArg != 0 ); return details::BitOps< sizeof(T) >::LSBnz( (typename details::BitOps::TUInt) nArg ); } /// Get most significant bit (MSB) number (1..32/64), 0 if nArg == 0 template static inline int MSB( T nArg ) { return details::BitOps< sizeof(T) >::MSB( (typename details::BitOps::TUInt) nArg ); } /// Get most significant bit (MSB) number (0..31/63) /** Precondition: nArg != 0 */ template static inline int MSBnz( T nArg ) { assert( nArg != 0 ); return details::BitOps< sizeof(T) >::MSBnz( (typename details::BitOps::TUInt) nArg ); } /// Get non-zero bit count of a word template static inline int SBC( T nArg ) { return details::BitOps< sizeof(T) >::SBC( (typename details::BitOps::TUInt) nArg ); } /// Get zero bit count of a word template static inline int ZBC( T nArg ) { return details::BitOps< sizeof(T) >::ZBC( (typename details::BitOps::TUInt) nArg ); } /// Reverse bit order of \p nArg template static inline T RBO( T nArg ) { return (T) details::BitOps< sizeof(T) >::RBO( (typename details::BitOps::TUInt) nArg ); } /// Complement bit \p nBit in \p nArg template static inline bool complement( T& nArg, int nBit ) { return details::BitOps< sizeof(T) >::complement( reinterpret_cast< typename details::BitOps::TUInt& >( nArg ), nBit ); } 
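    /// Added usage example (not part of the original header): rounds \p nArg up to
    /// the next power of two by combining \ref MSBnz with a shift. Assumes nArg >= 2;
    /// if \p nArg is already a power of two it is returned unchanged.
    template <typename T>
    static inline T ceil2_example( T nArg )
    {
        assert( nArg >= 2 );
        return T(1) << ( MSBnz( nArg - 1 ) + 1 );
    }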
/// Simple random number generator template static inline T RandXorShift( T x) { return (T) details::BitOps< sizeof(T) >::RandXorShift(x); } } // namespace bitop } //namespace cds #endif // #ifndef CDSLIB_BITOP_H libcds-2.3.3/cds/algo/elimination.h000066400000000000000000000051341341244201700171340ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_ELIMINATION_H #define CDSLIB_ALGO_ELIMINATION_H #include #include #include #include namespace cds { namespace algo { /// Elimination technique /** @anchor cds_elimination_description Elimination technique allows highly distributed coupling and execution of operations with reverse semantics like the pushes and pops on a stack. If a push followed by a pop are performed on a stack, the data structure's state does not change (similarly for a pop followed by a push). This means that if one can cause pairs of pushes and pops to meet and pair up in separate locations, the threads can exchange values without having to touch a centralized structure since they have anyhow "eliminated" each other's effect on it. Elimination can be implemented by using a collision array in which threads pick random locations in order to try and collide. Pairs of threads that "collide" in some location run through a synchronization protocol, and all such disjoint collisions can be performed in parallel. If a thread has not met another in the selected location or if it met a thread with an operation that cannot be eliminated (such as two push operations), an alternative scheme must be used. */ namespace elimination { /// Base class describing an operation for eliminating /** This class contains some debugng info. Actual operation descriptor depends on real container and its interface. */ struct operation_desc { record * pOwner; ///< Owner of the descriptor }; /// Acquires elimination record for the current thread template static inline record * init_record( OperationDesc& op ) { record& rec = cds::threading::elimination_record(); assert( rec.is_free()); op.pOwner = &rec; rec.pOp = static_cast( &op ); return &rec; } /// Releases elimination record for the current thread static inline void clear_record() { cds::threading::elimination_record().pOp = nullptr; } } // namespace elimination }} // namespace cds::algo #endif // CDSLIB_ALGO_ELIMINATION_H libcds-2.3.3/cds/algo/elimination_opt.h000066400000000000000000000022451341244201700200160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_ELIMINATION_OPT_H #define CDSLIB_ALGO_ELIMINATION_OPT_H #include namespace cds { namespace opt { /// Enable \ref cds_elimination_description "elimination back-off" for the container template struct enable_elimination { //@cond template struct pack: public Base { static constexpr const bool enable_elimination = Enable; }; //@endcond }; /// \ref cds_elimination_description "Elimination back-off strategy" option setter /** Back-off strategy for elimination. Usually, elimination back-off strategy is \p cds::backoff::delay. 
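Both options are usually used together; for example, a Treiber stack with elimination enabled might be declared like this (an added sketch; see the \p cds::container::TreiberStack documentation for the exact option set):
\code
#include <cds/container/treiber_stack.h>

typedef cds::container::TreiberStack< cds::gc::HP, int,
    typename cds::container::treiber_stack::make_traits<
        cds::opt::enable_elimination< true >
        ,cds::opt::elimination_backoff< cds::backoff::delay_of<5>>
    >::type
> elimination_stack;
\endcode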
*/ template struct elimination_backoff { //@cond template struct pack: public Base { typedef Type elimination_backoff; }; //@endcond }; }} // namespace cds::opt #endif // #ifndef CDSLIB_ALGO_ELIMINATION_OPT_H libcds-2.3.3/cds/algo/elimination_tls.h000066400000000000000000000015501341244201700200140ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_ELIMINATION_TLS_H #define CDSLIB_ALGO_ELIMINATION_TLS_H #include namespace cds { namespace algo { namespace elimination { // Forwards struct operation_desc; /// Per-thread elimination record /** @headerfile cds/algo/elimination.h */ struct record { operation_desc * pOp ; ///< Operation descriptor /// Initialization record() : pOp( nullptr ) {} /// Checks if the record is free bool is_free() const { return pOp == nullptr; } }; }}} // cds::algo::elimination #endif // #ifndef CDSLIB_ALGO_ELIMINATION_TLS_H libcds-2.3.3/cds/algo/flat_combining.h000066400000000000000000000005501341244201700175740ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_FLAT_COMBINING_H #define CDSLIB_ALGO_FLAT_COMBINING_H #include #endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_H libcds-2.3.3/cds/algo/flat_combining/000077500000000000000000000000001341244201700174235ustar00rootroot00000000000000libcds-2.3.3/cds/algo/flat_combining/defs.h000066400000000000000000000044421341244201700205210ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_FLAT_COMBINING_DEFS_H #define CDSLIB_ALGO_FLAT_COMBINING_DEFS_H #include namespace cds { namespace algo { namespace flat_combining { /// Special values of \p publication_record::nRequest enum request_value { req_EmptyRecord, ///< Publication record is empty req_Response, ///< Operation is done req_Operation ///< First operation id for derived classes }; /// \p publication_record state enum record_state { inactive, ///< Record is inactive active, ///< Record is active removed ///< Record should be removed }; /// Record of publication list /** Each data structure based on flat combining contains a class derived from \p %publication_record */ struct publication_record { atomics::atomic nRequest; ///< Request field (depends on data structure) atomics::atomic nState; ///< Record state: inactive, active, removed atomics::atomic nAge; ///< Age of the record atomics::atomic pNext; ///< Next record in active publication list atomics::atomic pNextAllocated; ///< Next record in allocated publication list /// Initializes publication record publication_record() : nRequest( req_EmptyRecord ) , nAge( 0 ) , pNext( nullptr ) , pNextAllocated( nullptr ) { nState.store( inactive, atomics::memory_order_release ); } /// Returns the value of \p nRequest field unsigned int op( atomics::memory_order mo = atomics::memory_order_relaxed ) const { return nRequest.load( mo ); } /// Checks if the operation is done bool is_done() const { return nRequest.load( atomics::memory_order_relaxed ) == req_Response; } }; }}} // namespace cds::algo::flat_combining #endif // CDSLIB_ALGO_FLAT_COMBINING_DEFS_H libcds-2.3.3/cds/algo/flat_combining/kernel.h000066400000000000000000001171311341244201700210600ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H #define CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H #include #include #include #include #include #include namespace cds { namespace algo { /// @defgroup cds_flat_combining_intrusive Intrusive flat combining containers /// @defgroup cds_flat_combining_container Non-intrusive flat combining containers /// Flat combining /** @anchor cds_flat_combining_description Flat combining (FC) technique is invented by Hendler, Incze, Shavit and Tzafrir in their paper [2010] "Flat Combining and the Synchronization-Parallelism Tradeoff". The technique converts a sequential data structure to its concurrent implementation. A few structures are added to the sequential implementation: a global lock, a count of the number of combining passes, and a pointer to the head of a publication list. The publication list is a list of thread-local records of a size proportional to the number of threads that are concurrently accessing the shared object. Each thread \p t accessing the structure to perform an invocation of some method \p f() on the shared object executes the following sequence of steps:
  1. Write the invocation opcode and parameters (if any) of the method \p f() to be applied sequentially to the shared object in the request field of your thread-local publication record (there is no need to use a load-store memory barrier). The request field will later be used to receive the response. If your thread-local publication record is marked as active, continue to step 2; otherwise continue to step 5.
  2. Check if the global lock is taken. If so (another thread is an active combiner), spin on the request field waiting for a response to the invocation (one can add a yield at this point to allow other threads on the same core to run). Once in a while, while spinning, check whether the lock is still taken and whether your record is active (you may use any \p wait_strategy instead of spinning). If your record is inactive, proceed to step 5. Once the response is available, reset the request field to null and return the response.
  3. If the lock is not taken, attempt to acquire it and become a combiner. If you fail, return to spinning in step 2.
  4. Otherwise, you hold the lock and are a combiner.
    • Increment the combining pass count by one.
    • Execute a \p fc_apply() by traversing the publication list from the head, combining all non-null method call invocations, setting the age of each of these records to the current count, applying the combined method calls to the shared data structure, and returning responses to all the invocations. This traversal is guaranteed to be wait-free.
    • If the count is such that a cleanup needs to be performed, traverse the publication list from the head. Starting from the second item (we always leave the item pointed to by the head in the list), remove from the publication list all records whose age is much smaller than the current count. This is done by removing the node and marking it as inactive.
    • Release the lock.
  5. If you have no thread-local publication record, allocate one marked as active. If you already have one and it is marked as inactive, mark it as active. Execute a store-load memory barrier. Proceed to insert the record into the list with a successful CAS to the head. Then proceed to step 1.
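In pseudo-C++ the protocol above looks roughly like this (an added sketch with hypothetical names; the real \p kernel below adds record aging, list cleanup, and pluggable wait strategies):
\code
struct pub_rec {
    std::atomic<int>      request{ 0 };   // >0 - pending op, <0 - response, 0 - empty
    bool                  active = true;
    std::atomic<pub_rec*> next{ nullptr };
};
std::mutex  gLock;      // the global lock
pub_rec*    gHead;      // head of the publication list
int apply( int op );    // applies one operation to the sequential structure

int invoke( pub_rec* myRec, int op )
{
    myRec->request.store( op, std::memory_order_release );         // step 1
    for (;;) {
        if ( gLock.try_lock()) {                                   // step 3: become the combiner
            for ( pub_rec* p = gHead; p; p = p->next.load())       // step 4: combining pass
                if ( p->active && p->request.load() > 0 )
                    p->request.store( -apply( p->request.load())); // apply and respond
            gLock.unlock();
        }
        int r = myRec->request.load( std::memory_order_acquire );  // step 2: wait for response
        if ( r < 0 )
            return -r;
    }
}
\endcode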
As the test results show, the flat combining technique is suitable for non-intrusive containers like stack, queue, deque. For intrusive concurrent containers the flat combining demonstrates less impressive results. \ref cds_flat_combining_container "List of FC-based containers" in libcds. \ref cds_flat_combining_intrusive "List of intrusive FC-based containers" in libcds. */ namespace flat_combining { /// Flat combining internal statistics template struct stat { typedef Counter counter_type; ///< Event counter type counter_type m_nOperationCount ; ///< How many operations have been performed counter_type m_nCombiningCount ; ///< Combining call count counter_type m_nCompactPublicationList; ///< Count of publication list compacting counter_type m_nDeactivatePubRecord; ///< How many publication records were deactivated during compacting counter_type m_nActivatePubRecord; ///< Count of publication record activating counter_type m_nPubRecordCreated ; ///< Count of created publication records counter_type m_nPubRecordDeleted ; ///< Count of deleted publication records counter_type m_nPassiveWaitCall; ///< Count of passive waiting call (\p kernel::wait_for_combining()) counter_type m_nPassiveWaitIteration;///< Count of iteration inside passive waiting counter_type m_nPassiveWaitWakeup; ///< Count of forcing wake-up of passive wait cycle counter_type m_nInvokeExclusive; ///< Count of call \p kernel::invoke_exclusive() counter_type m_nWakeupByNotifying; ///< How many times the passive thread be waked up by a notification counter_type m_nPassiveToCombiner; ///< How many times the passive thread becomes the combiner /// Returns current combining factor /** Combining factor is how many operations perform in one combine pass: combining_factor := m_nOperationCount / m_nCombiningCount */ double combining_factor() const { return m_nCombiningCount.get() ? double( m_nOperationCount.get()) / m_nCombiningCount.get() : 0.0; } //@cond void onOperation() { ++m_nOperationCount; } void onCombining() { ++m_nCombiningCount; } void onCompactPublicationList() { ++m_nCompactPublicationList; } void onDeactivatePubRecord() { ++m_nDeactivatePubRecord; } void onActivatePubRecord() { ++m_nActivatePubRecord; } void onCreatePubRecord() { ++m_nPubRecordCreated; } void onDeletePubRecord() { ++m_nPubRecordDeleted; } void onPassiveWait() { ++m_nPassiveWaitCall; } void onPassiveWaitIteration() { ++m_nPassiveWaitIteration; } void onPassiveWaitWakeup() { ++m_nPassiveWaitWakeup; } void onInvokeExclusive() { ++m_nInvokeExclusive; } void onWakeupByNotifying() { ++m_nWakeupByNotifying; } void onPassiveToCombiner() { ++m_nPassiveToCombiner; } //@endcond }; /// Flat combining dummy internal statistics struct empty_stat { //@cond void onOperation() const {} void onCombining() const {} void onCompactPublicationList() const {} void onDeactivatePubRecord() const {} void onActivatePubRecord() const {} void onCreatePubRecord() const {} void onDeletePubRecord() const {} void onPassiveWait() const {} void onPassiveWaitIteration() const {} void onPassiveWaitWakeup() const {} void onInvokeExclusive() const {} void onWakeupByNotifying() const {} void onPassiveToCombiner() const {} //@endcond }; /// Type traits of \ref kernel class /** You can define different type traits for \ref kernel by specifying your struct based on \p %traits or by using \ref make_traits metafunction. 
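For example, internal statistics can be turned on either by deriving a traits struct or via \ref make_traits (an added sketch):
\code
// Custom traits: like the default ones but with statistics enabled
struct my_fc_traits: public cds::algo::flat_combining::traits
{
    typedef cds::algo::flat_combining::stat<> stat;
};

// The same expressed with option-based make_traits
typedef cds::algo::flat_combining::make_traits<
    cds::opt::stat< cds::algo::flat_combining::stat<>>
>::type my_fc_traits2;
\endcode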
*/ struct traits { typedef cds::sync::spin lock_type; ///< Lock type typedef cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>> wait_strategy; ///< Wait strategy typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating \p publication_record derivatives) typedef empty_stat stat; ///< Internal statistics typedef opt::v::relaxed_ordering memory_model; ///< C++ memory ordering model }; /// Metafunction converting option list to traits /** \p Options are: - \p opt::lock_type - mutex type, default is \p cds::sync::spin - \p opt::wait_strategy - wait strategy, see \p wait_strategy namespace, default is \p wait_strategy::backoff. - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) - \p opt::memory_model - C++ memory ordering model. For the list of all available memory ordering options see \p opt::memory_model. Default is \p cds::opt::v::relaxed_ordering */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; /// The kernel of flat combining /** Template parameters: - \p PublicationRecord - a type derived from \ref publication_record - \p Traits - type traits of flat combining, default is \p flat_combining::traits. The \ref make_traits metafunction can be used to create type traits. The kernel object should be a member of a container class. The container cooperates with the flat combining kernel object. There are two ways to interact with the kernel: - One-by-one processing of the active records of the publication list. This mode is provided by the \p combine() function: the container acquires its publication record by \p acquire_record(), fills its fields and calls the \p combine() function of its kernel object. If the current thread becomes a combiner, the kernel calls the \p fc_apply() function of the container for each active non-empty record. Then, the container should release its publication record by \p release_record(). Only one pass through the publication list is possible. - Batch processing - the \p batch_combine() function. In this mode the container obtains access to the entire publication list. This mode allows the container to perform elimination: for example, the stack can collide \p push() and \p pop() requests. The sequence of invocations is the following: the container acquires its publication record by \p acquire_record(), fills its fields and calls the \p batch_combine() function of its kernel object. If the current thread becomes a combiner, the kernel calls the \p fc_process() function of the container passing two iterators pointing to the beginning and the end of the publication list (see \ref iterator class). The iterators allow multiple passes through the active records of the publication list. For each processed record the container should call the \p operation_done() function. At the end, the container should release its record by \p release_record().
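For example, a container might cooperate with the kernel in one-by-one mode like this (an added minimal sketch with hypothetical names; the real FC containers in libcds are considerably more elaborate):
\code
#include <cds/algo/flat_combining.h>

class fc_adder {
    struct pub_record: cds::algo::flat_combining::publication_record {
        int arg;    // operation argument
    };
    enum { op_add = cds::algo::flat_combining::req_Operation };

    cds::algo::flat_combining::kernel< pub_record > fc_;
    long sum_ = 0;  // the sequential data structure protected by flat combining

public:
    void add( int n )
    {
        pub_record* rec = fc_.acquire_record();
        rec->arg = n;
        fc_.combine( op_add, rec, *this ); // the current thread may become the combiner
        assert( rec->is_done());
        fc_.release_record( rec );
    }

    // Called by the kernel for each active non-empty publication record
    void fc_apply( pub_record* rec )
    {
        if ( rec->op() == op_add )
            sum_ += rec->arg;
    }
};
\endcode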
*/ template < typename PublicationRecord ,typename Traits = traits > class kernel { public: typedef Traits traits; ///< Type traits typedef typename traits::lock_type global_lock_type; ///< Global lock type typedef typename traits::wait_strategy wait_strategy; ///< Wait strategy type typedef typename traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data) typedef typename traits::stat stat; ///< Internal statistics typedef typename traits::memory_model memory_model; ///< C++ memory model typedef typename wait_strategy::template make_publication_record::type publication_record_type; ///< Publication record type protected: //@cond typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator typedef std::lock_guard lock_guard; //@endcond protected: atomics::atomic m_nCount; ///< Total count of combining passes. Used as an age. publication_record_type* m_pHead; ///< Head of active publication list publication_record_type* m_pAllocatedHead; ///< Head of allocated publication list boost::thread_specific_ptr< publication_record_type > m_pThreadRec; ///< Thread-local publication record mutable global_lock_type m_Mutex; ///< Global mutex mutable stat m_Stat; ///< Internal statistics unsigned int const m_nCompactFactor; ///< Publication list compacting factor (the list will be compacted through \p %m_nCompactFactor combining passes) unsigned int const m_nCombinePassCount; ///< Number of combining passes wait_strategy m_waitStrategy; ///< Wait strategy public: /// Initializes the object /** Compact factor = 1024 Combiner pass count = 8 */ kernel() : kernel( 1024, 8 ) {} /// Initializes the object kernel( unsigned int nCompactFactor ///< Publication list compacting factor (the list will be compacted through \p nCompactFactor combining passes) ,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread ) : m_nCount(0) , m_pHead( nullptr ) , m_pAllocatedHead( nullptr ) , m_pThreadRec( tls_cleanup ) , m_nCompactFactor( static_cast( cds::beans::ceil2( static_cast( nCompactFactor )) - 1 )) // binary mask , m_nCombinePassCount( nCombinePassCount ) { assert( m_pThreadRec.get() == nullptr ); publication_record_type* pRec = cxx11_allocator().New(); m_pAllocatedHead = m_pHead = pRec; m_pThreadRec.reset( pRec ); m_Stat.onCreatePubRecord(); } /// Destroys the object and all publication records ~kernel() { m_pThreadRec.reset(); // calls tls_cleanup() // delete all publication records for ( publication_record* p = m_pAllocatedHead; p; ) { publication_record * pRec = p; p = p->pNextAllocated.load( memory_model::memory_order_relaxed ); free_publication_record( static_cast( pRec )); } } /// Gets publication list record for the current thread /** If there is no publication record for the current thread the function allocates it. 
*/ publication_record_type * acquire_record() { publication_record_type * pRec = m_pThreadRec.get(); if ( !pRec ) { // Allocate new publication record pRec = cxx11_allocator().New(); m_pThreadRec.reset( pRec ); m_Stat.onCreatePubRecord(); // Insert in allocated list assert( m_pAllocatedHead != nullptr ); publication_record* p = m_pAllocatedHead->pNextAllocated.load( memory_model::memory_order_relaxed ); do { pRec->pNextAllocated.store( p, memory_model::memory_order_release ); } while ( !m_pAllocatedHead->pNextAllocated.compare_exchange_weak( p, pRec, memory_model::memory_order_release, atomics::memory_order_acquire )); publish( pRec ); } else if ( pRec->nState.load( memory_model::memory_order_acquire ) != active ) publish( pRec ); assert( pRec->op() == req_EmptyRecord ); return pRec; } /// Marks publication record for the current thread as empty void release_record( publication_record_type * pRec ) { assert( pRec->is_done()); pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_release ); } /// Trying to execute operation \p nOpId /** \p pRec is the publication record acquiring by \ref acquire_record earlier. \p owner is a container that is owner of flat combining kernel object. As a result the current thread can become a combiner or can wait for another combiner performs \p pRec operation. If the thread becomes a combiner, the kernel calls \p owner.fc_apply for each active non-empty publication record. */ template void combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) { assert( nOpId >= req_Operation ); assert( pRec ); pRec->nRequest.store( nOpId, memory_model::memory_order_release ); m_Stat.onOperation(); try_combining( owner, pRec ); } /// Trying to execute operation \p nOpId in batch-combine mode /** \p pRec is the publication record acquiring by \p acquire_record() earlier. \p owner is a container that owns flat combining kernel object. As a result the current thread can become a combiner or can wait for another combiner performs \p pRec operation. If the thread becomes a combiner, the kernel calls \p owner.fc_process() giving the container the full access over publication list. This function is useful for an elimination technique if the container supports any kind of that. The container can perform multiple pass through publication list. \p owner.fc_process() has two arguments - forward iterators on begin and end of publication list, see \ref iterator class. For each processed record the container should call \p operation_done() function to mark the record as processed. On the end of \p %batch_combine the \p combine() function is called to process rest of publication records. */ template void batch_combine( unsigned int nOpId, publication_record_type* pRec, Container& owner ) { assert( nOpId >= req_Operation ); assert( pRec ); pRec->nRequest.store( nOpId, memory_model::memory_order_release ); m_Stat.onOperation(); try_batch_combining( owner, pRec ); } /// Invokes \p Func in exclusive mode /** Some operation in flat combining containers should be called in exclusive mode i.e the current thread should become the combiner to process the operation. The typical example is \p empty() function. \p %invoke_exclusive() allows do that: the current thread becomes the combiner, invokes \p f exclusively but unlike a typical usage the thread does not process any pending request. Instead, after end of \p f call the current thread wakes up a pending thread if any. 
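For example, a flat-combining container could implement \p empty() this way (an added sketch; \p m_FlatCombining and \p m_Deque are hypothetical members of the container):
\code
bool empty()
{
    bool bRet = false;
    m_FlatCombining.invoke_exclusive( [this, &bRet]() { bRet = m_Deque.empty(); });
    return bRet;
}
\endcode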
*/ template void invoke_exclusive( Func f ) { { lock_guard l( m_Mutex ); f(); } m_waitStrategy.wakeup( *this ); m_Stat.onInvokeExclusive(); } /// Marks \p rec as executed /** This function should be called by container if \p batch_combine() mode is used. For usual combining (see \p combine()) this function is excess. */ void operation_done( publication_record& rec ) { rec.nRequest.store( req_Response, memory_model::memory_order_release ); m_waitStrategy.notify( *this, static_cast( rec )); } /// Internal statistics stat const& statistics() const { return m_Stat; } //@cond // For container classes based on flat combining stat& internal_statistics() const { return m_Stat; } //@endcond /// Returns the compact factor unsigned int compact_factor() const { return m_nCompactFactor + 1; } /// Returns number of combining passes for combiner thread unsigned int combine_pass_count() const { return m_nCombinePassCount; } public: /// Publication list iterator /** Iterators are intended for batch processing by container's \p fc_process function. The iterator allows iterate through active publication list. */ class iterator { //@cond friend class kernel; publication_record_type * m_pRec; //@endcond protected: //@cond iterator( publication_record_type * pRec ) : m_pRec( pRec ) { skip_inactive(); } void skip_inactive() { while ( m_pRec && (m_pRec->nState.load( memory_model::memory_order_acquire ) != active || m_pRec->op( memory_model::memory_order_relaxed) < req_Operation )) { m_pRec = static_cast(m_pRec->pNext.load( memory_model::memory_order_acquire )); } } //@endcond public: /// Initializes an empty iterator object iterator() : m_pRec( nullptr ) {} /// Copy ctor iterator( iterator const& src ) : m_pRec( src.m_pRec ) {} /// Pre-increment iterator& operator++() { assert( m_pRec ); m_pRec = static_cast( m_pRec->pNext.load( memory_model::memory_order_acquire )); skip_inactive(); return *this; } /// Post-increment iterator operator++(int) { assert( m_pRec ); iterator it(*this); ++(*this); return it; } /// Dereference operator, can return \p nullptr publication_record_type* operator ->() { return m_pRec; } /// Dereference operator, the iterator should not be an end iterator publication_record_type& operator*() { assert( m_pRec ); return *m_pRec; } /// Iterator equality friend bool operator==( iterator it1, iterator it2 ) { return it1.m_pRec == it2.m_pRec; } /// Iterator inequality friend bool operator!=( iterator it1, iterator it2 ) { return !( it1 == it2 ); } }; /// Returns an iterator to the first active publication record iterator begin() { return iterator(m_pHead); } /// Returns an iterator to the end of publication list. Should not be dereferenced. 
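        /// A typical batch-processing loop in a container's \p fc_process() iterates
        /// from \p begin() to \p end() (illustrative sketch only; \p fc_kernel and
        /// \p fc_apply_record are assumed container names):
        /// \code
        /// template <typename Iterator>
        /// void fc_process( Iterator itBegin, Iterator itEnd )
        /// {
        ///     for ( Iterator it = itBegin; it != itEnd; ++it ) {
        ///         if ( it->op() >= req_Operation ) {
        ///             fc_apply_record( *it );             // container-specific handling (assumption)
        ///             fc_kernel.operation_done( *it );    // mark the record as processed
        ///         }
        ///     }
        /// }
        /// \endcode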
iterator end() { return iterator(); } public: /// Gets current value of \p rec.nRequest /** This function is intended for invoking from a wait strategy */ int get_operation( publication_record& rec ) { return rec.op( memory_model::memory_order_acquire ); } /// Wakes up any waiting thread /** This function is intended for invoking from a wait strategy */ void wakeup_any() { publication_record* pRec = m_pHead; while ( pRec ) { if ( pRec->nState.load( memory_model::memory_order_acquire ) == active && pRec->op( memory_model::memory_order_acquire ) >= req_Operation ) { m_waitStrategy.notify( *this, static_cast( *pRec )); break; } pRec = pRec->pNext.load( memory_model::memory_order_acquire ); } } private: //@cond static void tls_cleanup( publication_record_type* pRec ) { // Thread done // pRec that is TLS data should be excluded from publication list pRec->nState.store( removed, memory_model::memory_order_release ); } void free_publication_record( publication_record_type* pRec ) { cxx11_allocator().Delete( pRec ); m_Stat.onDeletePubRecord(); } void publish( publication_record_type* pRec ) { assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive ); pRec->nAge.store( m_nCount.load(memory_model::memory_order_relaxed), memory_model::memory_order_relaxed ); pRec->nState.store( active, memory_model::memory_order_relaxed ); // Insert record to publication list if ( m_pHead != static_cast(pRec)) { publication_record * p = m_pHead->pNext.load( memory_model::memory_order_relaxed ); if ( p != static_cast( pRec )) { do { pRec->pNext.store( p, memory_model::memory_order_release ); // Failed CAS changes p } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast(pRec), memory_model::memory_order_release, atomics::memory_order_acquire )); m_Stat.onActivatePubRecord(); } } } void republish( publication_record_type* pRec ) { if ( pRec->nState.load( memory_model::memory_order_relaxed ) != active ) { // The record has been excluded from publication list. Reinsert it publish( pRec ); } } template void try_combining( Container& owner, publication_record_type* pRec ) { if ( m_Mutex.try_lock()) { // The thread becomes a combiner lock_guard l( m_Mutex, std::adopt_lock_t()); // The record pRec can be excluded from publication list. Re-publish it republish( pRec ); combining( owner ); assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); } else { // There is another combiner, wait while it executes our request if ( !wait_for_combining( pRec )) { // The thread becomes a combiner lock_guard l( m_Mutex, std::adopt_lock_t()); // The record pRec can be excluded from publication list. Re-publish it republish( pRec ); combining( owner ); assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); } } } template void try_batch_combining( Container& owner, publication_record_type * pRec ) { if ( m_Mutex.try_lock()) { // The thread becomes a combiner lock_guard l( m_Mutex, std::adopt_lock_t()); // The record pRec can be excluded from publication list. Re-publish it republish( pRec ); batch_combining( owner ); assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); } else { // There is another combiner, wait while it executes our request if ( !wait_for_combining( pRec )) { // The thread becomes a combiner lock_guard l( m_Mutex, std::adopt_lock_t()); // The record pRec can be excluded from publication list. 
Re-publish it republish( pRec ); batch_combining( owner ); assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); } } } template void combining( Container& owner ) { // The thread is a combiner assert( !m_Mutex.try_lock()); unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1; unsigned int nEmptyPassCount = 0; unsigned int nUsefulPassCount = 0; for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) { if ( combining_pass( owner, nCurAge )) ++nUsefulPassCount; else if ( ++nEmptyPassCount > nUsefulPassCount ) break; } m_Stat.onCombining(); if ( ( nCurAge & m_nCompactFactor ) == 0 ) compact_list( nCurAge ); } template bool combining_pass( Container& owner, unsigned int nCurAge ) { publication_record* p = m_pHead; bool bOpDone = false; while ( p ) { switch ( p->nState.load( memory_model::memory_order_acquire )) { case active: if ( p->op( memory_model::memory_order_acquire ) >= req_Operation ) { p->nAge.store( nCurAge, memory_model::memory_order_relaxed ); owner.fc_apply( static_cast( p )); operation_done( *p ); bOpDone = true; } break; case inactive: // Only m_pHead can be inactive in the publication list assert( p == m_pHead ); break; case removed: // Such record will be removed on compacting phase break; default: /// ??? That is impossible assert( false ); } p = p->pNext.load( memory_model::memory_order_acquire ); } return bOpDone; } template void batch_combining( Container& owner ) { // The thread is a combiner assert( !m_Mutex.try_lock()); unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1; for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) owner.fc_process( begin(), end()); combining_pass( owner, nCurAge ); m_Stat.onCombining(); if ( ( nCurAge & m_nCompactFactor ) == 0 ) compact_list( nCurAge ); } bool wait_for_combining( publication_record_type* pRec ) { m_waitStrategy.prepare( *pRec ); m_Stat.onPassiveWait(); while ( pRec->op( memory_model::memory_order_acquire ) != req_Response ) { // The record can be excluded from publication list. 
Reinsert it republish( pRec ); m_Stat.onPassiveWaitIteration(); // Wait while operation processing if ( m_waitStrategy.wait( *this, *pRec )) m_Stat.onWakeupByNotifying(); if ( m_Mutex.try_lock()) { if ( pRec->op( memory_model::memory_order_acquire ) == req_Response ) { // Operation is done m_Mutex.unlock(); // Wake up a pending threads m_waitStrategy.wakeup( *this ); m_Stat.onPassiveWaitWakeup(); break; } // The thread becomes a combiner m_Stat.onPassiveToCombiner(); return false; } } return true; } void compact_list( unsigned int nCurAge ) { // Compacts publication list // This function is called only by combiner thread try_again: publication_record * pPrev = m_pHead; for ( publication_record * p = pPrev->pNext.load( memory_model::memory_order_acquire ); p; ) { switch ( p->nState.load( memory_model::memory_order_relaxed )) { case active: if ( p->nAge.load( memory_model::memory_order_relaxed ) + m_nCompactFactor < nCurAge ) { publication_record * pNext = p->pNext.load( memory_model::memory_order_relaxed ); if ( pPrev->pNext.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { p->nState.store( inactive, memory_model::memory_order_release ); p = pNext; m_Stat.onDeactivatePubRecord(); continue; } } break; case removed: publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); if ( cds_likely( pPrev->pNext.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { p = pNext; continue; } else { // CAS can be failed only in beginning of list assert( pPrev == m_pHead ); goto try_again; } } pPrev = p; p = p->pNext.load( memory_model::memory_order_acquire ); } // Iterate over allocated list to find removed records pPrev = m_pAllocatedHead; for ( publication_record * p = pPrev->pNextAllocated.load( memory_model::memory_order_acquire ); p; ) { if ( p->nState.load( memory_model::memory_order_relaxed ) == removed ) { publication_record * pNext = p->pNextAllocated.load( memory_model::memory_order_relaxed ); if ( pPrev->pNextAllocated.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { free_publication_record( static_cast( p )); p = pNext; continue; } } pPrev = p; p = p->pNextAllocated.load( memory_model::memory_order_relaxed ); } m_Stat.onCompactPublicationList(); } //@endcond }; //@cond class container { public: template void fc_apply( PubRecord * ) { assert( false ); } template void fc_process( Iterator, Iterator ) { assert( false ); } }; //@endcond } // namespace flat_combining }} // namespace cds::algo /* CppMem model (http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/) // Combiner thread - slave (waiting) thread int main() { atomic_int y = 0; // pRec->op int x = 0; // pRec->data {{{ { // slave thread (not combiner) // Op data x = 1; // Annotate request (op) y.store(1, release); // Wait while request done y.load(acquire).readsvalue(2); // Read result r2=x; } ||| { // Combiner thread // Read request (op) r1=y.load(acquire).readsvalue(1); // Execute request - change request data x = 2; // store "request processed" flag (pRec->op := req_Response) y.store(2, release); } }}}; return 0; } */ #endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H libcds-2.3.3/cds/algo/flat_combining/wait_strategy.h000066400000000000000000000351251341244201700224700ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H #define CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H #include #include #include #include #include // thread_specific_ptr namespace cds { namespace opt { /// Wait strategy option for \p flat_combining::kernel template struct wait_strategy { //@cond template struct pack: public Base { typedef Strategy wait_strategy; }; //@endcond }; }} // namespace cds::opt namespace cds { namespace algo { namespace flat_combining { /// Wait strategies for \p flat_combining technique /** Wait strategy specifies how a thread waits until its request is performed by the combiner. See \p wait_strategy::empty wait strategy to explain the interface. */ namespace wait_strategy { /// Empty wait strategy /** Empty wait strategy is just spinning on request field. All functions are empty. */ struct empty { /// Metafunction for defining a publication record for flat combining technique /** Any wait strategy may expand the publication record for storing its own private data. \p PublicationRecord is the type specified by \p flat_combining::kernel. - If the strategy has no thread-private data, it should typedef \p PublicationRecord as a return \p type of metafunction. - Otherwise, if the strategy wants to store anything in thread-local data, it should expand \p PublicationRecord, for example: \code template struct make_publication_record { struct type: public PublicationRecord { int strategy_data; }; }; \endcode */ template struct make_publication_record { typedef PublicationRecord type; ///< Metafunction result }; /// Prepares the strategy /** This function is called before enter to waiting cycle. Some strategies need to prepare its thread-local data in \p rec. \p PublicationRecord is thread's publication record of type \p make_publication_record::type */ template void prepare( PublicationRecord& rec ) { CDS_UNUSED( rec ); } /// Waits for the combiner /** The thread calls this function to wait for the combiner process the request. The function returns \p true if the thread was waked up by the combiner, otherwise it should return \p false. \p FCKernel is a \p flat_combining::kernel object, \p PublicationRecord is thread's publication record of type \p make_publication_record::type */ template bool wait( FCKernel& fc, PublicationRecord& rec ) { CDS_UNUSED( fc ); CDS_UNUSED( rec ); return false; } /// Wakes up the thread /** The combiner calls \p %notify() when it has been processed the request. \p FCKernel is a \p flat_combining::kernel object, \p PublicationRecord is thread's publication record of type \p make_publication_record::type */ template void notify( FCKernel& fc, PublicationRecord& rec ) { CDS_UNUSED( fc ); CDS_UNUSED( rec ); } /// Moves control to other thread /** This function is called when the thread becomes the combiner but the request of the thread is already processed. The strategy may call \p fc.wakeup_any() instructs the kernel to wake up any pending thread. 
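
                For example, the condition-variable based strategies later in this
                file simply delegate to the kernel (sketch matching
                \p single_mutex_multi_condvar and \p multi_mutex_multi_condvar):
                \code
                template <typename FCKernel>
                void wakeup( FCKernel& fc )
                {
                    fc.wakeup_any();    // let the kernel notify a pending thread
                }
                \endcode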
\p FCKernel is a \p flat_combining::kernel object, */ template void wakeup( FCKernel& fc ) { CDS_UNUSED( fc ); } }; /// Back-off wait strategy /** Template argument \p Backoff specifies back-off strategy, default is cds::backoff::delay_of<2> */ template > struct backoff { typedef BackOff back_off; ///< Back-off strategy /// Incorporates back-off strategy into publication record template struct make_publication_record { //@cond struct type: public PublicationRecord { back_off bkoff; }; //@endcond }; /// Resets back-off strategy in \p rec template void prepare( PublicationRecord& rec ) { rec.bkoff.reset(); } /// Calls back-off strategy template bool wait( FCKernel& /*fc*/, PublicationRecord& rec ) { rec.bkoff(); return false; } /// Does nothing template void notify( FCKernel& /*fc*/, PublicationRecord& /*rec*/ ) {} /// Does nothing template void wakeup( FCKernel& ) {} }; /// Wait strategy based on the single mutex and the condition variable /** The strategy shares the mutex and conditional variable for all thread. Template parameter \p Milliseconds specifies waiting duration; the minimal value is 1. */ template class single_mutex_single_condvar { //@cond std::mutex m_mutex; std::condition_variable m_condvar; bool m_wakeup; typedef std::unique_lock< std::mutex > unique_lock; //@endcond public: enum { c_nWaitMilliseconds = Milliseconds < 1 ? 1 : Milliseconds ///< Waiting duration }; /// Empty metafunction template struct make_publication_record { typedef PublicationRecord type; ///< publication record type }; /// Default ctor single_mutex_single_condvar() : m_wakeup( false ) {} /// Does nothing template void prepare( PublicationRecord& /*rec*/ ) {} /// Sleeps on condition variable waiting for notification from combiner template bool wait( FCKernel& fc, PublicationRecord& rec ) { if ( fc.get_operation( rec ) >= req_Operation ) { unique_lock lock( m_mutex ); if ( fc.get_operation( rec ) >= req_Operation ) { if ( m_wakeup ) { m_wakeup = false; return true; } bool ret = m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; m_wakeup = false; return ret; } } return false; } /// Calls condition variable function \p notify_all() template void notify( FCKernel& fc, PublicationRecord& /*rec*/ ) { wakeup( fc ); } /// Calls condition variable function \p notify_all() template void wakeup( FCKernel& /*fc*/ ) { unique_lock lock( m_mutex ); m_wakeup = true; m_condvar.notify_all(); } }; /// Wait strategy based on the single mutex and thread-local condition variables /** The strategy shares the mutex, but each thread has its own conditional variable Template parameter \p Milliseconds specifies waiting duration; the minimal value is 1. */ template class single_mutex_multi_condvar { //@cond std::mutex m_mutex; bool m_wakeup; typedef std::unique_lock< std::mutex > unique_lock; //@endcond public: enum { c_nWaitMilliseconds = Milliseconds < 1 ? 
1 : Milliseconds ///< Waiting duration }; /// Incorporates a condition variable into \p PublicationRecord template struct make_publication_record { /// Metafunction result struct type: public PublicationRecord { //@cond std::condition_variable m_condvar; //@endcond }; }; /// Default ctor single_mutex_multi_condvar() : m_wakeup( false ) {} /// Does nothing template void prepare( PublicationRecord& /*rec*/ ) {} /// Sleeps on condition variable waiting for notification from combiner template bool wait( FCKernel& fc, PublicationRecord& rec ) { if ( fc.get_operation( rec ) >= req_Operation ) { unique_lock lock( m_mutex ); if ( fc.get_operation( rec ) >= req_Operation ) { if ( m_wakeup ) { m_wakeup = false; return true; } bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; m_wakeup = false; return ret; } } return false; } /// Calls condition variable function \p notify_one() template void notify( FCKernel& /*fc*/, PublicationRecord& rec ) { unique_lock lock( m_mutex ); m_wakeup = true; rec.m_condvar.notify_one(); } /// Calls \p fc.wakeup_any() to wake up any pending thread template void wakeup( FCKernel& fc ) { fc.wakeup_any(); } }; /// Wait strategy where each thread has a mutex and a condition variable /** Template parameter \p Milliseconds specifies waiting duration; the minimal value is 1. */ template class multi_mutex_multi_condvar { //@cond typedef std::unique_lock< std::mutex > unique_lock; //@endcond public: enum { c_nWaitMilliseconds = Milliseconds < 1 ? 1 : Milliseconds ///< Waiting duration }; /// Incorporates a condition variable and a mutex into \p PublicationRecord template struct make_publication_record { /// Metafunction result struct type: public PublicationRecord { //@cond std::mutex m_mutex; std::condition_variable m_condvar; bool m_wakeup; type() : m_wakeup( false ) {} //@endcond }; }; /// Does nothing template void prepare( PublicationRecord& /*rec*/ ) {} /// Sleeps on condition variable waiting for notification from combiner template bool wait( FCKernel& fc, PublicationRecord& rec ) { if ( fc.get_operation( rec ) >= req_Operation ) { unique_lock lock( rec.m_mutex ); if ( fc.get_operation( rec ) >= req_Operation ) { if ( rec.m_wakeup ) { rec.m_wakeup = false; return true; } bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; rec.m_wakeup = false; return ret; } } return false; } /// Calls condition variable function \p notify_one() template void notify( FCKernel& /*fc*/, PublicationRecord& rec ) { unique_lock lock( rec.m_mutex ); rec.m_wakeup = true; rec.m_condvar.notify_one(); } /// Calls \p fc.wakeup_any() to wake up any pending thread template void wakeup( FCKernel& fc ) { fc.wakeup_any(); } }; } // namespace wait_strategy }}} // namespace cds::algo::flat_combining #endif //CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H libcds-2.3.3/cds/algo/int_algo.h000066400000000000000000000070311341244201700164160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INT_ALGO_H #define CDSLIB_INT_ALGO_H #include namespace cds { namespace beans { /// Returns largest previous integer for log2( n ) static inline size_t log2floor( size_t n ) { return n ? 
cds::bitop::MSBnz( n ) : 0; } /// Returns smallest following integer for log2( n ) static inline size_t log2ceil( size_t n ) { size_t i = log2floor( n ); return ( size_t( 1 ) << i ) < n ? i + 1 : i; } /// Returns largest previous power of 2 for \p n /** Examples: \code floor2(0) == 1 // !!! floor2(1) == 1 floor2(2) == 2 floor2(3) == 2 floor2(4) == 4 floor2(15) == 8 floor2(16) == 16 floor2(17) == 16 \endcode */ static inline size_t floor2( size_t n ) { return size_t(1) << log2floor( n ); } /// Returns smallest following power of 2 for \p n /** Examples: \code ceil2(0) == 1 // !!! ceil2(1) == 1 ceil2(2) == 2 ceil2(3) == 4 ceil2(4) == 4 ceil2(15) == 16 ceil2(16) == 16 ceil2(17) == 32 \endcode */ static inline size_t ceil2( size_t n ) { return size_t(1) << log2ceil( n ); } /// Checks if \p n is power of 2 constexpr static inline bool is_power2( size_t n ) noexcept { return (n & (n - 1)) == 0 && n; } /// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0 static inline size_t log2( size_t n ) { return is_power2(n) ? log2floor(n) : 0; } #if CDS_BUILD_BITS == 32 //@cond // 64bit specializations /// Returns largest previous integer for log2( n ) static inline uint64_t log2floor( uint64_t n ) { return n ? cds::bitop::MSBnz( n ) : 0; } /// Returns smallest following integer for log2( n ) static inline uint64_t log2ceil( uint64_t n ) { uint64_t i = log2floor( n ); return (uint64_t( 1 ) << i) < n ? i + 1 : i; } /// Returns largest previous power of 2 for \p n /** Examples: \code floor2(0) == 1 // !!! floor2(1) == 1 floor2(2) == 2 floor2(3) == 2 floor2(4) == 4 floor2(15) == 8 floor2(16) == 16 floor2(17) == 16 \endcode */ static inline uint64_t floor2( uint64_t n ) { return uint64_t( 1 ) << log2floor( n ); } /// Returns smallest following power of 2 for \p n /** Examples: \code ceil2(0) == 1 // !!! ceil2(1) == 1 ceil2(2) == 2 ceil2(3) == 4 ceil2(4) == 4 ceil2(15) == 16 ceil2(16) == 16 ceil2(17) == 32 \endcode */ static inline uint64_t ceil2( uint64_t n ) { return uint64_t( 1 ) << log2ceil( n ); } /// Checks if \p n is power of 2 constexpr static inline bool is_power2( uint64_t n ) noexcept { return (n & (n - 1)) == 0 && n; } /// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0 static inline uint64_t log2( uint64_t n ) { return is_power2( n ) ? log2floor( n ) : 0; } //@endcond #endif //#if CDS_BUILD_BITS == 32 }} // namespace cds::beans #endif // #ifndef CDSLIB_INT_ALGO_H libcds-2.3.3/cds/algo/split_bitstring.h000066400000000000000000000342761341244201700200550ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H #define CDSLIB_ALGO_SPLIT_BITSTRING_H #include namespace cds { namespace algo { /// Cuts a bit sequence from fixed-size bit-string /** The splitter can be used as an iterator over bit-string. Each call of \p cut() or \p safe_cut() cuts the bit count specified and keeps the position inside bit-string for the next call. The splitter stores a const reference to bit-string, not a copy. The maximum count of bits that can be cut in a single call is sizeof(UInt) * 8 The splitter keeps byte order. Template parameters: - \p BitString - a fixed-sized type that interprets as bit string - \p BitStringSize - the size of \p BitString in bytes, default is sizeof( BitString ). You can specify 0 for default. 
- \p UInt - an unsigned integer, return type for \p cut(), default is \p unsigned There are specialized splitters: - a simplified \p byte_splitter algorithm that is suitable when count is multiple of 8. - \p number_splitter algorithm is suitable for a number */ template class split_bitstring { public: typedef BitString bitstring; ///< Bit-string type typedef UInt uint_type; ///< Result type of \p cut() function static constexpr size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes //@cond static constexpr unsigned const c_nBitPerByte = 8; //@endcond public: /// Initializises the splitter with reference to \p h and zero start bit offset explicit split_bitstring( bitstring const& h ) : cur_( reinterpret_cast( &h )) , offset_( 0 ) , first_( cur_ ) , last_( cur_ + c_bitstring_size ) {} /// Initializises the splitter with reference to \p h and start bit offset \p nBitOffset split_bitstring( bitstring const& h, size_t nBitOffset ) : cur_( reinterpret_cast( &h ) + nBitOffset / c_nBitPerByte ) , offset_( nBitOffset % c_nBitPerByte ) , first_( reinterpret_cast( &h )) , last_( first_ + c_bitstring_size ) {} /// Returns \p true if end-of-string is not reached yet explicit operator bool() const { return !eos(); } /// Returns \p true if end-of-stream encountered bool eos() const { return cur_ >= last_; } /// Cuts next \p count bits from bit-string /** For performance reason, the function does not manage out-of-bound condition. To control that use \p safe_cut(). */ uint_type cut( unsigned count ) { assert( !eos()); uint_type result = 0; # if defined( CDS_ARCH_LITTLE_ENDIAN ) for ( unsigned done = 0; done < count; ) { assert( cur_ < last_ ); unsigned bits = count - done; if ( bits > c_nBitPerByte - offset_ ) bits = c_nBitPerByte - offset_; result |= static_cast(( *cur_ >> offset_ ) & (( 1 << bits ) - 1 )) << done; offset_ += bits; assert( offset_ <= c_nBitPerByte ); if ( offset_ == c_nBitPerByte ) { offset_ = 0; ++cur_; } done += bits; } # else while ( count ) { assert( cur_ < last_ ); unsigned bits = count <= ( c_nBitPerByte - offset_ ) ? count : c_nBitPerByte - offset_; result = ( result << bits ) | (( *cur_ >> offset_ ) & ( ( 1 << bits ) - 1 )); offset_ += bits; assert( offset_ <= c_nBitPerByte ); if ( offset_ == c_nBitPerByte ) { offset_ = 0; ++cur_; } count -= bits; } # endif return result; } /// Cuts up to \p count from the bit-string /** Safe analog of \p cut() but if \p count is more than the rest of bit-string, only the rest is returned. When \p eos() condition is met the function returns 0. */ uint_type safe_cut( unsigned count ) { if ( eos()) return 0; unsigned const rest = static_cast( last_ - cur_ - 1 ) * c_nBitPerByte + ( c_nBitPerByte - offset_ ); if ( rest < count ) count = rest; return count ? 
cut( count ) : 0; } /// Resets the splitter void reset() noexcept { cur_ = first_; offset_ = 0; } /// Returns pointer to source bitstring bitstring const * source() const { return reinterpret_cast( first_ ); } /// Returns current bit offset from beginning of bit-string size_t bit_offset() const { return offset_ + (cur_ - first_) * c_nBitPerByte; } /// Returns how many bits remain size_t rest_count() const { return c_bitstring_size * c_nBitPerByte - bit_offset(); } /// Returns \p true for any argument static constexpr bool is_correct( unsigned /*count*/ ) { return true; } private: //@cond uint8_t const* cur_; unsigned offset_; uint8_t const* const first_; uint8_t const* const last_; //@endcond }; /// Simplified \p split_bitstring algorithm when \p count is multiple of 8 template class byte_splitter { public: typedef BitString bitstring; ///< Bit-string type typedef UInt uint_type; ///< Result type of \p cut() function static constexpr size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes //@cond static constexpr unsigned const c_nBitPerByte = 8; //@endcond public: /// Initializises the splitter with reference to \p h and zero start bit offset explicit byte_splitter( bitstring const& h ) : cur_( reinterpret_cast( &h )) , first_( cur_ ) , last_( cur_ + c_bitstring_size ) {} /// Initializises the splitter with reference to \p h and start bit offset \p nBitOffset byte_splitter( bitstring const& h, size_t nBitOffset ) : cur_( reinterpret_cast( &h ) + nBitOffset / c_nBitPerByte ) , first_( reinterpret_cast( &h )) , last_( first_ + c_bitstring_size ) { assert( is_correct( static_cast( nBitOffset ))); assert( !eos()); } /// Returns \p true if end-of-string is not reached yet explicit operator bool() const { return !eos(); } /// Returns \p true if end-of-stream encountered bool eos() const { return cur_ >= last_; } /// Cuts next \p count bits (must be multiplier of 8) from bit-string /** For performance reason, the function does not manage out-of-bound condition. To control that use \p safe_cut(). */ uint_type cut( unsigned count ) { assert( !eos()); assert( is_correct( count )); uint_type result = 0; # if defined( CDS_ARCH_LITTLE_ENDIAN ) for ( unsigned i = 0; i < count; i += c_nBitPerByte ) { result |= static_cast( *cur_ ) << i; ++cur_; } # else for ( ; count; count -= c_nBitPerByte ) { result = ( result << c_nBitPerByte ) | *cur_; ++cur_; } # endif return result; } /// Cuts up to \p count from the bit-string /** Safe analog of \p cut(): if \p count is more than the rest of bit-string, only the rest is returned. When \p eos() condition is met the function returns 0. */ uint_type safe_cut( unsigned count ) { if ( eos()) return 0; unsigned const rest = static_cast( last_ - cur_ - 1 ) * c_nBitPerByte; if ( rest < count ) count = rest; return count ? 
cut( count ) : 0; } /// Resets the splitter void reset() noexcept { cur_ = first_; } /// Returns pointer to source bitstring bitstring const* source() const { return reinterpret_cast( first_ ); } /// Returns current bit offset from beginning of bit-string size_t bit_offset() const { return (cur_ - first_) * c_nBitPerByte; } /// Returns how many bits remain size_t rest_count() const { return c_bitstring_size * c_nBitPerByte - bit_offset(); } /// Checks if \p count is multiple of 8 static constexpr bool is_correct( unsigned count ) { return count % 8 == 0; } private: //@cond uint8_t const* cur_; uint8_t const* const first_; uint8_t const* const last_; //@endcond }; /// Cuts a bit sequence from a number /** The splitter can be used as an iterator over bit representation of the number of type \p Int. Each call of \p cut() or \p safe_cut() cuts the bit count specified and keeps the position inside the number for the next call. */ template class number_splitter { public: typedef Int int_type; ///< Number type typedef Int uint_type; ///< Result type of \p cut() function //@cond static constexpr unsigned const c_nBitPerByte = 8; //@endcond public: /// Initalizes the splitter with nymber \p n and initial bit offset 0 explicit number_splitter( int_type n ) : number_( n ) , shift_( 0 ) {} /// Initalizes the splitter with nymber \p n and initial bit offset \p initial_offset number_splitter( int_type n, size_t initial_offset ) : number_( n ) , shift_( static_cast( initial_offset )) { assert( initial_offset < sizeof( int_type ) * c_nBitPerByte ); } /// Returns \p true if end-of-string is not reached yet explicit operator bool() const { return !eos(); } /// Returns \p true if end-of-stream encountered bool eos() const { return shift_ >= sizeof( int_type ) * c_nBitPerByte; } /// Cuts next \p count bits (must be multiplier of 8) from the number /** For performance reason, the function does not manage out-of-bound condition. To control that use \p safe_cut(). */ int_type cut( unsigned count ) { assert( !eos()); assert( is_correct( count )); int_type result = ( number_ >> shift_ ) & (( 1 << count ) - 1 ); shift_ += count; return result; } /// Cuts up to \p count from the bit-string /** Safe analog of \p cut(): if \p count is more than the rest of \p int_type, only the rest is returned. When \p eos() condition is met the function returns 0. */ int_type safe_cut( unsigned count ) { if ( eos()) return 0; unsigned rest = static_cast( rest_count()); if ( rest < count ) count = rest; return count ? 
cut( count ) : 0; } /// Resets the splitter void reset() noexcept { shift_ = 0; } /// Returns initial number int_type source() const { return number_; } /// Returns current bit offset from beginning of the number size_t bit_offset() const { return shift_; } /// Returns how many bits remain size_t rest_count() const { return sizeof( int_type ) * c_nBitPerByte - shift_; } /// Checks if \p count is multiple of 8 static constexpr bool is_correct( unsigned count ) { return count < sizeof( int_type ) * c_nBitPerByte; } private: //@cond int_type const number_; unsigned shift_; //@endcond }; /// Metafunctin to select a most suitable splitter for type \p BitString of size \p BitStringSize template struct select_splitter { typedef split_bitstring< BitString, BitStringSize > type; ///< metafunction result }; //@cond # define CDS_SELECT_NUMBER_SPLITTER( num_type ) \ template <> struct select_splitter { typedef number_splitter type; } CDS_SELECT_NUMBER_SPLITTER( int ); CDS_SELECT_NUMBER_SPLITTER( unsigned ); CDS_SELECT_NUMBER_SPLITTER( short ); CDS_SELECT_NUMBER_SPLITTER( unsigned short ); CDS_SELECT_NUMBER_SPLITTER( long ); CDS_SELECT_NUMBER_SPLITTER( unsigned long ); CDS_SELECT_NUMBER_SPLITTER( long long ); CDS_SELECT_NUMBER_SPLITTER( unsigned long long ); # undef CDS_SELECT_NUMBER_SPLITTER //@endcond }} // namespace cds::algo #endif // #ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H libcds-2.3.3/cds/compiler/000077500000000000000000000000001341244201700153405ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/backoff.h000066400000000000000000000031321341244201700171030ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_BACKOFF_IMPL_H #define CDSLIB_COMPILER_BACKOFF_IMPL_H #include #if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) # if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 # include # else # error "MS VC++ compiler: unsupported processor architecture" # endif #elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL # if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM8 # include # endif #else # error "Undefined compiler" #endif #endif // #ifndef CDSLIB_COMPILER_BACKOFF_IMPL_H libcds-2.3.3/cds/compiler/bitop.h000066400000000000000000000035131341244201700166300ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_BITOP_H #define CDSLIB_COMPILER_BITOP_H // Choose appropriate header for current architecture and compiler #if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) /************************************************************************/ /* MS Visual C++ */ /************************************************************************/ # if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 # include # endif #elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL /************************************************************************/ /* GCC */ /************************************************************************/ # if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 # include # endif #endif // Compiler choice // Generic (C) implementation #include #endif // #ifndef CDSLIB_COMPILER_BITOP_H libcds-2.3.3/cds/compiler/clang/000077500000000000000000000000001341244201700164245ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/clang/defs.h000066400000000000000000000075661341244201700175340ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_CLANG_DEFS_H #define CDSLIB_COMPILER_CLANG_DEFS_H // Compiler version #define CDS_COMPILER_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) // Compiler name #define CDS_COMPILER__NAME ("clang " __clang_version__) #define CDS_COMPILER__NICK "clang" #if CDS_COMPILER_VERSION < 30600 # error "Compiler version error. Clang version 3.6.0 and above is supported" #endif #if __cplusplus < CDS_CPLUSPLUS_11 # error C++11 and above is required #endif #if defined(_LIBCPP_VERSION) && !defined(CDS_USE_BOOST_ATOMIC) && CDS_COMPILER_VERSION < 30700 // Note: Clang libc++ atomic leads to program crash. // So, we use libcds atomic implementation # define CDS_USE_LIBCDS_ATOMIC #endif // clang for Windows #if defined( _MSC_VER ) # define CDS_OS_INTERFACE CDS_OSI_WINDOWS # if defined(_WIN64) # define CDS_OS_TYPE CDS_OS_WIN64 # define CDS_OS__NAME "Win64" # define CDS_OS__NICK "Win64" # elif defined(_WIN32) # define CDS_OS_TYPE CDS_OS_WIN32 # define CDS_OS__NAME "Win32" # define CDS_OS__NICK "Win32" # endif #endif #include #define alignof __alignof__ // C++11 thread_local keyword #if !(CDS_OS_TYPE == CDS_OS_OSX && CDS_COMPILER_VERSION < 30600) // OS X error? // See http://stackoverflow.com/questions/23791060/c-thread-local-storage-clang-503-0-40-mac-osx // http://stackoverflow.com/questions/28094794/why-does-apple-clang-disallow-c11-thread-local-when-official-clang-supports // clang 3.6 ok?.. 
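// Illustrative note (assumption, not library code): client code can gate
// C++11 thread_local usage on this feature macro, e.g.
//      #ifdef CDS_CXX11_THREAD_LOCAL_SUPPORT
//          thread_local unsigned tls_pass_count = 0;
//      #endif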
# define CDS_CXX11_THREAD_LOCAL_SUPPORT #endif // Attributes #if CDS_COMPILER_VERSION >= 30600 # if __cplusplus == CDS_CPLUSPLUS_11 // C++11 # define CDS_DEPRECATED( reason ) [[gnu::deprecated(reason)]] # else // C++14 # define CDS_DEPRECATED( reason ) [[deprecated(reason)]] # endif #endif #define CDS_NORETURN __attribute__((__noreturn__)) // ************************************************* // Features #if defined(__has_feature) && __has_feature(thread_sanitizer) # ifndef CDS_THREAD_SANITIZER_ENABLED # define CDS_THREAD_SANITIZER_ENABLED # endif #endif #if defined(__has_feature) && __has_feature(address_sanitizer) # ifndef CDS_ADDRESS_SANITIZER_ENABLED # define CDS_ADDRESS_SANITIZER_ENABLED # endif #endif // ************************************************* // Alignment macro #define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) #define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) #define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) // likely/unlikely #define cds_likely( expr ) __builtin_expect( !!( expr ), 1 ) #define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 ) // Exceptions #if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1 # define CDS_EXCEPTION_ENABLED #endif // double-width CAS support - only for libc++ // You can manually suppress wide-atomic support by defining in compiler command line: // for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC // for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC #ifdef _LIBCPP_VERSION # if CDS_BUILD_BITS == 64 # if !defined( CDS_DISABLE_128BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 ) # define CDS_DCAS_SUPPORT # endif # else # if !defined( CDS_DISABLE_64BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 ) # define CDS_DCAS_SUPPORT # endif # endif #endif //if constexpr support (C++17) #ifndef constexpr_if # if defined( __cpp_if_constexpr ) && __cpp_if_constexpr >= 201606 # define constexpr_if if constexpr # endif #endif #include #endif // #ifndef CDSLIB_COMPILER_GCC_DEFS_H libcds-2.3.3/cds/compiler/cxx11_atomic.h000066400000000000000000002660361341244201700200260ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_CXX11_ATOMIC_H #define CDSLIB_COMPILER_CXX11_ATOMIC_H //@cond #include // make_unsigned #include #include namespace cds { namespace cxx11_atomic { typedef enum memory_order { memory_order_relaxed, memory_order_consume, memory_order_acquire, memory_order_release, memory_order_acq_rel, memory_order_seq_cst } memory_order; }} // namespace cds::cxx11_atomic #if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) # if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 # include # else # error "MS VC++ compiler: unsupported processor architecture" # endif #elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL # if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC # include # elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 # include //# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7 //# include # else # error "GCC compiler: unsupported processor architecture. 
Try to use native C++11 atomic or boost.atomic" # endif #else # error "Undefined compiler" #endif namespace cds { namespace cxx11_atomic { // forward declarations template struct atomic; namespace details { template struct atomic_generic_ops; template struct atomic_integral_ops; template struct primary_type; template <> struct primary_type<1> { typedef std::uint8_t type; }; template <> struct primary_type<2> { typedef std::uint16_t type; }; template <> struct primary_type<4> { typedef std::uint32_t type; }; template <> struct primary_type<8> { typedef std::uint64_t type; }; #if CDS_BUILD_BITS == 64 && CDS_DCAS_SUPPORT template <> struct primary_type<16> { typedef unsigned __int128_t type; }; #endif template struct make_atomic_primary { typedef T source_type; typedef Primary primary_type; static primary_type volatile * ptr( source_type volatile * p ) noexcept { return reinterpret_cast(p); } static primary_type const volatile * ptr( source_type const volatile * p ) noexcept { return reinterpret_cast(p); } static primary_type val( source_type v ) noexcept { return *reinterpret_cast(&v); } static primary_type& ref( source_type& v ) noexcept { return reinterpret_cast(v); } static primary_type const& ref( source_type const& v ) noexcept { return reinterpret_cast(v); } static source_type ret( primary_type r ) noexcept { return *reinterpret_cast(&r); } }; template struct make_atomic_primary { typedef T source_type; typedef T primary_type; static primary_type volatile * ptr( source_type volatile * p ) noexcept { return p; } static primary_type const volatile * ptr( source_type const volatile * p ) noexcept { return p; } static primary_type val( source_type v ) noexcept { return v; } static primary_type& ref( source_type& v ) noexcept { return v; } static source_type ret( primary_type r ) noexcept { return r; } }; template struct atomic_integral_bitwise_ops { public: typedef typename std::make_unsigned::type unsigned_type; typedef atomic_generic_ops atomic_ops; static T fetch_and(T volatile * pDest, T val, memory_order order) noexcept { unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( reinterpret_cast(pDest), &cur, cur & unsigned_type(val), order, memory_order_relaxed )); return T(cur); } static T fetch_or(T volatile * pDest, T val, memory_order order) noexcept { unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( reinterpret_cast(pDest), &cur, cur | unsigned_type(val), order, memory_order_relaxed )); return T(cur); } static T fetch_xor(T volatile * pDest, T val, memory_order order) noexcept { unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( reinterpret_cast(pDest), &cur, cur ^ unsigned_type(val), order, memory_order_relaxed )); return T(cur); } }; // 8-bit atomic operations template struct atomic_generic_ops< T, 1, Primary > { typedef make_atomic_primary primary; // store static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept { platform::store8( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept { platform::store8( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store( T volatile * pDest, T v ) noexcept { 
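                // non-explicit overload: delegates with memory_order_seq_cst,
                // mirroring the std::atomic default ordering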
atomic_store_explicit( pDest, v, memory_order_seq_cst ); } static void atomic_store( T * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } // load static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load8( primary::ptr(pSrc), order )); } static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load8( primary::ptr(pSrc), order )); } static T atomic_load( T volatile const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } static T atomic_load( T const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } // exchange static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange8( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange8( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange( T volatile * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_exchange( T * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } // cas static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } }; template struct atomic_integral_ops< T, 1 > : atomic_generic_ops , atomic_integral_bitwise_ops { typedef atomic_integral_bitwise_ops 
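            // CAS-loop fallback implementing fetch_and/fetch_or/fetch_xor
            // for platforms that define no native bitwise primitives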
bitwise_ops; // fetch_add static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch8_add_defined return platform::fetch8_add( pDest, val, order ); # else T cur = atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_add( T volatile * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_add( T * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } // fetch_sub static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch8_sub_defined return platform::fetch8_sub( pDest, val, order ); # else T cur = atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_sub( T * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } // fetch_and static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch8_and_defined return platform::fetch8_and( pDest, val, order ); # else return bitwise_ops::fetch_and( pDest, val, order ); # endif } static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_and( T volatile * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_and( T * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } // fetch_or static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch8_or_defined return platform::fetch8_or( pDest, val, order ); # else return bitwise_ops::fetch_or( pDest, val, order ); # endif } static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_or( T volatile * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_or( T * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } // fetch_xor static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch8_xor_defined return platform::fetch8_xor( pDest, val, order ); # else return bitwise_ops::fetch_xor( pDest, val, order ); # endif } static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept { return 
atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_xor( T * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } }; // 16-bit atomic operations template struct atomic_generic_ops< T, 2, Primary > { typedef make_atomic_primary primary; // store static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept { platform::store16( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept { platform::store16( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store( T volatile * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } static void atomic_store( T * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } // load static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load16( primary::ptr(pSrc), order )); } static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load16( primary::ptr(pSrc), order )); } static T atomic_load( T volatile const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } static T atomic_load( T const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } // exchange static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange16( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange16( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange( T volatile * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_exchange( T * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } // cas static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, primary::val(desired), memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { 
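            // 'expected' must be non-null: on CAS failure the observed value
            // of *pDest is written back through it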
assert( expected ); return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } }; template struct atomic_integral_ops< T, 2 > : atomic_generic_ops< T, 2, T > , atomic_integral_bitwise_ops { typedef atomic_integral_bitwise_ops bitwise_ops; // fetch_add static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch16_add_defined return platform::fetch16_add( pDest, val, order ); # else T cur = atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_add( T volatile * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_add( T * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } // fetch_sub static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch16_sub_defined return platform::fetch16_sub( pDest, val, order ); # else T cur = atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_sub( T * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } // fetch_and static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch16_and_defined return platform::fetch16_and( pDest, val, order ); # else return bitwise_ops::fetch_and( pDest, val, order ); # endif } static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_and( T volatile * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_and( T * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } // fetch_or static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch16_or_defined return platform::fetch16_or( pDest, val, order ); # else return bitwise_ops::fetch_or( pDest, val, order ); # endif } static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_or( T volatile * pDest, T 
val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_or( T * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } // fetch_xor static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch16_xor_defined return platform::fetch16_xor( pDest, val, order ); # else return bitwise_ops::fetch_xor( pDest, val, order ); # endif } static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_xor( T * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } }; // 32-bit atomic operations template struct atomic_generic_ops< T, 4, Primary > { typedef make_atomic_primary primary; // store static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept { platform::store32( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept { platform::store32( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store( T volatile * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } static void atomic_store( T * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } // load static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load32( primary::ptr(pSrc), order )); } static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load32( primary::ptr(pSrc), order )); } static T atomic_load( T volatile const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } static T atomic_load( T const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } // exchange static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange32( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange32( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange( T volatile * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_exchange( T * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } // cas static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, 
memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } }; template struct atomic_integral_ops< T, 4 > : atomic_generic_ops< T, 4, T > , atomic_integral_bitwise_ops { typedef atomic_integral_bitwise_ops bitwise_ops; // fetch_add static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch32_add_defined return platform::fetch32_add( pDest, val, order ); # else T cur = atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_add( T volatile * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_add( T * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } // fetch_sub static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch32_sub_defined return platform::fetch32_sub( pDest, val, order ); # else T cur = atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_sub( T * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } // fetch_and static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch32_and_defined return platform::fetch32_and( pDest, val, order ); # else return bitwise_ops::fetch_and( pDest, val, order ); # endif } static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_and( 
T volatile * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_and( T * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } // fetch_or static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch32_or_defined return platform::fetch32_or( pDest, val, order ); # else return bitwise_ops::fetch_or( pDest, val, order ); # endif } static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_or( T volatile * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_or( T * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } // fetch_xor static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch32_xor_defined return platform::fetch32_xor( pDest, val, order ); # else return bitwise_ops::fetch_xor( pDest, val, order ); # endif } static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_xor( T * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } }; // 64-bit atomic operations template struct atomic_generic_ops< T, 8, Primary > { typedef make_atomic_primary primary; // store static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept { platform::store64( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept { platform::store64( primary::ptr(pDest), primary::val(v), order ); } static void atomic_store( T volatile * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } static void atomic_store( T * pDest, T v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } // load static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load64( primary::ptr(pSrc), order )); } static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept { return primary::ret( platform::load64( primary::ptr(pSrc), order )); } static T atomic_load( T volatile const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } static T atomic_load( T const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } // exchange static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange64( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept { return primary::ret( platform::exchange64( primary::ptr(pDest), primary::val(val), order )); } static T atomic_exchange( T volatile * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_exchange( T * pDest, T val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } // cas static bool atomic_compare_exchange_weak_explicit( T volatile 
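//
// Illustrative sketch only (not part of libcds): the compare-exchange
// overloads defined next report failure by storing the value actually
// observed back into '*expected'. A standalone std::atomic example of
// relying on that out-parameter; names are illustrative.
#if 0
#include <atomic>

bool claim_zero_to_one( std::atomic<int>& flag )
{
    int expected = 0;   // only the 0 -> 1 transition should succeed
    bool won = flag.compare_exchange_strong( expected, 1,
                                             std::memory_order_acq_rel,
                                             std::memory_order_relaxed );
    // on failure, 'expected' now holds the value another thread stored
    return won;
}
#endif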
* pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); } static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } }; template struct atomic_integral_ops< T, 8 > : atomic_generic_ops< T, 8, T > , atomic_integral_bitwise_ops { typedef atomic_integral_bitwise_ops bitwise_ops; typedef atomic_generic_ops general_ops; // fetch_add static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch64_add_defined return platform::fetch64_add( pDest, val, order ); # else T cur = general_ops::atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !general_ops::atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_add( T volatile * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_add( T * pDest, T val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } // fetch_sub static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch64_sub_defined return platform::fetch64_sub( pDest, val, order ); # else T cur = general_ops::atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !general_ops::atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); return cur; # endif } static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept { return 
atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_sub( T * pDest, T val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } // fetch_and static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch64_and_defined return platform::fetch64_and( pDest, val, order ); # else return bitwise_ops::fetch_and( pDest, val, order ); # endif } static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_and( T volatile * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_and( T * pDest, T val ) noexcept { return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); } // fetch_or static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch64_or_defined return platform::fetch64_or( pDest, val, order ); # else return bitwise_ops::fetch_or( pDest, val, order ); # endif } static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_or( T volatile * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_or( T * pDest, T val ) noexcept { return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); } // fetch_xor static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch64_xor_defined return platform::fetch64_xor( pDest, val, order ); # else return bitwise_ops::fetch_xor( pDest, val, order ); # endif } static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept { return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); } static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } static T atomic_fetch_xor( T * pDest, T val ) noexcept { return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); } }; // atomic pointer operations template struct atomic_pointer_base { // store static void atomic_store_explicit( T * volatile * pDest, T * v, memory_order order ) noexcept { platform::store_ptr( pDest, v, order ); } static void atomic_store_explicit( T * * pDest, T * v, memory_order order ) noexcept { platform::store_ptr( pDest, v, order ); } static void atomic_store( T * volatile * pDest, T * v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } static void atomic_store( T * * pDest, T * v ) noexcept { atomic_store_explicit( pDest, v, memory_order_seq_cst ); } // load static T * atomic_load_explicit( T * volatile const * pSrc, memory_order order ) noexcept { return platform::load_ptr( pSrc, order ); } static T * atomic_load_explicit( T * const * pSrc, memory_order order ) noexcept { return platform::load_ptr( pSrc, order ); } static T * atomic_load( T * volatile const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } static T * atomic_load( T * const * pSrc ) noexcept { return atomic_load_explicit( pSrc, memory_order_seq_cst ); } // exchange static T * 
atomic_exchange_explicit( T * volatile * pDest, T * val, memory_order order ) noexcept { return platform::exchange_ptr( pDest, val, order ); } static T * atomic_exchange_explicit( T * * pDest, T * val, memory_order order ) noexcept { return platform::exchange_ptr( pDest, val, order ); } static T * atomic_exchange( T * volatile * pDest, T * val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } static T * atomic_exchange( T * * pDest, T * val ) noexcept { return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); } // cas static bool atomic_compare_exchange_weak_explicit( T * volatile * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail ); } static bool atomic_compare_exchange_weak_explicit( T * * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail ); } static bool atomic_compare_exchange_weak( T * volatile * pDest, T ** expected, T * desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_weak( T ** pDest, T ** expected, T * desired ) noexcept { return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong_explicit( T * volatile * pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail ); } static bool atomic_compare_exchange_strong_explicit( T ** pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { assert( expected ); return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail ); } static bool atomic_compare_exchange_strong( T * volatile * pDest, T ** expected, T * desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } static bool atomic_compare_exchange_strong( T ** pDest, T ** expected, T * desired ) noexcept { return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); } }; template struct atomic_pointer: public atomic_pointer_base { typedef atomic_pointer_base base_class; // fetch_add static T * atomic_fetch_add_explicit(T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch_ptr_add_defined platform::fetch_ptr_add( pDest, val, order ); # else T * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); return cur; # endif } static T * atomic_fetch_add_explicit(T * * pDest, ptrdiff_t val , memory_order order) noexcept { return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); } static T * atomic_fetch_add( T * volatile * pDest, ptrdiff_t val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } static T * atomic_fetch_add( T ** pDest, ptrdiff_t val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } // fetch_sub static T * atomic_fetch_sub_explicit(T * 
volatile * pDest, ptrdiff_t val, memory_order order) noexcept { # ifdef CDS_ATOMIC_fetch_ptr_sub_defined platform::fetch_ptr_sub( pDest, val, order ); # else T * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); return cur; # endif } static T * atomic_fetch_sub_explicit(T ** pDest, ptrdiff_t val , memory_order order) noexcept { return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); } static T * atomic_fetch_sub( T volatile * pDest, ptrdiff_t val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } static T * atomic_fetch_sub( T * pDest, ptrdiff_t val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } }; template <> struct atomic_pointer: public atomic_pointer_base { typedef atomic_pointer_base base_class; // fetch_add static void * atomic_fetch_add_explicit(void * volatile * pDest, ptrdiff_t val, memory_order order) noexcept { void * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, reinterpret_cast(cur) + val, order, memory_order_relaxed )); return cur; } static void * atomic_fetch_add_explicit(void * * pDest, ptrdiff_t val , memory_order order) noexcept { return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); } static void * atomic_fetch_add( void * volatile * pDest, ptrdiff_t val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } static void * atomic_fetch_add( void ** pDest, ptrdiff_t val ) noexcept { return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); } // fetch_sub static void * atomic_fetch_sub_explicit(void * volatile * pDest, ptrdiff_t val, memory_order order) noexcept { void * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, reinterpret_cast(cur) - val, order, memory_order_relaxed )); return cur; } static void * atomic_fetch_sub_explicit(void ** pDest, ptrdiff_t val , memory_order order) noexcept { return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); } static void * atomic_fetch_sub( void * volatile * pDest, ptrdiff_t val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } static void * atomic_fetch_sub( void ** pDest, ptrdiff_t val ) noexcept { return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); } }; template struct atomic_integral { private: typename cds::details::aligned_type::type volatile m_val; //T volatile m_val; typedef atomic_integral_ops atomic_ops; public: typedef T atomic_type; public: bool is_lock_free() const volatile noexcept { return true; } bool is_lock_free() const noexcept { return true; } void store(T val, memory_order order = memory_order_seq_cst) volatile noexcept { atomic_ops::atomic_store_explicit( &m_val, val, order ); } void store(T val, memory_order order = memory_order_seq_cst) noexcept { atomic_ops::atomic_store_explicit( &m_val, val, order ); } T load(memory_order order = memory_order_seq_cst) const volatile noexcept { return atomic_ops::atomic_load_explicit( &m_val, order ); } T load(memory_order order = memory_order_seq_cst) const noexcept { return atomic_ops::atomic_load_explicit( &m_val, order ); } operator T() const volatile noexcept { return load(); } operator T() const noexcept { 
return load(); } T exchange(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_exchange_explicit( &m_val, val, order ); } T exchange(T val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_exchange_explicit( &m_val, val, order ); } bool compare_exchange_weak(T& expected, T desired , memory_order success_order, memory_order failure_order) volatile noexcept { return atomic_ops::atomic_compare_exchange_weak_explicit( &m_val, &expected, desired, success_order, failure_order ); } bool compare_exchange_weak(T& expected, T desired , memory_order success_order, memory_order failure_order) noexcept { return atomic_ops::atomic_compare_exchange_weak_explicit( &m_val, &expected, desired, success_order, failure_order ); } bool compare_exchange_strong(T& expected, T desired , memory_order success_order, memory_order failure_order) volatile noexcept { return atomic_ops::atomic_compare_exchange_strong_explicit( &m_val, &expected, desired, success_order, failure_order ); } bool compare_exchange_strong(T& expected, T desired , memory_order success_order, memory_order failure_order) noexcept { return atomic_ops::atomic_compare_exchange_strong_explicit( &m_val, &expected, desired, success_order, failure_order ); } bool compare_exchange_weak(T& expected, T desired , memory_order success_order = memory_order_seq_cst) volatile noexcept { return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_weak(T& expected, T desired , memory_order success_order = memory_order_seq_cst) noexcept { return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_strong(T& expected, T desired , memory_order success_order = memory_order_seq_cst) volatile noexcept { return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_strong(T& expected, T desired , memory_order success_order = memory_order_seq_cst) noexcept { return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); } T fetch_add(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_add_explicit( &m_val, val, order ); } T fetch_add(T val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_fetch_add_explicit( &m_val, val, order ); } T fetch_sub(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_sub_explicit( &m_val, val, order ); } T fetch_sub(T val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_fetch_sub_explicit( &m_val, val, order ); } T fetch_and(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_and_explicit( &m_val, val, order ); } T fetch_and(T val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_fetch_and_explicit( &m_val, val, order ); } T fetch_or(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_or_explicit( &m_val, val, order ); } T fetch_or(T val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_fetch_or_explicit( &m_val, val, order ); } T fetch_xor(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_xor_explicit( &m_val, val, order ); } T fetch_xor(T val, memory_order order = memory_order_seq_cst) noexcept { return 
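//
// Illustrative sketch only (not part of libcds): the operator overloads that
// follow are thin wrappers over the fetch_xxx members; note that the op=
// forms return the *new* value while fetch_add/fetch_sub return the *old*
// one. Standalone std::atomic illustration; names are illustrative.
#if 0
#include <atomic>

void demo( std::atomic<int>& counter )
{
    int before = counter.fetch_add( 5 );    // value prior to the addition
    int after  = counter += 5;              // value after the addition
    (void) before; (void) after;            // the two may not differ by exactly 10 under contention
}
#endif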
atomic_ops::atomic_fetch_xor_explicit( &m_val, val, order ); } atomic_integral() = default; constexpr atomic_integral(T val) noexcept : m_val(val) {} atomic_integral(const atomic_integral&) = delete; atomic_integral& operator=(const atomic_integral&) = delete; atomic_integral& operator=(const atomic_integral&) volatile = delete; T operator=(T val) volatile noexcept { store(val); return val; } T operator=(T val) noexcept { store(val); return val; } // Post inc/dec T operator++(int) volatile noexcept { return fetch_add( 1 ); } T operator++(int) noexcept { return fetch_add( 1 ); } T operator--(int) volatile noexcept { return fetch_sub( 1 ); } T operator--(int) noexcept { return fetch_sub( 1 ); } // Pre inc/dec T operator++() volatile noexcept { return fetch_add( 1 ) + 1; } T operator++() noexcept { return fetch_add( 1 ) + 1; } T operator--() volatile noexcept { return fetch_sub( 1 ) - 1; } T operator--() noexcept { return fetch_sub( 1 ) - 1; } // op= T operator+=(T val) volatile noexcept { return fetch_add( val ) + val; } T operator+=(T val) noexcept { return fetch_add( val ) + val; } T operator-=(T val) volatile noexcept { return fetch_sub( val ) - val; } T operator-=(T val) noexcept { return fetch_sub( val ) - val; } T operator&=(T val) volatile noexcept { return fetch_and( val ) & val; } T operator&=(T val) noexcept { return fetch_and( val ) & val; } T operator|=(T val) volatile noexcept { return fetch_or( val ) | val; } T operator|=(T val) noexcept { return fetch_or( val ) | val; } T operator^=(T val) volatile noexcept { return fetch_xor( val ) ^ val; } T operator^=(T val) noexcept { return fetch_xor( val ) ^ val; } }; template struct select_primary_type { typedef typename details::primary_type::type type; }; template <> struct select_primary_type { typedef bool type; }; } // namespace details template struct atomic { private: typedef details::atomic_generic_ops::type > atomic_ops; T volatile m_data; public: bool is_lock_free() const volatile noexcept { return true; } bool is_lock_free() const noexcept { return true; } void store(T val, memory_order order = memory_order_seq_cst) volatile noexcept { atomic_ops::atomic_store_explicit( &m_data, val, order ); } void store(T val, memory_order order = memory_order_seq_cst) noexcept { atomic_ops::atomic_store_explicit( &m_data, val, order ); } T load(memory_order order = memory_order_seq_cst) const volatile noexcept { return atomic_ops::atomic_load_explicit( &m_data, order ); } T load(memory_order order = memory_order_seq_cst) const noexcept { return atomic_ops::atomic_load_explicit( &m_data, order ); } operator T() const volatile noexcept { return load(); } operator T() const noexcept { return load(); } T exchange(T val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_exchange_explicit( &m_data, val, order ); } T exchange(T val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_exchange_explicit( &m_data, val, order ); } bool compare_exchange_weak(T& expected, T desired, memory_order success_order, memory_order failure_order) volatile noexcept { return atomic_ops::atomic_compare_exchange_weak_explicit( &m_data, &expected, desired, success_order, failure_order ); } bool compare_exchange_weak(T& expected, T desired, memory_order success_order, memory_order failure_order) noexcept { return atomic_ops::atomic_compare_exchange_weak_explicit( &m_data, &expected, desired, success_order, failure_order ); } bool compare_exchange_strong(T& expected, T desired, memory_order 
success_order, memory_order failure_order) volatile noexcept { return atomic_ops::atomic_compare_exchange_strong_explicit( &m_data, &expected, desired, success_order, failure_order ); } bool compare_exchange_strong(T& expected, T desired, memory_order success_order, memory_order failure_order) noexcept { return atomic_ops::atomic_compare_exchange_strong_explicit( &m_data, &expected, desired, success_order, failure_order ); } bool compare_exchange_weak(T& expected, T desired, memory_order success_order = memory_order_seq_cst) volatile noexcept { return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_weak(T& expected, T desired, memory_order success_order = memory_order_seq_cst) noexcept { return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_strong(T& expected, T desired, memory_order success_order = memory_order_seq_cst) volatile noexcept { return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_strong(T& expected, T desired, memory_order success_order = memory_order_seq_cst) noexcept { return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); } atomic() = default; constexpr atomic(T val) : m_data( val ) {} atomic(const atomic&) = delete; atomic& operator=(const atomic&) = delete; atomic& operator=(const atomic&) volatile = delete; T operator=(T val) volatile noexcept { store( val ); return val; } T operator=(T val) noexcept { store( val ); return val; } }; # define CDS_DECLARE_ATOMIC_INTEGRAL( _type ) \ template <> \ struct atomic<_type>: public details::atomic_integral<_type> \ { \ private: \ typedef details::atomic_integral<_type> base_class ; \ public: \ atomic() = default; \ atomic(_type val) noexcept : base_class(val) {} \ atomic(const atomic&) = delete; \ atomic& operator=(const atomic&) = delete; \ atomic& operator=(const atomic&) volatile = delete; \ _type operator=(_type val) volatile noexcept { return base_class::operator=(val); } \ _type operator=(_type val) noexcept { return base_class::operator=(val); } \ }; CDS_DECLARE_ATOMIC_INTEGRAL(char) CDS_DECLARE_ATOMIC_INTEGRAL(signed char) CDS_DECLARE_ATOMIC_INTEGRAL(unsigned char) CDS_DECLARE_ATOMIC_INTEGRAL(short) CDS_DECLARE_ATOMIC_INTEGRAL(unsigned short) CDS_DECLARE_ATOMIC_INTEGRAL(int) CDS_DECLARE_ATOMIC_INTEGRAL(unsigned int) CDS_DECLARE_ATOMIC_INTEGRAL(long) CDS_DECLARE_ATOMIC_INTEGRAL(unsigned long) CDS_DECLARE_ATOMIC_INTEGRAL(long long) CDS_DECLARE_ATOMIC_INTEGRAL(unsigned long long) //#if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40400 // CDS_DECLARE_ATOMIC_INTEGRAL(char16_t) // CDS_DECLARE_ATOMIC_INTEGRAL(char32_t) //#endif // CDS_DECLARE_ATOMIC_INTEGRAL(wchar_t) # undef CDS_DECLARE_ATOMIC_INTEGRAL template class atomic { private: T * volatile m_ptr; typedef details::atomic_pointer atomic_ops; public: bool is_lock_free() const volatile noexcept { return true; } bool is_lock_free() const noexcept { return true; } void store(T * val, memory_order order = memory_order_seq_cst) volatile noexcept { atomic_ops::atomic_store_explicit( &m_ptr, val, order ); } void store(T * val, memory_order order = memory_order_seq_cst) noexcept { atomic_ops::atomic_store_explicit( &m_ptr, val, order ); } T * load(memory_order order = memory_order_seq_cst) const volatile noexcept { return atomic_ops::atomic_load_explicit( &m_ptr, order ); } T * load(memory_order order = memory_order_seq_cst) const noexcept { return 
atomic_ops::atomic_load_explicit( &m_ptr, order ); } operator T *() const volatile noexcept { return load(); } operator T *() const noexcept { return load(); } T * exchange(T * val, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_exchange_explicit( &m_ptr, val, order ); } T * exchange(T * val, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_exchange_explicit( &m_ptr, val, order ); } bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order, memory_order failure_order) volatile noexcept { return atomic_ops::atomic_compare_exchange_weak_explicit( &m_ptr, &expected, desired, success_order, failure_order ); } bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order, memory_order failure_order) noexcept { return atomic_ops::atomic_compare_exchange_weak_explicit( &m_ptr, &expected, desired, success_order, failure_order ); } bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order, memory_order failure_order) volatile noexcept { return atomic_ops::atomic_compare_exchange_strong_explicit( &m_ptr, &expected, desired, success_order, failure_order ); } bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order, memory_order failure_order) noexcept { return atomic_ops::atomic_compare_exchange_strong_explicit( &m_ptr, &expected, desired, success_order, failure_order ); } bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) volatile noexcept { return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) noexcept { return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) volatile noexcept { return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); } bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) noexcept { return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); } T * fetch_add(ptrdiff_t offset, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_add_explicit( &m_ptr, offset, order ); } T * fetch_add(ptrdiff_t offset, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_fetch_add_explicit( &m_ptr, offset, order ); } T * fetch_sub(ptrdiff_t offset, memory_order order = memory_order_seq_cst) volatile noexcept { return atomic_ops::atomic_fetch_sub_explicit( &m_ptr, offset, order ); } T * fetch_sub(ptrdiff_t offset, memory_order order = memory_order_seq_cst) noexcept { return atomic_ops::atomic_fetch_sub_explicit( &m_ptr, offset, order ); } atomic() = default; constexpr atomic(T * val) noexcept : m_ptr( val ) {} atomic(const atomic&) = delete; atomic& operator=(const atomic&) = delete; atomic& operator=(const atomic&) volatile = delete; T * operator=(T * val) volatile noexcept { store( val ); return val; } T * operator=(T * val) noexcept { store( val ); return val; } }; // Atomic typedefs typedef atomic atomic_bool; typedef atomic atomic_char; typedef atomic atomic_schar; typedef atomic atomic_uchar; typedef atomic atomic_short; typedef atomic atomic_ushort; typedef atomic atomic_int; typedef atomic atomic_uint; typedef 
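//
// Illustrative sketch only (not part of libcds): the pointer specialization
// above is what lock-free containers build on, e.g. a minimal Treiber-style
// push over an atomic head pointer. Standalone std::atomic sketch; all names
// are illustrative.
#if 0
#include <atomic>

struct node { node* next; };

void push( std::atomic<node*>& head, node* n )
{
    n->next = head.load( std::memory_order_relaxed );
    // on failure, n->next is refreshed with the current head, so just retry
    while ( !head.compare_exchange_weak( n->next, n,
                                         std::memory_order_release,
                                         std::memory_order_relaxed ))
        ;
}
#endif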
atomic atomic_long; typedef atomic atomic_ulong; typedef atomic atomic_llong; typedef atomic atomic_ullong; #if ( CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40400 ) || CDS_COMPILER == CDS_COMPILER_CLANG typedef atomic atomic_char16_t; typedef atomic atomic_char32_t; #endif typedef atomic atomic_wchar_t; typedef atomic atomic_int_least8_t; typedef atomic atomic_uint_least8_t; typedef atomic atomic_int_least16_t; typedef atomic atomic_uint_least16_t; typedef atomic atomic_int_least32_t; typedef atomic atomic_uint_least32_t; typedef atomic atomic_int_least64_t; typedef atomic atomic_uint_least64_t; typedef atomic atomic_int_fast8_t; typedef atomic atomic_uint_fast8_t; typedef atomic atomic_int_fast16_t; typedef atomic atomic_uint_fast16_t; typedef atomic atomic_int_fast32_t; typedef atomic atomic_uint_fast32_t; typedef atomic atomic_int_fast64_t; typedef atomic atomic_uint_fast64_t; typedef atomic atomic_intptr_t; typedef atomic atomic_uintptr_t; typedef atomic atomic_size_t; typedef atomic atomic_ptrdiff_t; typedef atomic atomic_intmax_t; typedef atomic atomic_uintmax_t; template static inline bool atomic_is_lock_free(const volatile atomic * p) noexcept { return p->is_lock_free(); } template static inline bool atomic_is_lock_free(const atomic * p ) noexcept { return p->is_lock_free(); } /* template static inline void atomic_init(volatile atomic * p, T val) noexcept { p->init( val ); } template static inline void atomic_init( atomic * p, T val) noexcept { p->init( val ); } */ template static inline void atomic_store(volatile atomic* p, T val) noexcept { p->store(val); } template static inline void atomic_store(atomic* p, T val) noexcept { p->store( val ); } template static inline void atomic_store_explicit(volatile atomic* p, T val, memory_order order) noexcept { p->store( val, order ); } template static inline void atomic_store_explicit(atomic* p, T val, memory_order order) noexcept { p->store( val, order ); } template static inline T atomic_load(const volatile atomic* p) noexcept { return p->load(); } template static inline T atomic_load(const atomic* p) noexcept { return p->load(); } template static inline T atomic_load_explicit(const volatile atomic* p, memory_order order) noexcept { return p->load( order ); } template static inline T atomic_load_explicit(const atomic* p, memory_order order) noexcept { return p->load( order ); } template static inline T atomic_exchange(volatile atomic* p, T val) noexcept { return p->exchange( val ); } template static inline T atomic_exchange(atomic* p, T val ) noexcept { return p->exchange( val ); } template static inline T atomic_exchange_explicit(volatile atomic* p, T val, memory_order order) noexcept { return p->exchange( val, order ); } template static inline T atomic_exchange_explicit(atomic* p, T val, memory_order order) noexcept { return p->exchange( val, order ); } template static inline bool atomic_compare_exchange_weak(volatile atomic* p, T* expected, T desired) noexcept { return p->compare_exchange_weak( *expected, desired ); } template static inline bool atomic_compare_exchange_weak(atomic* p, T* expected, T desired) noexcept { return p->compare_exchange_weak( *expected, desired ); } template static inline bool atomic_compare_exchange_strong(volatile atomic* p, T* expected, T desired) noexcept { return p->compare_exchange_strong( *expected, desired ); } template static inline bool atomic_compare_exchange_strong(atomic* p, T* expected, T desired) noexcept { return p->compare_exchange_strong( *expected, desired ); } template 
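//
// Illustrative sketch only (not part of libcds): the free functions in this
// section mirror the C-style std API, so either spelling can be used at call
// sites. Standalone std::atomic equivalent:
#if 0
#include <atomic>

void demo()
{
    std::atomic<unsigned> n( 0 );
    std::atomic_store( &n, 42u );                       // same effect as n.store( 42u )
    unsigned old = std::atomic_fetch_add( &n, 1u );     // same effect as n.fetch_add( 1u )
    (void) old;
}
#endif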
static inline bool atomic_compare_exchange_weak_explicit(volatile atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept { return p->compare_exchange_weak( *expected, desired, success_order, failure_order ); } template static inline bool atomic_compare_exchange_weak_explicit(atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept { return p->compare_exchange_weak( *expected, desired, success_order, failure_order ); } template static inline bool atomic_compare_exchange_strong_explicit(volatile atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept { return p->compare_exchange_strong( *expected, desired, success_order, failure_order ); } template static inline bool atomic_compare_exchange_strong_explicit(atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept { return p->compare_exchange_strong( *expected, desired, success_order, failure_order ); } template static inline T atomic_fetch_add(volatile atomic* p, T val) noexcept { return p->fetch_add( val ); } template static inline T atomic_fetch_add(atomic* p, T val) noexcept { return p->fetch_add( val ); } template static inline T * atomic_fetch_add(volatile atomic* p, ptrdiff_t offset) noexcept { return p->fetch_add( offset ); } template static inline T * atomic_fetch_add(atomic* p, ptrdiff_t offset) noexcept { return p->fetch_add( offset ); } template static inline T atomic_fetch_add_explicit(volatile atomic* p, T val, memory_order order) noexcept { return p->fetch_add( val, order ); } template static inline T atomic_fetch_add_explicit(atomic* p, T val, memory_order order) noexcept { return p->fetch_add( val, order ); } template static inline T * atomic_fetch_add_explicit(volatile atomic* p, ptrdiff_t offset, memory_order order) noexcept { return p->fetch_add( offset, order ); } template static inline T * atomic_fetch_add_explicit(atomic* p, ptrdiff_t offset, memory_order order) noexcept { return p->fetch_add( offset, order ); } template static inline T atomic_fetch_sub(volatile atomic* p, T val) noexcept { return p->fetch_sub( val ); } template static inline T atomic_fetch_sub(atomic* p, T val) noexcept { return p->fetch_sub( val ); } template static inline T * atomic_fetch_sub(volatile atomic* p, ptrdiff_t offset) noexcept { return p->fetch_sub( offset ); } template static inline T * atomic_fetch_sub(atomic* p, ptrdiff_t offset) noexcept { return p->fetch_sub( offset ); } template static inline T atomic_fetch_sub_explicit(volatile atomic* p, T val, memory_order order) noexcept { return p->fetch_sub( val, order ); } template static inline T atomic_fetch_sub_explicit(atomic* p, T val, memory_order order) noexcept { return p->fetch_sub( val, order ); } template static inline T * atomic_fetch_sub_explicit(volatile atomic* p, ptrdiff_t offset, memory_order order) noexcept { return p->fetch_sub( offset, order ); } template static inline T * atomic_fetch_sub_explicit(atomic* p, ptrdiff_t offset, memory_order order) noexcept { return p->fetch_sub( offset, order ); } template static inline T atomic_fetch_and(volatile atomic* p, T val) noexcept { return p->fetch_and( val ); } template static inline T atomic_fetch_and(atomic* p, T val) noexcept { return p->fetch_and( val ); } template static inline T atomic_fetch_and_explicit(volatile atomic* p, T val, memory_order order) noexcept { return p->fetch_and( val, order ); } template static inline T 
    atomic_fetch_and_explicit(atomic<T>* p, T val, memory_order order) noexcept { return p->fetch_and( val, order ); }

    template <typename T>
    static inline T atomic_fetch_or(volatile atomic<T>* p, T val) noexcept { return p->fetch_or( val ); }
    template <typename T>
    static inline T atomic_fetch_or(atomic<T>* p, T val) noexcept { return p->fetch_or( val ); }
    template <typename T>
    static inline T atomic_fetch_or_explicit(volatile atomic<T>* p, T val, memory_order order) noexcept { return p->fetch_or( val, order ); }
    template <typename T>
    static inline T atomic_fetch_or_explicit(atomic<T>* p, T val, memory_order order) noexcept { return p->fetch_or( val, order ); }

    template <typename T>
    static inline T atomic_fetch_xor(volatile atomic<T>* p, T val) noexcept { return p->fetch_xor( val ); }
    template <typename T>
    static inline T atomic_fetch_xor(atomic<T>* p, T val) noexcept { return p->fetch_xor( val ); }
    template <typename T>
    static inline T atomic_fetch_xor_explicit(volatile atomic<T>* p, T val, memory_order order) noexcept { return p->fetch_xor( val, order ); }
    template <typename T>
    static inline T atomic_fetch_xor_explicit(atomic<T>* p, T val, memory_order order) noexcept { return p->fetch_xor( val, order ); }

    // Atomic flag type
    typedef struct atomic_flag
    {
        void clear( memory_order order = memory_order_seq_cst ) volatile noexcept
        {
            assert( order != memory_order_acquire
                 && order != memory_order_acq_rel
                 && order != memory_order_consume );
            platform::atomic_flag_clear( &m_Flag, order );
        }
        void clear( memory_order order = memory_order_seq_cst ) noexcept
        {
            assert( order != memory_order_acquire
                 && order != memory_order_acq_rel
                 && order != memory_order_consume );
            platform::atomic_flag_clear( &m_Flag, order );
        }

        bool test_and_set( memory_order order = memory_order_seq_cst ) volatile noexcept { return platform::atomic_flag_tas( &m_Flag, order ); }
        bool test_and_set( memory_order order = memory_order_seq_cst ) noexcept { return platform::atomic_flag_tas( &m_Flag, order ); }

        atomic_flag() = default;
        atomic_flag(const atomic_flag&) = delete;
        atomic_flag& operator=(const atomic_flag&) = delete;
        atomic_flag& operator=(const atomic_flag&) volatile = delete;

        platform::atomic_flag_type volatile m_Flag;
    } atomic_flag;

    static inline bool atomic_flag_test_and_set(volatile atomic_flag* p) noexcept { return p->test_and_set(); }
    static inline bool atomic_flag_test_and_set(atomic_flag * p) noexcept { return p->test_and_set(); }
    static inline bool atomic_flag_test_and_set_explicit(volatile atomic_flag* p, memory_order order) noexcept { return p->test_and_set( order ); }
    static inline bool atomic_flag_test_and_set_explicit(atomic_flag* p, memory_order order) noexcept { return p->test_and_set( order ); }
    static inline void atomic_flag_clear(volatile atomic_flag* p) noexcept { return p->clear(); }
    static inline void atomic_flag_clear(atomic_flag* p) noexcept { return p->clear(); }
    static inline void atomic_flag_clear_explicit(volatile atomic_flag* p, memory_order order) noexcept { return p->clear( order ); }
    static inline void atomic_flag_clear_explicit(atomic_flag* p, memory_order order) noexcept { return p->clear( order ); }

    // Fences
    static inline void atomic_thread_fence(memory_order order) noexcept
    {
        platform::thread_fence( order );
        CDS_COMPILER_RW_BARRIER;
    }
    static inline void atomic_signal_fence(memory_order order) noexcept
    {
        platform::signal_fence( order );
    }

}} // namespace cds::cxx11_atomic
//@endcond

#endif // #ifndef CDSLIB_COMPILER_CXX11_ATOMIC_H
//============================================================================
// libcds-2.3.3/cds/compiler/defs.h
//============================================================================
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_DEFS_H
#define CDSLIB_COMPILER_DEFS_H

// __cplusplus values
#define CDS_CPLUSPLUS_11    201103L
#define CDS_CPLUSPLUS_14    201402L
#define CDS_CPLUSPLUS_17    201703L

// VC 2017 is not fully C++11-compatible yet
//#if __cplusplus < CDS_CPLUSPLUS_11
//#   error C++11 and above is required
//#endif

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   include <cds/compiler/vc/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_GCC
#   include <cds/compiler/gcc/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_INTEL
#   include <cds/compiler/icl/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_CLANG
#   include <cds/compiler/clang/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_UNKNOWN
#   error Unknown compiler. Compilation aborted
#else
#   error Unknown value of CDS_COMPILER macro
#endif

#ifndef CDS_EXPORT_API
#   define CDS_EXPORT_API
#endif

#ifndef cds_likely
#   define cds_likely( expr )   expr
#   define cds_unlikely( expr ) expr
#endif

// "if constexpr" support (C++17)
#ifndef constexpr_if
#   define constexpr_if if
#endif

// Features
#include <cds/compiler/feature_tsan.h>

#endif // #ifndef CDSLIB_COMPILER_DEFS_H
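//
// Illustrative sketch only (not part of libcds): client code can dispatch on
// the same CDS_COMPILER macros this header uses (their values are defined
// elsewhere in the library). MY_APP_NOINLINE is a hypothetical client-side
// macro, shown only to demonstrate the dispatch pattern.
#if 0
#if CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG
#   define MY_APP_NOINLINE __attribute__((noinline))
#elif CDS_COMPILER == CDS_COMPILER_MSVC
#   define MY_APP_NOINLINE __declspec(noinline)
#else
#   define MY_APP_NOINLINE
#endif
#endif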
//============================================================================
// libcds-2.3.3/cds/compiler/feature_tsan.h
//============================================================================
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_FEATURE_TSAN_H
#define CDSLIB_COMPILER_FEATURE_TSAN_H

// Thread Sanitizer annotations.
// From http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/annotate_happens_before.cc?view=markup

//@cond
#ifdef CDS_THREAD_SANITIZER_ENABLED

#   define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)   AnnotateHappensBefore(__FILE__, __LINE__, reinterpret_cast<void*>(addr))
#   define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)    AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast<void*>(addr))

#   define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN     AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
#   define CDS_TSAN_ANNOTATE_IGNORE_READS_END       AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
#   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN    AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
#   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END      AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
#   define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN \
        CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; \
        CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN
#   define CDS_TSAN_ANNOTATE_IGNORE_RW_END \
        CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; \
        CDS_TSAN_ANNOTATE_IGNORE_READS_END

#   define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) AnnotateNewMemory( __FILE__, __LINE__, reinterpret_cast<void*>(addr), sz )

    // Publish/unpublish - DEPRECATED
#if 0
#   define CDS_TSAN_ANNOTATE_PUBLISH_MEMORY_RANGE( addr, sz )   AnnotatePublishMemoryRange( __FILE__, __LINE__, reinterpret_cast<void*>(addr), sz )
#   define CDS_TSAN_ANNOTATE_UNPUBLISH_MEMORY_RANGE( addr, sz ) AnnotateUnpublishMemoryRange( __FILE__, __LINE__, reinterpret_cast<void*>(addr), sz )
#endif

#   define CDS_TSAN_ANNOTATE_MUTEX_CREATE( addr )   AnnotateRWLockCreate( __FILE__, __LINE__, reinterpret_cast<void*>(addr))
#   define CDS_TSAN_ANNOTATE_MUTEX_DESTROY( addr )  AnnotateRWLockDestroy( __FILE__, __LINE__, reinterpret_cast<void*>(addr))
    // must be called after actual acquire
#   define CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( addr ) AnnotateRWLockAcquired( __FILE__, __LINE__, reinterpret_cast<void*>(addr), 1 )
    // must be called before actual release
#   define CDS_TSAN_ANNOTATE_MUTEX_RELEASED( addr ) AnnotateRWLockReleased( __FILE__, __LINE__, reinterpret_cast<void*>(addr), 1 )

    // provided by TSan
    extern "C" {
        void AnnotateHappensBefore(const char *f, int l, void *addr);
        void AnnotateHappensAfter(const char *f, int l, void *addr);

        void AnnotateIgnoreReadsBegin(const char *f, int l);
        void AnnotateIgnoreReadsEnd(const char *f, int l);
        void AnnotateIgnoreWritesBegin(const char *f, int l);
        void AnnotateIgnoreWritesEnd(const char *f, int l);

#if 0
        void AnnotatePublishMemoryRange( const char *f, int l, void * mem, size_t size );
        void AnnotateUnpublishMemoryRange( const char *f, int l, void * addr, size_t size );
#endif
        void AnnotateNewMemory( const char *f, int l, void * mem, size_t size );

        void AnnotateRWLockCreate( const char *f, int l, void* m );
        void AnnotateRWLockDestroy( const char *f, int l, void* m );
        void AnnotateRWLockAcquired( const char *f, int l, void *m, long is_w );
        void AnnotateRWLockReleased( const char *f, int l, void *m, long is_w );
    }

#else // CDS_THREAD_SANITIZER_ENABLED

#   define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
#   define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)

#   define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN
#   define CDS_TSAN_ANNOTATE_IGNORE_READS_END
#   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN
#   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END
#   define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN
#   define CDS_TSAN_ANNOTATE_IGNORE_RW_END

#if 0
#   define CDS_TSAN_ANNOTATE_PUBLISH_MEMORY_RANGE( addr, sz )
#   define CDS_TSAN_ANNOTATE_UNPUBLISH_MEMORY_RANGE( addr, sz )
#endif

#   define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz )

#   define CDS_TSAN_ANNOTATE_MUTEX_CREATE( addr )
#   define CDS_TSAN_ANNOTATE_MUTEX_DESTROY( addr )
#   define CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( addr )
#   define CDS_TSAN_ANNOTATE_MUTEX_RELEASED( addr )

#endif // CDS_THREAD_SANITIZER_ENABLED
//@endcond

#endif // #ifndef CDSLIB_COMPILER_FEATURE_TSAN_H

//============================================================================
// libcds-2.3.3/cds/compiler/gcc/amd64/backoff.h
//============================================================================
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H
#define CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H

//@cond none
namespace cds { namespace backoff {
    namespace gcc { namespace amd64 {

#       define CDS_backoff_nop_defined
        static inline void backoff_nop()
        {
            asm volatile ( "nop;" );
        }

#       define CDS_backoff_hint_defined
        static inline void backoff_hint()
        {
            asm volatile ( "pause;" );
        }

    }} // namespace gcc::amd64

    namespace platform {
        using namespace gcc::amd64;
    }
}} // namespace cds::backoff
//@endcond

#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H
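//
// Illustrative sketch only (not part of libcds): a pause-style hint such as
// backoff_hint() above is typically issued inside a bounded spin before
// falling back to yielding the thread. Standalone sketch for GCC/Clang on
// x86; __builtin_ia32_pause() emits the same "pause" instruction.
#if 0
#include <atomic>
#include <thread>

void spin_acquire( std::atomic_flag& lock )
{
    unsigned spins = 0;
    while ( lock.test_and_set( std::memory_order_acquire )) {
        if ( ++spins < 1024 )
            __builtin_ia32_pause();     // keep the core polite to its SMT sibling
        else
            std::this_thread::yield();  // stop burning the CPU after a while
    }
}
#endif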
//============================================================================
// libcds-2.3.3/cds/compiler/gcc/amd64/bitop.h
//============================================================================
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_GCC_AMD64_BITOP_H
#define CDSLIB_COMPILER_GCC_AMD64_BITOP_H

//@cond none
namespace cds { namespace bitop { namespace platform { namespace gcc { namespace amd64 {

    // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
#   define cds_bitop_msb32_DEFINED
    static inline int msb32( uint32_t nArg )
    {
        int nRet;
        __asm__ __volatile__ (
            "bsrl %[nArg], %[nRet]     ;\n\t"
            "jnz 1f                    ;\n\t"
            "xorl %[nRet], %[nRet]     ;\n\t"
            "subl $1, %[nRet]          ;\n\t"
            "1:"
            "addl $1, %[nRet]          ;\n\t"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return nRet;
    }

    // MSB - return index (0..31) of most significant bit in nArg.
    // Condition: nArg != 0
#   define cds_bitop_msb32nz_DEFINED
    static inline int msb32nz( uint32_t nArg )
    {
        assert( nArg != 0 );
        int nRet;
        __asm__ __volatile__ (
            "bsrl %[nArg], %[nRet]     ;"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return nRet;
    }

    // LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return 0
#   define cds_bitop_lsb32_DEFINED
    static inline int lsb32( uint32_t nArg )
    {
        int nRet;
        __asm__ __volatile__ (
            "bsfl %[nArg], %[nRet]     ;"
            "jnz 1f                    ;"
            "xorl %[nRet], %[nRet]     ;"
            "subl $1, %[nRet]          ;"
            "1:"
            "addl $1, %[nRet]          ;"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return nRet;
    }

    // LSB - return index (0..31) of least significant bit in nArg.
    // Condition: nArg != 0
#   define cds_bitop_lsb32nz_DEFINED
    static inline int lsb32nz( uint32_t nArg )
    {
        assert( nArg != 0 );
        int nRet;
        __asm__ __volatile__ (
            "bsfl %[nArg], %[nRet]     ;"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return nRet;
    }

    // MSB - return index (1..64) of most significant bit in nArg. If nArg == 0 return 0
#   define cds_bitop_msb64_DEFINED
    static inline int msb64( uint64_t nArg )
    {
        uint64_t nRet;
        asm volatile (
            "bsrq %[nArg], %[nRet]     ;\n\t"
            "jnz 1f                    ;\n\t"
            "xorq %[nRet], %[nRet]     ;\n\t"
            "subq $1, %[nRet]          ;\n\t"
            "1:"
            "addq $1, %[nRet]          ;\n\t"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return (int) nRet;
    }

    // MSB - return index (0..63) of most significant bit in nArg.
    // Condition: nArg != 0
#   define cds_bitop_msb64nz_DEFINED
    static inline int msb64nz( uint64_t nArg )
    {
        assert( nArg != 0 );
        uint64_t nRet;
        __asm__ __volatile__ (
            "bsrq %[nArg], %[nRet]     ;"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return (int) nRet;
    }

    // LSB - return index (1..64) of least significant bit in nArg. If nArg == 0 return 0
#   define cds_bitop_lsb64_DEFINED
    static inline int lsb64( uint64_t nArg )
    {
        uint64_t nRet;
        __asm__ __volatile__ (
            "bsfq %[nArg], %[nRet]     ;"
            "jnz 1f                    ;"
            "xorq %[nRet], %[nRet]     ;"
            "subq $1, %[nRet]          ;"
            "1:"
            "addq $1, %[nRet]          ;"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return (int) nRet;
    }

    // LSB - return index (0..63) of least significant bit in nArg.
    // Condition: nArg != 0
#   define cds_bitop_lsb64nz_DEFINED
    static inline int lsb64nz( uint64_t nArg )
    {
        assert( nArg != 0 );
        uint64_t nRet;
        __asm__ __volatile__ (
            "bsfq %[nArg], %[nRet]     ;"
            : [nRet] "=a" (nRet)
            : [nArg] "r" (nArg)
            : "cc"
            );
        return (int) nRet;
    }

}} // namespace gcc::amd64

    using namespace gcc::amd64;

}}} // namespace cds::bitop::platform
//@endcond

#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_BITOP_H
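//
// Illustrative sketch only (not part of libcds): msb32() above returns a
// 1-based bit index (and 0 for a zero argument), so floor(log2(x)) for
// x != 0 is msb32(x) - 1. A standalone equivalent via a GCC/Clang builtin:
#if 0
#include <cstdint>
#include <cassert>

inline int floor_log2( uint32_t x )
{
    assert( x != 0 );
    return 31 - __builtin_clz( x );   // same value as msb32( x ) - 1
}
#endif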
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H #define CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H #include #include //@cond namespace cds { namespace cxx11_atomic { namespace platform { inline namespace gcc { inline namespace amd64 { //----------------------------------------------------------------------------- // 64bit primitives //----------------------------------------------------------------------------- template static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); T prev = expected; fence_before(mo_success); __asm__ __volatile__ ( "lock ; cmpxchgq %[desired], %[pDest]" : [prev] "+a" (prev), [pDest] "+m" (*pDest) : [desired] "r" (desired) ); bool success = (prev == expected); expected = prev; if (success) fence_after(mo_success); else fence_after(mo_fail); return success; } template static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T load64( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); assert( cds::details::is_aligned( pSrc, 8 )); T v = *pSrc; fence_after_load( order ); return v; } template static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); fence_before(order); __asm__ __volatile__ ( "xchgq %[v], %[pDest]" : [v] "+r" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } template static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 8 )); if (order != memory_order_seq_cst) { fence_before(order); *pDest = val; } else { exchange64( pDest, val, order); } } # define CDS_ATOMIC_fetch64_add_defined template static inline T fetch64_add( T volatile * pDest, T v, memory_order order) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); fence_before(order); __asm__ __volatile__ ( "lock ; xaddq %[v], %[pDest]" : [v] "+r" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } # define CDS_ATOMIC_fetch64_sub_defined template static inline T fetch64_sub( T volatile * pDest, T v, memory_order order) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); fence_before(order); __asm__ __volatile__ ( "negq %[v] ; \n" "lock ; xaddq %[v], %[pDest]" : [v] "+r" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } //----------------------------------------------------------------------------- // pointer primitives //----------------------------------------------------------------------------- template static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept { 
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order ); } template static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange_ptr( pDest, src, order ); } } template static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); T * v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast( &expected ), (uint64_t) desired, mo_success, mo_fail ); } template static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); } }} // namespace gcc::amd64 } // namespace platform }} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H libcds-2.3.3/cds/compiler/gcc/arm7/000077500000000000000000000000001341244201700167425ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/gcc/arm7/backoff.h000066400000000000000000000013111341244201700205020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H #define CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H //@cond none namespace cds { namespace backoff { namespace gcc { namespace arm7 { # define CDS_backoff_hint_defined static inline void backoff_hint() { asm volatile( "yield" ::: "memory" ); } }} // namespace gcc::arm7 namespace platform { using namespace gcc::arm7; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H libcds-2.3.3/cds/compiler/gcc/arm8/000077500000000000000000000000001341244201700167435ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/gcc/arm8/backoff.h000066400000000000000000000013111341244201700205030ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
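Note that only fetch-add and fetch-sub get dedicated lock xadd implementations in the amd64 file above; any other read-modify-write has to be synthesized from the CAS primitive. A sketch of that standard loop, expressed with the names defined in that file (fetch64_or itself is hypothetical, not part of libcds):

template <typename T>
static inline T fetch64_or( T volatile * pDest, T bits, memory_order order ) noexcept
{
    T cur = load64( pDest, memory_order_relaxed );
    // cas64_weak reloads `cur` with the freshly observed value on failure,
    // so the loop converges without an explicit re-read
    while ( !cas64_weak( pDest, cur, static_cast<T>( cur | bits ), order, memory_order_relaxed ))
        ;
    return cur;
}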
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H #define CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H //@cond none namespace cds { namespace backoff { namespace gcc { namespace arm8 { # define CDS_backoff_hint_defined static inline void backoff_hint() { asm volatile( "yield" ::: "memory" ); } }} // namespace gcc::arm8 namespace platform { using namespace gcc::arm8; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H libcds-2.3.3/cds/compiler/gcc/compiler_barriers.h000066400000000000000000000010321341244201700217440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H #define CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H #define CDS_COMPILER_RW_BARRIER __asm__ __volatile__ ( "" ::: "memory" ) #define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER #define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER #endif // #ifndef CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H libcds-2.3.3/cds/compiler/gcc/compiler_macro.h000066400000000000000000000144741341244201700212520ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_COMPILER_MACRO_H #define CDSLIB_COMPILER_GCC_COMPILER_MACRO_H // OS interface && OS name #ifndef CDS_OS_TYPE # if defined( __linux__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_LINUX # define CDS_OS__NAME "linux" # define CDS_OS__NICK "linux" # elif defined( __sun__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_SUN_SOLARIS # define CDS_OS__NAME "Sun Solaris" # define CDS_OS__NICK "sun" # elif defined( __hpux__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_HPUX # define CDS_OS__NAME "HP-UX" # define CDS_OS__NICK "hpux" # elif defined( _AIX ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_AIX # define CDS_OS__NAME "AIX" # define CDS_OS__NICK "aix" # elif defined( __FreeBSD__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_FREE_BSD # define CDS_OS__NAME "FreeBSD" # define CDS_OS__NICK "freebsd" # elif defined( __OpenBSD__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_OPEN_BSD # define CDS_OS__NAME "OpenBSD" # define CDS_OS__NICK "openbsd" # elif defined( __NetBSD__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_NET_BSD # define CDS_OS__NAME "NetBSD" # define CDS_OS__NICK "netbsd" # elif defined(__MINGW32__) || defined( __MINGW64__) # define CDS_OS_INTERFACE CDS_OSI_WINDOWS # define CDS_OS_TYPE CDS_OS_MINGW # define CDS_OS__NAME "MinGW" # define CDS_OS__NICK "mingw" # elif defined(__MACH__) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_OSX # define CDS_OS__NAME "OS X" # define CDS_OS__NICK "osx" # else # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_PTHREAD # define CDS_OS__NAME "pthread" # define CDS_OS__NICK "pthread" # endif #endif // #ifndef CDS_OS_TYPE // Processor architecture #if defined(__arm__) && !defined(__ARM_ARCH) // GCC 4.6 does not defined __ARM_ARCH # if defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH_8S__) || defined(__aarch64__) || defined(__ARM_ARCH_ISA_A64) # define __ARM_ARCH 
8 # elif defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7S__) # define __ARM_ARCH 7 # else # define __ARM_ARCH 5 # endif #endif #if defined(__x86_64__) || defined(__amd64__) || defined(__amd64) # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 # define CDS_BUILD_BITS 64 # define CDS_PROCESSOR__NAME "Intel x86-64" # define CDS_PROCESSOR__NICK "amd64" #elif defined(__i386__) # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 # define CDS_BUILD_BITS 32 # define CDS_PROCESSOR__NAME "Intel x86" # define CDS_PROCESSOR__NICK "x86" #elif defined(sparc) || defined (__sparc__) # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_SPARC # define CDS_PROCESSOR__NAME "Sparc" # define CDS_PROCESSOR__NICK "sparc" # ifdef __arch64__ # define CDS_BUILD_BITS 64 # else # error Sparc 32bit is not supported # endif #elif defined( __ia64__) # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_IA64 # define CDS_BUILD_BITS 64 # define CDS_PROCESSOR__NAME "Intel IA64" # define CDS_PROCESSOR__NICK "ia64" #elif defined(_ARCH_PPC64) # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_PPC64 # define CDS_BUILD_BITS 64 # define CDS_PROCESSOR__NAME "IBM PowerPC64" # define CDS_PROCESSOR__NICK "ppc64" #elif defined(__arm__) && __SIZEOF_POINTER__ == 4 && __ARM_ARCH >= 7 && __ARM_ARCH < 8 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM7 # define CDS_BUILD_BITS 32 # define CDS_PROCESSOR__NAME "ARM v7" # define CDS_PROCESSOR__NICK "arm7" #elif ( defined(__arm__) || defined(__aarch64__)) && __ARM_ARCH >= 8 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM8 # define CDS_BUILD_BITS 64 # define CDS_PROCESSOR__NAME "ARM v8" # define CDS_PROCESSOR__NICK "arm8" #elif defined(__arm__) || defined(__aarch64__) # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM8 # define CDS_PROCESSOR__NAME "ARM" # define CDS_PROCESSOR__NICK "arm" # if __SIZEOF_POINTER__ == 8 # define CDS_BUILD_BITS 64 # else # define CDS_BUILD_BITS 32 # endif #else # if defined(CDS_USE_LIBCDS_ATOMIC) # error "Libcds does not support atomic implementation for the processor architecture. Try to use C++11-compatible compiler and remove CDS_USE_LIBCDS_ATOMIC flag from compiler command line" # else # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN # define CDS_BUILD_BITS 32 # define CDS_PROCESSOR__NAME "unknown" # define CDS_PROCESSOR__NICK "unknown" # endif #endif #if CDS_OS_TYPE == CDS_OS_MINGW # ifdef CDS_BUILD_LIB # define CDS_EXPORT_API __declspec(dllexport) # elif !defined(CDS_BUILD_STATIC_LIB) # define CDS_EXPORT_API __declspec(dllimport) # endif #else # ifndef __declspec # define __declspec(_x) # endif #endif // Byte order #if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN) # ifdef __BYTE_ORDER__ # if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define CDS_ARCH_LITTLE_ENDIAN # elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define CDS_ARCH_BIG_ENDIAN # endif # else # warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)" # endif #endif // Sanitizer attributes // Example: CDS_DISABLE_SANITIZE( "function" ) #ifdef CDS_ADDRESS_SANITIZER_ENABLED # define CDS_SUPPRESS_SANITIZE( ... ) __attribute__(( no_sanitize( __VA_ARGS__ ))) #else # define CDS_SUPPRESS_SANITIZE( ... ) #endif #endif // #ifndef CDSLIB_COMPILER_GCC_COMPILER_MACRO_H libcds-2.3.3/cds/compiler/gcc/defs.h000066400000000000000000000065361341244201700172000ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
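CDS_COMPILER_RW_BARRIER, defined in compiler_barriers.h above, is a compiler-only fence: the empty asm statement with a "memory" clobber pins memory accesses on either side without emitting a single instruction. A hedged sketch of the classic use case (illustrative only; on x86/amd64 the hardware already keeps stores in order, so a compiler fence is all that is needed in this particular shape):

int payload;                    // written before publication
volatile int ready = 0;         // polled by a reader thread

void publish( int v )
{
    payload = v;
    CDS_COMPILER_RW_BARRIER;    // forbid the compiler to sink the payload store below
    ready = 1;                  // reader must not observe ready==1 before payload
}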
(See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_GCC_DEFS_H
#define CDSLIB_COMPILER_GCC_DEFS_H

// Compiler version
#define CDS_COMPILER_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)

#if CDS_COMPILER_VERSION < 40800
#   error "Compiler version error. GCC version 4.8.0 and above is supported"
#endif

// Compiler name
#ifdef __VERSION__
#   define CDS_COMPILER__NAME   ("GNU C++ " __VERSION__)
#else
#   define CDS_COMPILER__NAME   "GNU C++"
#endif
#define CDS_COMPILER__NICK      "gcc"

#if __cplusplus < CDS_CPLUSPLUS_11
#   error C++11 and above is required
#endif

#include <cds/compiler/gcc/compiler_macro.h>

#define alignof __alignof__

// ***************************************
// C++11 features

// C++11 thread_local keyword
#define CDS_CXX11_THREAD_LOCAL_SUPPORT

// *************************************************
// Features
// If you run under Thread Sanitizer, pass -DCDS_THREAD_SANITIZER_ENABLED in compiler command line
// UPD: Seems, GCC 5+ has predefined macro __SANITIZE_THREAD__, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64354
#if defined(__SANITIZE_THREAD__) && !defined(CDS_THREAD_SANITIZER_ENABLED)
#   define CDS_THREAD_SANITIZER_ENABLED
#endif

// *************************************************
// Alignment macro

#define CDS_TYPE_ALIGNMENT(n)   __attribute__ ((aligned (n)))
#define CDS_CLASS_ALIGNMENT(n)  __attribute__ ((aligned (n)))
#define CDS_DATA_ALIGNMENT(n)   __attribute__ ((aligned (n)))

// Attributes
#if CDS_COMPILER_VERSION >= 40900
#   if __cplusplus == CDS_CPLUSPLUS_11
        // C++11
#       define CDS_DEPRECATED( reason ) [[gnu::deprecated(reason)]]
#   else
        // C++14
#       define CDS_DEPRECATED( reason ) [[deprecated(reason)]]
#   endif
#else
    // GCC 4.8
#   define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason )))
#endif

#define CDS_NORETURN __attribute__((__noreturn__))

// likely/unlikely
#define cds_likely( expr )   __builtin_expect( !!( expr ), 1 )
#define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 )

// Exceptions
#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
#   define CDS_EXCEPTION_ENABLED
#endif

// double-width CAS support
// note: gcc-4.8 does not support double-word atomics
//       gcc-4.9: a lot of crashes when use DCAS
//       gcc-7: 128-bit atomic is not lock-free, see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html
// You can manually suppress wide-atomic support by defining in compiler command line:
//  for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC
//  for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC
#if CDS_COMPILER_VERSION >= 50000
#   if CDS_BUILD_BITS == 64
#       if !defined( CDS_DISABLE_128BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 ) && CDS_COMPILER_VERSION < 70000
#           define CDS_DCAS_SUPPORT
#       endif
#   else
#       if !defined( CDS_DISABLE_64BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 )
#           define CDS_DCAS_SUPPORT
#       endif
#   endif
#endif

//if constexpr support (C++17)
#ifndef constexpr_if
#   if defined( __cpp_if_constexpr ) && __cpp_if_constexpr >= 201606
#       define constexpr_if if constexpr
#   endif
#endif

#include <cds/compiler/gcc/compiler_barriers.h>

#endif // #ifndef CDSLIB_COMPILER_GCC_DEFS_H
libcds-2.3.3/cds/compiler/gcc/ia64/000077500000000000000000000000001341244201700166375ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/gcc/ia64/backoff.h000066400000000000000000000015211341244201700204020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
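Several of the defs.h macros above are used pervasively throughout the library. A short sketch of the hint and deprecation macros in client-style code (try_push and its ring buffer are hypothetical, invented for this example):

#include <cstddef>

CDS_DEPRECATED( "use try_push( T&& ) instead" )
bool try_push_copy( int v );    // hypothetical legacy entry point

bool try_push( int * buf, size_t& pos, size_t cap, int v )
{
    if ( cds_likely( pos < cap )) {     // expands to __builtin_expect( !!(pos < cap), 1 )
        buf[pos++] = v;
        return true;
    }
    return false;   // cold path: the compiler moves this out of the hot stream
}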
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_IA64_BACKOFF_H #define CDSLIB_COMPILER_GCC_IA64_BACKOFF_H //@cond none namespace cds { namespace backoff { namespace gcc { namespace ia64 { # define CDS_backoff_hint_defined static inline void backoff_hint() { asm volatile ( "hint @pause;;" ); } # define CDS_backoff_nop_defined static inline void backoff_nop() { asm volatile ( "nop;;" ); } }} // namespace gcc::ia64 namespace platform { using namespace gcc::ia64; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_IA64_BACKOFF_H libcds-2.3.3/cds/compiler/gcc/ia64/bitop.h000066400000000000000000000037771341244201700201430ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_IA64_BITOP_H #define CDSLIB_COMPILER_GCC_IA64_BITOP_H //@cond none namespace cds { namespace bitop { namespace platform { namespace gcc { namespace ia64 { // MSB - return index (1..32) of most significant bit in x. If x == 0 return 0 # define cds_bitop_msb32_DEFINED static inline int msb32( uint32_t nArg ) { if ( !nArg ) return 0; uint64_t x = nArg; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; uint64_t nRes; asm __volatile__( "popcnt %0=%1\n\t" : "=r" (nRes) : "r" (x)); return (int) nRes; } // It is not compiled on HP-UX. Why?.. #if CDS_OS_TYPE != CDS_OS_HPUX // MSB - return index (0..31) of most significant bit in nArg. // !!! nArg != 0 # define cds_bitop_msb32nz_DEFINED static inline int msb32nz( uint32_t nArg ) { assert( nArg != 0 ); long double d = nArg; long nExp; asm __volatile__("getf.exp %0=%1\n\t" : "=r"(nExp) : "f"(d)); return (int) (nExp - 0xffff); } // MSB - return index (0..63) of most significant bit in nArg. // !!! nArg != 0 # define cds_bitop_msb64nz_DEFINED static inline int msb64nz( uint64_t nArg ) { assert( nArg != 0 ); long double d = nArg; long nExp; asm __volatile__("getf.exp %0=%1\n\t" : "=r" (nExp) : "f" (d)); return (int) (nExp - 0xffff); } #endif // #if CDS_OS_TYPE != CDS_OS_HPUX }} // namespace gcc::ia64 using namespace gcc::ia64; }}} // namespace cds::bitop::platform //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_IA64_BITOP_H libcds-2.3.3/cds/compiler/gcc/ia64/cxx11_atomic.h000066400000000000000000000607771341244201700213310ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H #define CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H /* Source: 1. load/store: http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001932.html 2. 
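The getf.exp variants above exploit the integer-to-float conversion: after converting nArg, the unbiased exponent of the long double is exactly the index of the most significant bit. The same trick in portable form (a sketch, using frexp instead of reading the exponent field directly; exact for 32-bit inputs since double carries a 53-bit mantissa):

#include <cassert>
#include <cmath>
#include <cstdint>

inline int msb32nz_portable( uint32_t n )
{
    assert( n != 0 );
    int exp;
    std::frexp( static_cast<double>( n ), &exp );   // n == f * 2^exp, f in [0.5, 1)
    return exp - 1;                                 // 0-based index of the MSB
}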
Mapping to C++ Memory Model: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html */ #include //@cond namespace cds { namespace cxx11_atomic { namespace platform { inline namespace gcc { inline namespace ia64 { static inline void itanium_full_fence() noexcept { __asm__ __volatile__ ( "mf \n\t" ::: "memory" ); } static inline void fence_before( memory_order order ) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: case memory_order_acquire: break; case memory_order_release: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: itanium_full_fence(); break; } } static inline void fence_after( memory_order order ) noexcept { switch(order) { case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_relaxed: case memory_order_consume: case memory_order_release: break; case memory_order_seq_cst: itanium_full_fence(); break; } } //----------------------------------------------------------------------------- // fences //----------------------------------------------------------------------------- static inline void thread_fence(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: break; case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: itanium_full_fence(); break; default:; } } static inline void signal_fence(memory_order order) noexcept { // C++11: 29.8.8: only compiler optimization, no hardware instructions switch(order) { case memory_order_relaxed: break; case memory_order_consume: case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; default:; } } #define CDS_ITANIUM_ATOMIC_LOAD( n_bytes, n_bits ) \ template \ static inline T load##n_bits( T volatile const * pSrc, memory_order order ) noexcept \ { \ static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ assert( order == memory_order_relaxed \ || order == memory_order_consume \ || order == memory_order_acquire \ || order == memory_order_seq_cst \ ) ; \ assert( pSrc ) ; \ T val ; \ __asm__ __volatile__ ( \ "ld" #n_bytes ".acq %[val] = [%[pSrc]] \n\t" \ : [val] "=r" (val) \ : [pSrc] "r" (pSrc) \ : "memory" \ ) ; \ return val ; \ } #define CDS_ITANIUM_ATOMIC_STORE( n_bytes, n_bits ) \ template \ static inline void store##n_bits( T volatile * pDest, T val, memory_order order ) noexcept \ { \ static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ assert( order == memory_order_relaxed \ || order == memory_order_release \ || order == memory_order_seq_cst \ ) ; \ assert( pDest ) ; \ if ( order == memory_order_seq_cst ) { \ __asm__ __volatile__ ( \ "st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \ "mf \n\t" \ :: [pDest] "r" (pDest), [val] "r" (val) \ : "memory" \ ) ; \ } \ else { \ __asm__ __volatile__ ( \ "st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \ :: [pDest] "r" (pDest), [val] "r" (val) \ : "memory" \ ) ; \ fence_after(order) ; \ } \ } #define CDS_ITANIUM_ATOMIC_CAS( n_bytes, n_bits ) \ template \ static inline bool cas##n_bits##_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order /*mo_fail*/ ) noexcept \ { \ static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ T current ; \ switch(mo_success) { \ case memory_order_relaxed: \ case memory_order_consume: \ case memory_order_acquire: \ __asm__ __volatile__ ( \ "mov ar.ccv = %[expected] ;;\n\t" \ 
"cmpxchg" #n_bytes ".acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ : [current] "=r" (current) \ : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ : "ar.ccv", "memory" \ ); \ break ; \ case memory_order_release: \ __asm__ __volatile__ ( \ "mov ar.ccv = %[expected] ;;\n\t" \ "cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ : [current] "=r" (current) \ : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ : "ar.ccv", "memory" \ ); \ break ; \ case memory_order_acq_rel: \ case memory_order_seq_cst: \ __asm__ __volatile__ ( \ "mov ar.ccv = %[expected] ;;\n\t" \ "cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ "mf \n\t" \ : [current] "=r" (current) \ : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ : "ar.ccv", "memory" \ ); \ break; \ default: \ assert(false); \ } \ bool bSuccess = expected == current ; \ expected = current ; \ return bSuccess ; \ } \ template \ static inline bool cas##n_bits##_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept \ { return cas##n_bits##_strong( pDest, expected, desired, mo_success, mo_fail ); } // xchg is performed with acquire semantics #define CDS_ITANIUM_ATOMIC_EXCHANGE( n_bytes, n_bits ) \ template \ static inline T exchange##n_bits( T volatile * pDest, T val, memory_order order ) noexcept \ { \ static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ assert( pDest ) ; \ T current ; \ switch(order) \ { \ case memory_order_relaxed: \ case memory_order_consume: \ case memory_order_acquire: \ __asm__ __volatile__ ( \ "xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \ : [current] "=r" (current) \ : [pDest] "r" (pDest), [val] "r" (val) \ : "memory" \ ); \ break; \ case memory_order_acq_rel: \ case memory_order_release: \ case memory_order_seq_cst: \ __asm__ __volatile__ ( \ "mf \n\t" \ "xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \ : [current] "=r" (current) \ : [pDest] "r" (pDest), [val] "r" (val) \ : "memory" \ ); \ break; \ default: assert(false); \ } \ return current ; \ } #define CDS_ITANIUM_ATOMIC_FETCH_ADD( n_bytes, n_add ) \ switch (order) { \ case memory_order_relaxed: \ case memory_order_consume: \ case memory_order_acquire: \ __asm__ __volatile__ ( \ "fetchadd" #n_bytes ".acq %[cur] = [%[pDest]], " #n_add " \n\t" \ : [cur] "=r" (cur) \ : [pDest] "r" (pDest) \ : "memory" \ ); \ break ; \ case memory_order_release: \ __asm__ __volatile__ ( \ "fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \ : [cur] "=r" (cur) \ : [pDest] "r" (pDest) \ : "memory" \ ); \ break ; \ case memory_order_acq_rel: \ case memory_order_seq_cst: \ __asm__ __volatile__ ( \ "fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \ "mf \n\t" \ : [cur] "=r" (cur) \ : [pDest] "r" (pDest) \ : "memory" \ ); \ break ; \ default: \ assert(false); \ } //----------------------------------------------------------------------------- // 8bit primitives //----------------------------------------------------------------------------- CDS_ITANIUM_ATOMIC_LOAD( 1, 8 ) CDS_ITANIUM_ATOMIC_STORE( 1, 8 ) CDS_ITANIUM_ATOMIC_CAS( 1, 8 ) CDS_ITANIUM_ATOMIC_EXCHANGE( 1, 8 ) //----------------------------------------------------------------------------- // 16bit primitives //----------------------------------------------------------------------------- CDS_ITANIUM_ATOMIC_LOAD( 2, 16 ) CDS_ITANIUM_ATOMIC_STORE( 2, 16 ) CDS_ITANIUM_ATOMIC_CAS( 2, 16 ) 
CDS_ITANIUM_ATOMIC_EXCHANGE( 2, 16 ) //----------------------------------------------------------------------------- // 32bit primitives //----------------------------------------------------------------------------- CDS_ITANIUM_ATOMIC_LOAD( 4, 32 ) CDS_ITANIUM_ATOMIC_STORE( 4, 32 ) CDS_ITANIUM_ATOMIC_CAS( 4, 32 ) CDS_ITANIUM_ATOMIC_EXCHANGE( 4, 32 ) # define CDS_ATOMIC_fetch32_add_defined template static inline T fetch32_add( T volatile * pDest, T val, memory_order order) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( pDest ); T cur; switch ( val ) { case 1: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 1 ); break; case 4: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 4 ); break; case 8: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 8 ); break; case 16: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 16 ); break; default: cur = load32( pDest, memory_order_relaxed ); do {} while ( !cas32_strong( pDest, cur, cur + val, order, memory_order_relaxed )); break; } return cur; } # define CDS_ATOMIC_fetch32_sub_defined template static inline T fetch32_sub( T volatile * pDest, T val, memory_order order) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( pDest ); T cur; switch ( val ) { case 1: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -1 ); break; case 4: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -4 ); break; case 8: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -8 ); break; case 16: CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -16 ); break; default: cur = load32( pDest, memory_order_relaxed ); do {} while ( !cas32_strong( pDest, cur, cur - val, order, memory_order_relaxed )); break; } return cur; } //----------------------------------------------------------------------------- // 64bit primitives //----------------------------------------------------------------------------- CDS_ITANIUM_ATOMIC_LOAD( 8, 64 ) CDS_ITANIUM_ATOMIC_STORE( 8, 64 ) CDS_ITANIUM_ATOMIC_CAS( 8, 64 ) CDS_ITANIUM_ATOMIC_EXCHANGE( 8, 64 ) # define CDS_ATOMIC_fetch64_add_defined template static inline T fetch64_add( T volatile * pDest, T val, memory_order order) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( pDest ); T cur; switch ( val ) { case 1: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 ); break; case 4: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 ); break; case 8: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 ); break; case 16: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 ); break; default: cur = load64( pDest, memory_order_relaxed ); do {} while ( !cas64_strong( pDest, cur, cur + val, order, memory_order_relaxed )); break; } return cur; } # define CDS_ATOMIC_fetch64_sub_defined template static inline T fetch64_sub( T volatile * pDest, T val, memory_order order) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( pDest ); T cur; switch ( val ) { case 1: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 ); break; case 4: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 ); break; case 8: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 ); break; case 16: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 ); break; default: cur = load64( pDest, memory_order_relaxed ); do {} while ( !cas64_strong( pDest, cur, cur - val, order, memory_order_relaxed )); break; } return cur; } //----------------------------------------------------------------------------- // pointer primitives //----------------------------------------------------------------------------- template static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept { assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( 
pSrc ); T * val; __asm__ __volatile__ ( "ld8.acq %[val] = [%[pSrc]] \n\t" : [val] "=r" (val) : [pSrc] "r" (pSrc) : "memory" ); return val; } template static inline void store_ptr( T * volatile * pDest, T * val, memory_order order ) noexcept { assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order == memory_order_seq_cst ) { __asm__ __volatile__ ( "st8.rel [%[pDest]] = %[val] \n\t" "mf \n\t" :: [pDest] "r" (pDest), [val] "r" (val) : "memory" ); } else { __asm__ __volatile__ ( "st8.rel [%[pDest]] = %[val] \n\t" :: [pDest] "r" (pDest), [val] "r" (val) : "memory" ); fence_after(order); } } template static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T *) == 8, "Illegal size of operand" ); assert( pDest ); T * current; switch(mo_success) { case memory_order_relaxed: case memory_order_consume: case memory_order_acquire: __asm__ __volatile__ ( "mov ar.ccv = %[expected] ;;\n\t" "cmpxchg8.acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" : [current] "=r" (current) : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) : "ar.ccv", "memory" ); break; case memory_order_release: __asm__ __volatile__ ( "mov ar.ccv = %[expected] ;;\n\t" "cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" : [current] "=r" (current) : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) : "ar.ccv", "memory" ); break; case memory_order_acq_rel: case memory_order_seq_cst: __asm__ __volatile__ ( "mov ar.ccv = %[expected] ;;\n\t" "cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" "mf \n\t" : [current] "=r" (current) : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) : "ar.ccv", "memory" ); break; default: assert(false); } bool bSuccess = expected == current; expected = current; if ( !bSuccess ) fence_after( mo_fail ); return bSuccess; } template static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T * exchange_ptr( T * volatile * pDest, T * val, memory_order order ) noexcept { static_assert( sizeof(T *) == 8, "Illegal size of operand" ); assert( pDest ); T * current; switch(order) { case memory_order_relaxed: case memory_order_consume: case memory_order_acquire: __asm__ __volatile__ ( "xchg8 %[current] = [%[pDest]], %[val]\n\t" : [current] "=r" (current) : [pDest] "r" (pDest), [val] "r" (val) : "memory" ); break; case memory_order_acq_rel: case memory_order_release: case memory_order_seq_cst: __asm__ __volatile__ ( "mf \n\t" "xchg8 %[current] = [%[pDest]], %[val]\n\t" : [current] "=r" (current) : [pDest] "r" (pDest), [val] "r" (val) : "memory" ); break; default: assert(false); } return current; } template struct atomic_pointer_sizeof { enum { value = sizeof(T) }; }; template <> struct atomic_pointer_sizeof { enum { value = 1 }; }; // It does not work properly // atomic.fetch_add( ... ) returns nullptr, why?.. 
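The atomic_pointer_sizeof helper just defined explains the scaling in the disabled fetch_ptr_add/fetch_ptr_sub below: pointer fetch-add must advance by whole elements, so the count is multiplied by the element size (with void* treated as byte-sized) before the raw byte quantity is fed to fetchadd. A portable restatement of the intended semantics (a sketch; std::atomic performs the same scaling internally):

#include <atomic>
#include <cstddef>

template <typename T>
T * fetch_ptr_add_portable( std::atomic<T *>& a, std::ptrdiff_t n )
{
    // advances the stored pointer by n elements (n * sizeof(T) bytes)
    // and returns the previous value -- the behavior the hand-rolled
    // fetchadd-based version below aims to reproduce
    return a.fetch_add( n );
}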
//# define CDS_ATOMIC_fetch_ptr_add_defined template static inline T * fetch_ptr_add( T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept { static_assert( sizeof(T *) == 8, "Illegal size of operand" ); assert( pDest ); T * cur; val *= atomic_pointer_sizeof::value; switch ( val ) { case 1: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 ); break; case 4: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 ); break; case 8: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 ); break; case 16: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 ); break; default: cur = load_ptr( pDest, memory_order_relaxed ); do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast(reinterpret_cast(cur) + val), order, memory_order_relaxed )); break; } return cur; } // It does not work properly // atomic.fetch_sub( ... ) returns nullptr, why?.. //# define CDS_ATOMIC_fetch_ptr_sub_defined template static inline T * fetch_ptr_sub( T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept { static_assert( sizeof(T *) == 8, "Illegal size of operand" ); assert( pDest ); T * cur; val *= atomic_pointer_sizeof::value; switch ( val ) { case 1: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 ); break; case 4: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 ); break; case 8: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 ); break; case 16: CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 ); break; default: cur = load_ptr( pDest, memory_order_relaxed ); do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast(reinterpret_cast(cur) - val), order, memory_order_relaxed )); break; } return cur; } //----------------------------------------------------------------------------- // atomic flag primitives //----------------------------------------------------------------------------- typedef bool atomic_flag_type; static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { return exchange8( pFlag, true, order ); } static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { store8( pFlag, false, order ); } #undef CDS_ITANIUM_ATOMIC_LOAD #undef CDS_ITANIUM_ATOMIC_STORE #undef CDS_ITANIUM_ATOMIC_CAS #undef CDS_ITANIUM_ATOMIC_EXCHANGE #undef CDS_ITANIUM_ATOMIC_FETCH_ADD }} // namespace gcc::ia64 } // namespace platform }} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H libcds-2.3.3/cds/compiler/gcc/ppc64/000077500000000000000000000000001341244201700170305ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/gcc/ppc64/backoff.h000066400000000000000000000016141341244201700205760ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
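The atomic-flag pair above (exchange8 for test-and-set, store8 for clear) is exactly what a test-and-set spinlock needs. A minimal sketch in terms of those primitives (the tas_lock class is illustrative, not libcds API; backoff_hint is the platform hint defined elsewhere in this tree):

class tas_lock {
    atomic_flag_type volatile m_flag = false;
public:
    void lock() noexcept {
        // acquire on the winning exchange; keep spinning while the flag is set
        while ( atomic_flag_tas( &m_flag, memory_order_acquire ))
            cds::backoff::platform::backoff_hint();
    }
    void unlock() noexcept {
        atomic_flag_clear( &m_flag, memory_order_release );
    }
};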
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H #define CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H //@cond none namespace cds { namespace backoff { namespace gcc { namespace ppc64 { # define CDS_backoff_hint_defined static inline void backoff_hint() { // Provide a hint that performance will probably be improved // if shared resources dedicated to the executing processor are released for use by other processors asm volatile( "or 27,27,27 # yield" ); } }} // namespace gcc::ppc64 namespace platform { using namespace gcc::ppc64; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H libcds-2.3.3/cds/compiler/gcc/ppc64/bitop.h000066400000000000000000000010531341244201700203150ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_PPC64_BITOP_H #define CDSLIB_COMPILER_GCC_PPC64_BITOP_H //@cond none namespace cds { namespace bitop { namespace platform { namespace gcc { namespace ppc64 { }} // namespace gcc::ppc64 using namespace gcc::ppc64; }}} // namespace cds::bitop::platform //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_PPC64_BITOP_H libcds-2.3.3/cds/compiler/gcc/sparc/000077500000000000000000000000001341244201700172045ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/gcc/sparc/backoff.h000066400000000000000000000013021341244201700207440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H #define CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H //@cond none namespace cds { namespace backoff { namespace gcc { namespace Sparc { # define CDS_backoff_nop_defined static inline void backoff_nop() { asm volatile ( "nop;" ); } }} // namespace gcc::Sparc namespace platform { using namespace gcc::Sparc; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H libcds-2.3.3/cds/compiler/gcc/sparc/bitop.h000066400000000000000000000027441341244201700205010ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_SPARC_BITOP_H #define CDSLIB_COMPILER_GCC_SPARC_BITOP_H //@cond none namespace cds { namespace bitop { namespace platform { namespace gcc { namespace Sparc { // MSB - return index (1..64) of most significant bit in nArg. If nArg == 0 return 0 // Source: UltraSPARC Architecture 2007 // // Test result: this variant and its variation about 100 times slower then generic implementation :-( static inline int sparc_msb64( uint64_t nArg ) { uint64_t result; asm volatile ( "neg %[nArg], %[result] \n\t" "xnor %[nArg], %[result], %%g5 \n\t" "popc %%g5, %[result] \n\t" "movrz %[nArg], %%g0, %[result] \n\t" : [result] "=r" (result) : [nArg] "r" (nArg) : "g5" ); return result; } // MSB - return index (1..32) of most significant bit in nArg. 
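Both the ia64 msb32 earlier and the SPARC popc variant that follows rest on the same idea: smear the most significant bit all the way to the right, then count the ones. The generic implementation the SPARC comment compares against looks roughly like this portable sketch (GCC builtin used for the count):

#include <cstdint>

inline int msb64_fold( uint64_t x )
{
    x |= x >> 1;  x |= x >> 2;   x |= x >> 4;
    x |= x >> 8;  x |= x >> 16;  x |= x >> 32;
    // x is now a suffix of ones: 2^k - 1, where k-1 is the 0-based MSB index
    return __builtin_popcountll( x );   // 1-based index; 0 when x == 0
}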
If nArg == 0 return 0 static inline int sparc_msb32( uint32_t nArg ) { return sparc_msb64( (uint64_t) nArg ); } }} // namespace gcc::Sparc using namespace gcc::Sparc; }}} // namespace cds::bitop::platform //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_BITOP_H libcds-2.3.3/cds/compiler/gcc/sparc/cxx11_atomic.h000066400000000000000000000530461341244201700216650ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H #define CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H #include /* Source: 1. [Doug Lea "JSR-133 Cookbook for Compiler Writers]: Acquire semantics: load; LoadLoad+LoadStore Release semantics: LoadStore+StoreStore; store 2. boost::atomic library by Helge Bahman 3. OpenSparc source code */ #if CDS_OS_TYPE == CDS_OS_LINUX # define CDS_SPARC_RMO_MEMORY_MODEL #endif #define CDS_SPARC_MB_FULL "membar #Sync \n\t" #ifdef CDS_SPARC_RMO_MEMORY_MODEL // RMO memory model (Linux only?..) Untested # define CDS_SPARC_MB_LL_LS "membar #LoadLoad|#LoadStore \n\t" # define CDS_SPARC_MB_LS_SS "membar #LoadStore|#StoreStore \n\t" # define CDS_SPARC_MB_LL_LS_SS "membar #LoadLoad|#LoadStore|#StoreStore \n\t" #else // TSO memory model (default; Solaris uses this model) # define CDS_SPARC_MB_LL_LS # define CDS_SPARC_MB_LS_SS # define CDS_SPARC_MB_LL_LS_SS #endif #define CDS_SPARC_MB_ACQ CDS_SPARC_MB_LL_LS #define CDS_SPARC_MB_REL CDS_SPARC_MB_LS_SS #define CDS_SPARC_MB_ACQ_REL CDS_SPARC_MB_LL_LS_SS #define CDS_SPARC_MB_SEQ_CST CDS_SPARC_MB_FULL //@cond namespace cds { namespace cxx11_atomic { namespace platform { inline namespace gcc { inline namespace Sparc { static inline void fence_before( memory_order order ) noexcept { switch(order) { case memory_order_relaxed: case memory_order_acquire: case memory_order_consume: break; case memory_order_release: case memory_order_acq_rel: __asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" ); break; case memory_order_seq_cst: __asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" ); break; } } static inline void fence_after( memory_order order ) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: case memory_order_release: break; case memory_order_acquire: case memory_order_acq_rel: __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" ); break; case memory_order_seq_cst: __asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" ); break; } } //----------------------------------------------------------------------------- // fences //----------------------------------------------------------------------------- static inline void thread_fence(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: break; case memory_order_acquire: __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" ); break; case memory_order_release: __asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" ); break; case memory_order_acq_rel: __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ_REL ::: "memory" ); break; case memory_order_seq_cst: __asm__ __volatile__ ( "" CDS_SPARC_MB_SEQ_CST ::: "memory" ); break; default:; } } static inline void signal_fence(memory_order order) noexcept { // C++11: 29.8.8: only compiler optimization, no hardware instructions switch(order) { case memory_order_relaxed: break; case memory_order_consume: case memory_order_release: case memory_order_acquire: case 
memory_order_acq_rel: case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; default:; } } //----------------------------------------------------------------------------- // atomic flag primitives //----------------------------------------------------------------------------- typedef unsigned char atomic_flag_type; static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { atomic_flag_type fCur; fence_before( order ); __asm__ __volatile__( "ldstub [%[pFlag]], %[fCur] \n\t" : [fCur] "=r"(fCur) : [pFlag] "r"(pFlag) : "memory", "cc" ); fence_after( order ); return fCur != 0; } static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { fence_before( order ); __asm__ __volatile__( CDS_SPARC_MB_REL "stub %%g0, [%[pFlag]] \n\t" :: [pFlag] "r"(pFlag) : "memory" ); fence_after( order ); } //----------------------------------------------------------------------------- // 32bit primitives //----------------------------------------------------------------------------- template static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); fence_before(order); *pDest = src; fence_after(order); } template static inline T load32( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); fence_before(order); T v = *pSrc; fence_after(order); return v; } template static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( pDest ); fence_before( mo_success ); __asm__ __volatile__( "cas [%[pDest]], %[expected], %[desired]" : [desired] "+r" (desired) : [pDest] "r" (pDest), [expected] "r" (expected) : "memory" ); // desired contains current value bool bSuccess = desired == expected; if ( bSuccess ) fence_after( mo_success ); else { fence_after(mo_fail); expected = desired; } return bSuccess; } template static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T exchange32( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( pDest ); // This primitive could be implemented via "swap" instruction but "swap" is deprecated in UltraSparc T cur = load32( pDest, memory_order_relaxed ); do {} while ( !cas32_strong( pDest, cur, v, order, memory_order_relaxed )); return cur; } //----------------------------------------------------------------------------- // 64bit primitives //----------------------------------------------------------------------------- template static inline T load64( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); fence_before(order); T v = *pSrc; fence_after(order); return v; } template static 
inline void store64( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); fence_before(order); *pDest = val; fence_after(order); } template static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( pDest ); fence_before( mo_success ); __asm__ __volatile__( "casx [%[pDest]], %[expected], %[desired]" : [desired] "+r" (desired) : [pDest] "r" (pDest), [expected] "r" (expected) : "memory" ); // desired contains current value bool bSuccess = desired == expected; if ( bSuccess ) { fence_after( mo_success ); } else { fence_after(mo_fail); expected = desired; } return bSuccess; } template static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( pDest ); T cur = load64( pDest, memory_order_relaxed ); do {} while ( !cas64_strong( pDest, cur, v, order, memory_order_relaxed )); return cur; } //----------------------------------------------------------------------------- // 8bit primitives //----------------------------------------------------------------------------- template static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); fence_before( order ); *pDest = src; fence_after( order ); } template static inline T load8( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); fence_before( order ); T v = *pSrc; fence_after( order ); return v; } template static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( pDest ); union u32 { uint32_t w; T c[4]; }; static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 ); u32 uExpected; u32 uDesired; bool bSuccess; for (;;) { uExpected.w = uDesired.w = pDest32->w; uExpected.c[nCharIdx] = expected; uDesired.c[nCharIdx] = desired; bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); if ( bSuccess || uExpected.c[nCharIdx] != expected ) break; } expected = uExpected.c[nCharIdx]; return bSuccess; } template static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( pDest ); union u32 { uint32_t w; T c[4]; }; static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); 
size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 ); u32 uExpected; u32 uDesired; uExpected.w = uDesired.w = pDest32->w; uExpected.c[nCharIdx] = expected; uDesired.c[nCharIdx] = desired; bool bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); expected = uExpected.c[nCharIdx]; return bSuccess; } template static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( pDest ); T cur = load8( pDest, memory_order_relaxed ); do {} while ( !cas8_strong( pDest, cur, v, order, memory_order_relaxed )); return cur; } //----------------------------------------------------------------------------- // 16bit primitives //----------------------------------------------------------------------------- template static inline T load16( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); fence_before( order ); T v = *pSrc; fence_after( order ); return v; } template static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); fence_before(order); *pDest = src; fence_after(order); } template static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( pDest ); union u32 { uint32_t w; T c[2]; }; static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 ); u32 uExpected; u32 uDesired; bool bSuccess; for (;;) { uExpected.w = uDesired.w = pDest32->w; uExpected.c[nIdx] = expected; uDesired.c[nIdx] = desired; bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); if ( bSuccess || uExpected.c[nIdx] != expected ) break; } expected = uExpected.c[nIdx]; return bSuccess; } template static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( pDest ); union u32 { uint32_t w; T c[2]; }; static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 ); u32 uExpected; u32 uDesired; uExpected.w = uDesired.w = pDest32->w; uExpected.c[nIdx] = expected; uDesired.c[nIdx] = desired; bool bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); expected = uExpected.c[nIdx]; return bSuccess; } template static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( pDest ); T cur = load16( pDest, memory_order_relaxed ); do {} while ( !cas16_strong( pDest, cur, v, order, memory_order_relaxed )); return cur; } //----------------------------------------------------------------------------- // pointer primitives 
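Since SPARC has no byte- or halfword-wide cas, the code above emulates them: it CASes the aligned 32-bit word containing the target, substituting only the addressed byte and re-reading the neighbours on every retry. A condensed sketch of the same technique (mirrors cas8_strong above in a single attempt; byte indexing follows the machine's endianness exactly as in the original):

#include <cstddef>
#include <cstdint>

template <typename T>   // requires sizeof(T) == 1
bool cas8_emulated( T volatile * p, T& expected, T desired )
{
    union u32 { uint32_t w; T c[4]; };
    u32 volatile * pw = (u32 *)( uintptr_t( p ) & ~uintptr_t( 0x03 ));  // containing word
    size_t const  idx = (size_t)( uintptr_t( p ) & 0x03 );              // byte slot inside it

    u32 exp, des;
    exp.w = des.w = pw->w;      // snapshot the neighbouring bytes
    exp.c[idx] = expected;
    des.c[idx] = desired;       // change only the addressed byte

    bool ok = cas32_weak( reinterpret_cast<uint32_t volatile *>( pw ),
                          exp.w, des.w, memory_order_seq_cst, memory_order_relaxed );
    expected = exp.c[idx];      // report the byte value actually observed
    return ok;
}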
//-----------------------------------------------------------------------------
        // pointer primitives
        //-----------------------------------------------------------------------------

        template <typename T>
        static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
            assert( order == memory_order_relaxed
                 || order == memory_order_release
                 || order == memory_order_seq_cst );
            assert( pDest );

            fence_before(order);
            *pDest = src;
            fence_after(order);
        }

        template <typename T>
        static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
            assert( order == memory_order_relaxed
                 || order == memory_order_consume
                 || order == memory_order_acquire
                 || order == memory_order_seq_cst );
            assert( pSrc );

            fence_before( order );
            T * v = *pSrc;
            fence_after( order );
            return v;
        }

        template <typename T>
        static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );

            return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast<uint64_t *>( &expected ), (uint64_t) desired, mo_success, mo_fail );
        }

        template <typename T>
        static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
        {
            return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
        }

        template <typename T>
        static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
            return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order );
        }

    }} // namespace gcc::Sparc
    } // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond

#undef CDS_SPARC_MB_ACQ
#undef CDS_SPARC_MB_REL
#undef CDS_SPARC_MB_SEQ_CST
#undef CDS_SPARC_MB_FULL
#undef CDS_SPARC_MB_LL_LS
#undef CDS_SPARC_MB_LS_SS
#undef CDS_SPARC_MB_LL_LS_SS

#endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H
libcds-2.3.3/cds/compiler/gcc/x86/000077500000000000000000000000001341244201700165215ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/gcc/x86/backoff.h000066400000000000000000000015041341244201700202650ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_GCC_X86_BACKOFF_H
#define CDSLIB_COMPILER_GCC_X86_BACKOFF_H

//@cond none

namespace cds { namespace backoff {
    namespace gcc { namespace x86 {

# define CDS_backoff_nop_defined
        static inline void backoff_nop()
        {
            asm volatile ( "nop;" );
        }

# define CDS_backoff_hint_defined
        static inline void backoff_hint()
        {
            asm volatile ( "pause;" );
        }

    }} // namespace gcc::x86

    namespace platform {
        using namespace gcc::x86;
    }
}} // namespace cds::backoff

//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_X86_BACKOFF_H
libcds-2.3.3/cds/compiler/gcc/x86/bitop.h000066400000000000000000000054171341244201700200160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_GCC_X86_BITOP_H
#define CDSLIB_COMPILER_GCC_X86_BITOP_H

//@cond none
namespace cds {
    namespace bitop { namespace platform { namespace gcc { namespace x86 {

        // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
        static inline int msb32( uint32_t nArg )
        {
            int nRet;
            __asm__ __volatile__ (
                "bsrl %[nArg], %[nRet]  ;\n\t"
                "jnz 1f                 ;\n\t"
                "xorl %[nRet], %[nRet]  ;\n\t"
                "subl $1, %[nRet]       ;\n\t"
            "1:"
                "addl $1, %[nRet]       ;\n\t"
                : [nRet] "=a" (nRet)
                : [nArg] "r" (nArg)
                : "cc"
            );
            return nRet;
        }

# define cds_bitop_msb32nz_DEFINED
        static inline int msb32nz( uint32_t nArg )
        {
            assert( nArg != 0 );
            int nRet;
            __asm__ __volatile__ (
                "bsrl %[nArg], %[nRet] ;"
                : [nRet] "=a" (nRet)
                : [nArg] "r" (nArg)
                : "cc"
            );
            return nRet;
        }

        // LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
        static inline int lsb32( uint32_t nArg )
        {
            int nRet;
            __asm__ __volatile__ (
                "bsfl %[nArg], %[nRet]  ;"
                "jnz 1f                 ;"
                "xorl %[nRet], %[nRet]  ;"
                "subl $1, %[nRet]       ;"
            "1:"
                "addl $1, %[nRet]       ;"
                : [nRet] "=a" (nRet)
                : [nArg] "r" (nArg)
                : "cc"
            );
            return nRet;
        }

        // LSB - return index (0..31) of least significant bit in nArg.
        // Condition: nArg != 0
# define cds_bitop_lsb32nz_DEFINED
        static inline int lsb32nz( uint32_t nArg )
        {
            assert( nArg != 0 );
            int nRet;
            __asm__ __volatile__ (
                "bsfl %[nArg], %[nRet] ;"
                : [nRet] "=a" (nRet)
                : [nArg] "r" (nArg)
                : "cc"
            );
            return nRet;
        }

    }} // namespace gcc::x86
    using namespace gcc::x86;
}}} // namespace cds::bitop::platform
//@endcond

#endif // #ifndef CDSLIB_COMPILER_GCC_X86_BITOP_H
libcds-2.3.3/cds/compiler/gcc/x86/cxx11_atomic.h000066400000000000000000000154621341244201700212020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H

#include
#include

//@cond
namespace cds { namespace cxx11_atomic {
    namespace platform { inline namespace gcc { inline namespace x86 {

        //-----------------------------------------------------------------------------
        // 64bit primitives
        //-----------------------------------------------------------------------------

        template <typename T>
        static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( cds::details::is_aligned( pDest, 8 ));

            uint32_t ebxStore;
            T prev = expected;

            fence_before(mo_success);

            // We must save EBX in PIC mode
            __asm__ __volatile__ (
                "movl %%ebx, %[ebxStore]\n"
                "movl %[desiredLo], %%ebx\n"
                "lock; cmpxchg8b 0(%[pDest])\n"
                "movl %[ebxStore], %%ebx\n"
                : [prev] "=A" (prev), [ebxStore] "=m" (ebxStore)
                : [desiredLo] "D" ((int)desired), [desiredHi] "c" ((int)(desired >> 32)), [pDest] "S" (pDest), "0" (prev)
                : "memory");

            bool success = (prev == expected);
            if (success)
                fence_after(mo_success);
            else {
                fence_after(mo_fail);
                expected = prev;
            }
            return success;
        }

        template <typename T>
        static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
        {
            return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
        }

        template <typename T>
        static inline T load64( T volatile const * pSrc, memory_order order ) noexcept
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( order == memory_order_relaxed
                 || order == memory_order_consume
                 || order == memory_order_acquire
                 || order == memory_order_seq_cst );
            assert( pSrc );
            assert( cds::details::is_aligned( pSrc, 8 ));
            CDS_UNUSED( order );

            T CDS_DATA_ALIGNMENT(8) v;
            __asm__ __volatile__( "movq
(%[pSrc]), %[v] ; \n\t" : [v] "=x" (v) : [pSrc] "r" (pSrc) : ); return v; } template static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); T cur = load64( pDest, memory_order_relaxed ); do { } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); return cur; } template static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 8 )); if ( order != memory_order_seq_cst ) { fence_before( order ); // Atomically stores 64bit value by SSE instruction __asm__ __volatile__( "movq %[val], (%[pDest]) ; \n\t" : : [val] "x" (val), [pDest] "r" (pDest) : "memory" ); } else { exchange64( pDest, val, order ); } } //----------------------------------------------------------------------------- // pointer primitives //----------------------------------------------------------------------------- template static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); return (T *) exchange32( (uint32_t volatile *) pDest, (uint32_t) v, order ); } template static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange_ptr( pDest, src, order ); } } template static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); T * v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); return cas32_strong( (uint32_t volatile *) pDest, *reinterpret_cast( &expected ), (uint32_t) desired, mo_success, mo_fail ); } template static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); } }} // namespace gcc::x86 } // namespace platform }} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H libcds-2.3.3/cds/compiler/gcc/x86/cxx11_atomic32.h000066400000000000000000000407631341244201700213510ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H #define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H #include #include //@cond namespace cds { namespace cxx11_atomic { namespace platform { inline namespace gcc { inline namespace x86 { static inline void fence_before( memory_order order ) noexcept { switch(order) { case memory_order_relaxed: case memory_order_acquire: case memory_order_consume: break; case memory_order_release: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; } } static inline void fence_after( memory_order order ) noexcept { switch(order) { case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_relaxed: case memory_order_consume: case memory_order_release: break; case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; } } static inline void fence_after_load(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_release: break; case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_consume: break; case memory_order_seq_cst: __asm__ __volatile__ ( "mfence" ::: "memory" ); break; default:; } } //----------------------------------------------------------------------------- // fences //----------------------------------------------------------------------------- static inline void thread_fence(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: break; case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: __asm__ __volatile__ ( "mfence" ::: "memory" ); break; default:; } } static inline void signal_fence(memory_order order) noexcept { // C++11: 29.8.8: only compiler optimization, no hardware instructions switch(order) { case memory_order_relaxed: break; case memory_order_consume: case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; default:; } } //----------------------------------------------------------------------------- // 8bit primitives //----------------------------------------------------------------------------- template static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); T prev = expected; fence_before(mo_success); __asm__ __volatile__ ( "lock ; cmpxchgb %[desired], %[pDest]" : [prev] "+a" (prev), [pDest] "+m" (*pDest) : [desired] "q" (desired) ); bool success = (prev == expected); expected = prev; if (success) fence_after(mo_success); else fence_after(mo_fail); return success; } template static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); fence_before(order); __asm__ __volatile__ ( "xchgb %[v], %[pDest]" : [v] "+q" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } template static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); 
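// note: a store may not carry acquire semantics - only relaxed, release and seq_cst orders are legal here, as the assert below checks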
assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest != NULL ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange8( pDest, src, order ); } } template static inline T load8( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc != NULL ); T v = *pSrc; fence_after_load( order ); return v; } # define CDS_ATOMIC_fetch8_add_defined template static inline T fetch8_add( T volatile * pDest, T val, memory_order order ) noexcept { fence_before(order); __asm__ __volatile__ ( "lock ; xaddb %[val], %[pDest]" : [val] "+q" (val), [pDest] "+m" (*pDest) ); fence_after(order); return val; } # define CDS_ATOMIC_fetch8_sub_defined template static inline T fetch8_sub( T volatile * pDest, T val, memory_order order ) noexcept { fence_before(order); __asm__ __volatile__ ( "negb %[val] ; \n" "lock ; xaddb %[val], %[pDest]" : [val] "+q" (val), [pDest] "+m" (*pDest) ); fence_after(order); return val; } //----------------------------------------------------------------------------- // atomic flag primitives //----------------------------------------------------------------------------- typedef bool atomic_flag_type; static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { return exchange8( pFlag, true, order ); } static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { store8( pFlag, false, order ); } //----------------------------------------------------------------------------- // 16bit primitives //----------------------------------------------------------------------------- template static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 2 )); fence_before(order); __asm__ __volatile__ ( "xchgw %[v], %[pDest]" : [v] "+q" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } template static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest != NULL ); assert( cds::details::is_aligned( pDest, 2 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange16( pDest, src, order ); } } template static inline T load16( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc != NULL ); assert( cds::details::is_aligned( pSrc, 2 )); T v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 2 )); T prev = expected; fence_before(mo_success); __asm__ __volatile__ ( "lock ; cmpxchgw %[desired], %[pDest]" : [prev] "+a" (prev), [pDest] "+m" (*pDest) : [desired] "q" (desired) ); bool success = 
prev == expected; if (success) fence_after(mo_success); else { fence_after(mo_fail); expected = prev; } return success; } template static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); } # define CDS_ATOMIC_fetch16_add_defined template static inline T fetch16_add( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 2 )); fence_before(order); __asm__ __volatile__ ( "lock ; xaddw %[val], %[pDest]" : [val] "+q" (val), [pDest] "+m" (*pDest) ); fence_after(order); return val; } # define CDS_ATOMIC_fetch16_sub_defined template static inline T fetch16_sub( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 2 )); fence_before(order); __asm__ __volatile__ ( "negw %[val] ; \n" "lock ; xaddw %[val], %[pDest]" : [val] "+q" (val), [pDest] "+m" (*pDest) ); fence_after(order); return val; } //----------------------------------------------------------------------------- // 32bit primitives //----------------------------------------------------------------------------- template static inline T exchange32( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); fence_before(order); __asm__ __volatile__ ( "xchgl %[v], %[pDest]" : [v] "+r" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } template static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest != NULL ); assert( cds::details::is_aligned( pDest, 4 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange32( pDest, src, order ); } } template static inline T load32( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc != NULL ); assert( cds::details::is_aligned( pSrc, 4 )); T v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); T prev = expected; fence_before(mo_success); __asm__ __volatile__ ( "lock ; cmpxchgl %[desired], %[pDest]" : [prev] "+a" (prev), [pDest] "+m" (*pDest) : [desired] "r" (desired) ); bool success = prev == expected; if (success) fence_after(mo_success); else { fence_after(mo_fail); expected = prev; } return success; } template static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); } // fetch_xxx may be emulated via cas32 // If the platform has special fetch_xxx instruction // then it should define CDS_ATOMIC_fetch32_xxx_defined macro # define CDS_ATOMIC_fetch32_add_defined template static inline T 
fetch32_add( T volatile * pDest, T v, memory_order order) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); fence_before(order); __asm__ __volatile__ ( "lock ; xaddl %[v], %[pDest]" : [v] "+r" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } # define CDS_ATOMIC_fetch32_sub_defined template static inline T fetch32_sub( T volatile * pDest, T v, memory_order order) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); fence_before(order); __asm__ __volatile__ ( "negl %[v] ; \n" "lock ; xaddl %[v], %[pDest]" : [v] "+r" (v), [pDest] "+m" (*pDest) ); fence_after(order); return v; } }}} // namespace platform::gcc::x86 }} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H libcds-2.3.3/cds/compiler/icl/000077500000000000000000000000001341244201700161075ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/icl/compiler_barriers.h000066400000000000000000000017221341244201700217650ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H #define CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H #if defined(_MSC_VER) && _MSC_VER < 1700 // VC++ up to vc10 # include # pragma intrinsic(_ReadWriteBarrier) # pragma intrinsic(_ReadBarrier) # pragma intrinsic(_WriteBarrier) # define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier() # define CDS_COMPILER_R_BARRIER _ReadBarrier() # define CDS_COMPILER_W_BARRIER _WriteBarrier() #else // MS VC11+, linux # include # define CDS_COMPILER_RW_BARRIER std::atomic_thread_fence( std::memory_order_acq_rel ) # define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER # define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER #endif #endif // #ifndef CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H libcds-2.3.3/cds/compiler/icl/defs.h000066400000000000000000000077531341244201700172150ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_ICL_DEFS_H #define CDSLIB_COMPILER_ICL_DEFS_H //@cond // Compiler version #ifdef __ICL # define CDS_COMPILER_VERSION __ICL #else # define CDS_COMPILER_VERSION __INTEL_COMPILER #endif // Compiler name // Supported compilers: MS VC 2008, 2010, 2012 // # define CDS_COMPILER__NAME "Intel C++" # define CDS_COMPILER__NICK "icl" // OS name #if defined(_WIN64) # define CDS_OS_INTERFACE CDS_OSI_WINDOWS # define CDS_OS_TYPE CDS_OS_WIN64 # define CDS_OS__NAME "Win64" # define CDS_OS__NICK "Win64" #elif defined(_WIN32) # define CDS_OS_INTERFACE CDS_OSI_WINDOWS # define CDS_OS_TYPE CDS_OS_WIN32 # define CDS_OS__NAME "Win32" # define CDS_OS__NICK "Win32" #elif defined( __linux__ ) # define CDS_OS_INTERFACE CDS_OSI_UNIX # define CDS_OS_TYPE CDS_OS_LINUX # define CDS_OS__NAME "linux" # define CDS_OS__NICK "linux" #endif // Processor architecture #if defined(_M_X64) || defined(_M_AMD64) || defined(__amd64__) || defined(__amd64) # define CDS_BUILD_BITS 64 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 # define CDS_PROCESSOR__NAME "AMD64" # define CDS_PROCESSOR__NICK "amd64" #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) # define CDS_BUILD_BITS 32 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 # define CDS_PROCESSOR__NAME "Intel x86" # define CDS_PROCESSOR__NICK "x86" #else # define CDS_BUILD_BITS -1 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN # define CDS_PROCESSOR__NAME "<>" # error Intel C++ compiler is supported for x86 only #endif #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # define __attribute__( _x ) #endif #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # ifdef CDS_BUILD_LIB # define CDS_EXPORT_API __declspec(dllexport) # else # define CDS_EXPORT_API __declspec(dllimport) # endif #endif #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # define alignof __alignof #else # define alignof __alignof__ #endif // ************************************************* // Alignment macro #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # define CDS_TYPE_ALIGNMENT(n) __declspec( align(n)) # define CDS_DATA_ALIGNMENT(n) __declspec( align(n)) # define CDS_CLASS_ALIGNMENT(n) __declspec( align(n)) #else # define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) # define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) # define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) #endif // Attributes #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # define CDS_DEPRECATED( reason ) __declspec(deprecated( reason )) # define CDS_NORETURN __declspec(noreturn) #else # define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason ))) # define CDS_NORETURN __attribute__((__noreturn__)) #endif // Exceptions #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # if defined( _CPPUNWIND ) # define CDS_EXCEPTION_ENABLED # endif #else # if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1 # define CDS_EXCEPTION_ENABLED # endif #endif // Byte order #if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN) # if CDS_OS_INTERFACE == CDS_OSI_WINDOWS # define CDS_ARCH_LITTLE_ENDIAN # else # ifdef __BYTE_ORDER__ # if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define CDS_ARCH_LITTLE_ENDIAN # elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define CDS_ARCH_BIG_ENDIAN # endif # else # warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)" # endif # endif #endif // Sanitizer attributes (not supported) #define CDS_SUPPRESS_SANITIZE( ... 
) #include //@endcond #endif // #ifndef CDSLIB_COMPILER_VC_DEFS_H libcds-2.3.3/cds/compiler/vc/000077500000000000000000000000001341244201700157505ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/vc/amd64/000077500000000000000000000000001341244201700166635ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/vc/amd64/backoff.h000066400000000000000000000014771341244201700204400ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_AMD64_BACKOFF_H #define CDSLIB_COMPILER_VC_AMD64_BACKOFF_H //@cond none #include namespace cds { namespace backoff { namespace vc { namespace amd64 { # define CDS_backoff_hint_defined static inline void backoff_hint() { _mm_pause(); } # define CDS_backoff_nop_defined static inline void backoff_nop() { __nop(); } }} // namespace vc::amd64 namespace platform { using namespace vc::amd64; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_VC_AMD64_BACKOFF_H libcds-2.3.3/cds/compiler/vc/amd64/bitop.h000066400000000000000000000075771341244201700201710ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_AMD64_BITOP_H #define CDSLIB_COMPILER_VC_AMD64_BITOP_H #if _MSC_VER == 1500 /* VC 2008 bug: math.h(136) : warning C4985: 'ceil': attributes not present on previous declaration. intrin.h(142) : see declaration of 'ceil' See http://connect.microsoft.com/VisualStudio/feedback/details/381422/warning-of-attributes-not-present-on-previous-declaration-on-ceil-using-both-math-h-and-intrin-h */ # pragma warning(push) # pragma warning(disable: 4985) # include # pragma warning(pop) #else # include #endif #pragma intrinsic(_BitScanReverse) #pragma intrinsic(_BitScanForward) #pragma intrinsic(_BitScanReverse64) #pragma intrinsic(_BitScanForward64) //@cond none namespace cds { namespace bitop { namespace platform { namespace vc { namespace amd64 { // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 # define cds_bitop_msb32_DEFINED static inline int msb32( uint32_t nArg ) { unsigned long nIndex; if ( _BitScanReverse( &nIndex, nArg )) return (int) nIndex + 1; return 0; } # define cds_bitop_msb32nz_DEFINED static inline int msb32nz( uint32_t nArg ) { assert( nArg != 0 ); unsigned long nIndex; _BitScanReverse( &nIndex, nArg ); return (int) nIndex; } // LSB - return index (1..32) of least significant bit in nArg. 
If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
        static inline int lsb32( uint32_t nArg )
        {
            unsigned long nIndex;
            if ( _BitScanForward( &nIndex, nArg ))
                return (int) nIndex + 1;
            return 0;
        }

# define cds_bitop_lsb32nz_DEFINED
        static inline int lsb32nz( uint32_t nArg )
        {
            assert( nArg != 0 );
            unsigned long nIndex;
            _BitScanForward( &nIndex, nArg );
            return (int) nIndex;
        }

# define cds_bitop_msb64_DEFINED
        static inline int msb64( uint64_t nArg )
        {
            unsigned long nIndex;
            if ( _BitScanReverse64( &nIndex, nArg ))
                return (int) nIndex + 1;
            return 0;
        }

# define cds_bitop_msb64nz_DEFINED
        static inline int msb64nz( uint64_t nArg )
        {
            assert( nArg != 0 );
            unsigned long nIndex;
            _BitScanReverse64( &nIndex, nArg );
            return (int) nIndex;
        }

# define cds_bitop_lsb64_DEFINED
        static inline int lsb64( uint64_t nArg )
        {
            unsigned long nIndex;
            if ( _BitScanForward64( &nIndex, nArg ))
                return (int) nIndex + 1;
            return 0;
        }

# define cds_bitop_lsb64nz_DEFINED
        static inline int lsb64nz( uint64_t nArg )
        {
            assert( nArg != 0 );
            unsigned long nIndex;
            _BitScanForward64( &nIndex, nArg );
            return (int) nIndex;
        }

# define cds_bitop_complement32_DEFINED
        static inline bool complement32( uint32_t * pArg, unsigned int nBit )
        {
            return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
        }

# define cds_bitop_complement64_DEFINED
        static inline bool complement64( uint64_t * pArg, unsigned int nBit )
        {
            return _bittestandcomplement64( reinterpret_cast<__int64 *>( pArg ), nBit ) != 0;
        }

    }} // namespace vc::amd64
    using namespace vc::amd64;
}}} // namespace cds::bitop::platform
//@endcond

#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_BITOP_H
libcds-2.3.3/cds/compiler/vc/amd64/cxx11_atomic.h000066400000000000000000000532671341244201700213510ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H #define CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H #include #include // for 128bit atomic load/store #include #pragma intrinsic( _InterlockedIncrement ) #pragma intrinsic( _InterlockedDecrement ) #pragma intrinsic( _InterlockedCompareExchange ) #pragma intrinsic( _InterlockedCompareExchangePointer ) #pragma intrinsic( _InterlockedCompareExchange16 ) #pragma intrinsic( _InterlockedCompareExchange64 ) #pragma intrinsic( _InterlockedExchange ) #pragma intrinsic( _InterlockedExchange64 ) #pragma intrinsic( _InterlockedExchangePointer ) #pragma intrinsic( _InterlockedExchangeAdd ) #pragma intrinsic( _InterlockedExchangeAdd64 ) //#pragma intrinsic( _InterlockedAnd ) //#pragma intrinsic( _InterlockedOr ) //#pragma intrinsic( _InterlockedXor ) //#pragma intrinsic( _InterlockedAnd64 ) //#pragma intrinsic( _InterlockedOr64 ) //#pragma intrinsic( _InterlockedXor64 ) #pragma intrinsic( _interlockedbittestandset ) #if _MSC_VER >= 1600 # pragma intrinsic( _InterlockedCompareExchange8 ) # pragma intrinsic( _InterlockedExchange8 ) # pragma intrinsic( _InterlockedExchange16 ) #endif //@cond namespace cds { namespace cxx11_atomic { namespace platform { inline namespace vc { inline namespace amd64 { static inline void fence_before( memory_order order ) noexcept { switch(order) { case memory_order_relaxed: case memory_order_acquire: case memory_order_consume: break; case memory_order_release: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; } } static inline void fence_after( memory_order order ) noexcept { switch(order) { case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_relaxed: case memory_order_consume: case memory_order_release: break; case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; } } static inline void full_fence() { // MS VC does not support inline assembler in C code. 
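// (the 64-bit cl.exe compiler rejects __asm blocks entirely, unlike 32-bit MSVC)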
// So, we use InterlockedExchange for full fence instead of mfence inst long t; _InterlockedExchange( &t, 0 ); } static inline void fence_after_load(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_release: break; case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_consume: break; case memory_order_seq_cst: full_fence(); break; default:; } } //----------------------------------------------------------------------------- // fences //----------------------------------------------------------------------------- static inline void thread_fence(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: break; case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: full_fence(); break; default:; } } static inline void signal_fence(memory_order order) noexcept { // C++11: 29.8.8: only compiler optimization, no hardware instructions switch(order) { case memory_order_relaxed: break; case memory_order_consume: case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; default:; } } //----------------------------------------------------------------------------- // atomic flag primitives //----------------------------------------------------------------------------- typedef unsigned char atomic_flag_type; static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) noexcept { return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0; } static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { assert( order != memory_order_acquire && order != memory_order_acq_rel ); fence_before( order ); *pFlag = 0; fence_after( order ); } //----------------------------------------------------------------------------- // 8bit primitives //----------------------------------------------------------------------------- #if _MSC_VER >= 1600 # pragma warning(push) // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) # pragma warning( disable: 4800 ) #endif template static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); # if _MSC_VER >= 1600 // VC 2010 + T prev = expected; expected = (T) _InterlockedCompareExchange8( (char volatile*) pDest, (char) desired, (char) expected ); return expected == prev; # else // VC 2008 unsigned int * pnDest = (unsigned int *)( ((unsigned __int64) pDest) & ~(unsigned __int64(3))); unsigned int nOffset = ((unsigned __int64) pDest) & 3; unsigned int nExpected; unsigned int nDesired; for (;;) { nExpected = nDesired = *pnDest; memcpy( reinterpret_cast(&nExpected) + nOffset, &expected, sizeof(T)); memcpy( reinterpret_cast(&nDesired) + nOffset, &desired, sizeof(T)); unsigned int nPrev = (unsigned int) _InterlockedCompareExchange( (long *) pnDest, (long) nDesired, (long) nExpected ); if ( nPrev == nExpected ) return true; T nByte; memcpy( &nByte, reinterpret_cast(&nPrev) + nOffset, sizeof(T)); if ( nByte != expected ) { expected = nByte; return false; } } # endif } #if _MSC_VER >= 1600 # pragma warning(pop) #endif template static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, 
memory_order mo_fail ) noexcept { return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); } #if _MSC_VER >= 1600 # pragma warning(push) // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) # pragma warning( disable: 4800 ) #endif template static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); # if _MSC_VER >= 1600 CDS_UNUSED(order); return (T) _InterlockedExchange8( (char volatile *) pDest, (char) v ); # else T expected = *pDest; do {} while ( !cas8_strong( pDest, expected, v, order, memory_order_relaxed )); return expected; # endif } #if _MSC_VER >= 1600 # pragma warning(pop) #endif template static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange8( pDest, src, order ); } } template static inline T load8( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); T v = *pSrc; fence_after_load( order ); return v; } //----------------------------------------------------------------------------- // 16bit primitives //----------------------------------------------------------------------------- template static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 2 )); // _InterlockedCompareExchange behave as read-write memory barriers T prev = expected; expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected ); return expected == prev; } template static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 2 )); # if _MSC_VER >= 1600 order; return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v ); # else T expected = *pDest; do {} while ( !cas16_strong( pDest, expected, v, order, memory_order_relaxed )); return expected; # endif } template static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 2 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange16( pDest, src, order ); } } template static inline T load16( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc 
); assert( cds::details::is_aligned( pSrc, 2 )); T v = *pSrc; fence_after_load( order ); return v; } //----------------------------------------------------------------------------- // 32bit primitives //----------------------------------------------------------------------------- template static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); return (T) _InterlockedExchange( (long *) pDest, (long) v ); } template static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 4 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange32( pDest, src, order ); } } template static inline T load32( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); assert( cds::details::is_aligned( pSrc, 4 )); T v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); // _InterlockedCompareExchange behave as read-write memory barriers T prev = expected; expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected ); return expected == prev; } template static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); } // fetch_xxx may be emulated via cas32 // If the platform has special fetch_xxx instruction // then it should define CDS_ATOMIC_fetch32_xxx_defined macro # define CDS_ATOMIC_fetch32_add_defined template static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) noexcept { static_assert( sizeof(T) == 4, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 4 )); // _InterlockedExchangeAdd behave as read-write memory barriers return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v ); } //----------------------------------------------------------------------------- // 64bit primitives //----------------------------------------------------------------------------- template static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); // _InterlockedCompareExchange behave as read-write memory barriers T prev = expected; expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected ); return expected == prev; } template static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T load64( T volatile const * pSrc, 
memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); assert( cds::details::is_aligned( pSrc, 8 )); T v = *pSrc; fence_after_load( order ); return v; } template static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); T cur = load64( pDest, memory_order_relaxed ); do { } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); return cur; } template static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 8 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = val; } else { exchange64( pDest, val, order ); } } # define CDS_ATOMIC_fetch64_add_defined template static inline T fetch64_add( T volatile * pDest, T v, memory_order /*order*/) noexcept { static_assert( sizeof(T) == 8, "Illegal size of operand" ); assert( cds::details::is_aligned( pDest, 8 )); // _InterlockedExchangeAdd64 behave as read-write memory barriers return (T) _InterlockedExchangeAdd64( (__int64 *) pDest, (__int64) v ); } //----------------------------------------------------------------------------- // pointer primitives //----------------------------------------------------------------------------- template static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order /*order*/ ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast(v)); } template static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange_ptr( pDest, src, order ); } } template static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); T * v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); // _InterlockedCompareExchangePointer behave as read-write memory barriers T * prev = expected; expected = (T *) _InterlockedCompareExchangePointer( (void * volatile *) pDest, (void *) desired, (void *) expected ); return expected == prev; } template static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); } }} // namespace vc::amd64 } // namespace platform }} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef 
CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H libcds-2.3.3/cds/compiler/vc/compiler_barriers.h000066400000000000000000000011551341244201700216260ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H #define CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H #include #pragma intrinsic(_ReadWriteBarrier) #pragma intrinsic(_ReadBarrier) #pragma intrinsic(_WriteBarrier) #define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier() #define CDS_COMPILER_R_BARRIER _ReadBarrier() #define CDS_COMPILER_W_BARRIER _WriteBarrier() #endif // #ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H libcds-2.3.3/cds/compiler/vc/defs.h000066400000000000000000000102311341244201700170370ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_DEFS_H #define CDSLIB_COMPILER_VC_DEFS_H //@cond // Compiler version #define CDS_COMPILER_VERSION _MSC_VER // Compiler name // Supported compilers: MS VC 2015 + // C++ compiler versions: #define CDS_COMPILER_MSVC14 1900 // 2015 vc14 #define CDS_COMPILER_MSVC14_1 1910 // 2017 vc14.1 #define CDS_COMPILER_MSVC14_1_3 1911 // 2017 vc14.1 (VS 15.3) #define CDS_COMPILER_MSVC14_1_5 1912 // 2017 vc14.1 (VS 15.5) #define CDS_COMPILER_MSVC15 2000 // next Visual Studio #if CDS_COMPILER_VERSION < CDS_COMPILER_MSVC14 # error "Only MS Visual C++ 14 (2015) and above is supported" #endif #if _MSC_VER == 1900 # define CDS_COMPILER__NAME "MS Visual C++ 2015" # define CDS_COMPILER__NICK "vc14" # define CDS_COMPILER_LIBCDS_SUFFIX "vcv140" #elif _MSC_VER < 2000 # define CDS_COMPILER__NAME "MS Visual C++ 2017" # define CDS_COMPILER__NICK "vc141" # define CDS_COMPILER_LIBCDS_SUFFIX "vcv141" #else # define CDS_COMPILER__NAME "MS Visual C++" # define CDS_COMPILER__NICK "msvc" # define CDS_COMPILER_LIBCDS_SUFFIX "vc" #endif // OS interface #define CDS_OS_INTERFACE CDS_OSI_WINDOWS // OS name #if defined(_WIN64) # define CDS_OS_TYPE CDS_OS_WIN64 # define CDS_OS__NAME "Win64" # define CDS_OS__NICK "Win64" #elif defined(_WIN32) # define CDS_OS_TYPE CDS_OS_WIN32 # define CDS_OS__NAME "Win32" # define CDS_OS__NICK "Win32" #endif // Processor architecture #ifdef _M_IX86 # define CDS_BUILD_BITS 32 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 # define CDS_PROCESSOR__NAME "Intel x86" # define CDS_PROCESSOR__NICK "x86" #elif _M_X64 # define CDS_BUILD_BITS 64 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 # define CDS_PROCESSOR__NAME "AMD64" # define CDS_PROCESSOR__NICK "amd64" #else # define CDS_BUILD_BITS -1 # define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN # define CDS_PROCESSOR__NAME "<>" # error Microsoft Visual C++ compiler is supported for x86 only #endif #define __attribute__( _x ) #ifndef CDS_BUILD_STATIC_LIB # ifdef CDS_BUILD_LIB # define CDS_EXPORT_API __declspec(dllexport) # else # define CDS_EXPORT_API __declspec(dllimport) # endif #else # define CDS_EXPORT_API #endif #define alignof __alignof // Memory leaks detection (debug build only) #ifdef _DEBUG # define _CRTDBG_MAP_ALLOC # define _CRTDBG_MAPALLOC # include # include # define CDS_MSVC_MEMORY_LEAKS_DETECTING_ENABLED #endif // ************************************************* // Alignment macro #define CDS_TYPE_ALIGNMENT(n) __declspec( align(n)) #define CDS_DATA_ALIGNMENT(n) 
__declspec( align(n)) #define CDS_CLASS_ALIGNMENT(n) __declspec( align(n)) // Attributes #define CDS_DEPRECATED( reason ) [[deprecated( reason )]] #define CDS_NORETURN __declspec(noreturn) // Exceptions #if defined( _CPPUNWIND ) # define CDS_EXCEPTION_ENABLED #endif // double-width CAS support //#define CDS_DCAS_SUPPORT // Byte order // It seems, MSVC works only on little-endian architecture?.. #if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN) # define CDS_ARCH_LITTLE_ENDIAN #endif //if constexpr support (C++17) #ifndef constexpr_if // Standard way to check if the compiler supports "if constexpr" // Of course, MS VC doesn't support any standard way # if defined __cpp_if_constexpr # if __cpp_if_constexpr >= 201606 # define constexpr_if if constexpr # endif # elif CDS_COMPILER_VERSION >= CDS_COMPILER_MSVC14_1_3 && _MSVC_LANG > CDS_CPLUSPLUS_14 // MS-specific WTF. // Don't work in /std:c++17 because /std:c++17 key defines _MSVC_LANG=201402 (c++14) in VC 15.3 # define constexpr_if if constexpr # endif #endif // Sanitizer attributes (not supported) #define CDS_SUPPRESS_SANITIZE( ... ) #include //@endcond #endif // #ifndef CDSLIB_COMPILER_VC_DEFS_H libcds-2.3.3/cds/compiler/vc/x86/000077500000000000000000000000001341244201700163755ustar00rootroot00000000000000libcds-2.3.3/cds/compiler/vc/x86/backoff.h000066400000000000000000000014631341244201700201450ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_X86_BACKOFF_H #define CDSLIB_COMPILER_VC_X86_BACKOFF_H //@cond none #include namespace cds { namespace backoff { namespace vc { namespace x86 { # define CDS_backoff_hint_defined static inline void backoff_hint() { _mm_pause(); } # define CDS_backoff_nop_defined static inline void backoff_nop() { __nop(); } }} // namespace vc::x86 namespace platform { using namespace vc::x86; } }} // namespace cds::backoff //@endcond #endif // #ifndef CDSLIB_COMPILER_VC_X86_BACKOFF_H libcds-2.3.3/cds/compiler/vc/x86/bitop.h000066400000000000000000000053001341244201700176610ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_COMPILER_VC_X86_BITOP_H #define CDSLIB_COMPILER_VC_X86_BITOP_H #include #pragma intrinsic(_BitScanReverse) #pragma intrinsic(_BitScanForward) //@cond none namespace cds { namespace bitop { namespace platform { namespace vc { namespace x86 { // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 # define cds_bitop_msb32_DEFINED static inline int msb32( uint32_t nArg ) { unsigned long nIndex; if ( _BitScanReverse( &nIndex, nArg )) return (int) nIndex + 1; return 0; } # define cds_bitop_msb32nz_DEFINED static inline int msb32nz( uint32_t nArg ) { assert( nArg != 0 ); unsigned long nIndex; _BitScanReverse( &nIndex, nArg ); return (int) nIndex; } // LSB - return index (1..32) of least significant bit in nArg. 
If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
        static inline int lsb32( uint32_t nArg )
        {
            unsigned long nIndex;
            if ( _BitScanForward( &nIndex, nArg ))
                return (int) nIndex + 1;
            return 0;
        }

# define cds_bitop_lsb32nz_DEFINED
        static inline int lsb32nz( uint32_t nArg )
        {
            assert( nArg != 0 );
            unsigned long nIndex;
            _BitScanForward( &nIndex, nArg );
            return (int) nIndex;
        }

        // bswap - Reverses the byte order of a 32-bit word
# define cds_bitop_bswap32_DEFINED
        static inline uint32_t bswap32( uint32_t nArg )
        {
            __asm {
                mov eax, nArg;
                bswap eax;
            }
        }

# define cds_bitop_complement32_DEFINED
        static inline bool complement32( uint32_t * pArg, unsigned int nBit )
        {
            return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
        }

# define cds_bitop_complement64_DEFINED
        static inline bool complement64( uint64_t * pArg, unsigned int nBit )
        {
            if ( nBit < 32 )
                return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
            else
                return _bittestandcomplement( reinterpret_cast<long *>( pArg ) + 1, nBit - 32 ) != 0;
        }

    }} // namespace vc::x86
    using namespace vc::x86;
}}} // namespace cds::bitop::platform
//@endcond

#endif // #ifndef CDSLIB_COMPILER_VC_X86_BITOP_H
libcds-2.3.3/cds/compiler/vc/x86/cxx11_atomic.h000066400000000000000000000506551341244201700210570ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H

#include
#include    // for 64bit atomic load/store
#include

#pragma intrinsic( _InterlockedIncrement )
#pragma intrinsic( _InterlockedDecrement )
#pragma intrinsic( _InterlockedCompareExchange )
//#pragma intrinsic( _InterlockedCompareExchangePointer )   // On the x86 architecture, _InterlockedCompareExchangePointer is a macro that calls _InterlockedCompareExchange
#pragma intrinsic( _InterlockedCompareExchange16 )
#pragma intrinsic( _InterlockedCompareExchange64 )
#pragma intrinsic( _InterlockedExchange )
//#pragma intrinsic( _InterlockedExchangePointer )  // On the x86 architecture, _InterlockedExchangePointer is a macro that calls _InterlockedExchange
#pragma intrinsic( _InterlockedExchangeAdd )
#pragma intrinsic( _InterlockedXor )
#pragma intrinsic( _InterlockedOr )
#pragma intrinsic( _InterlockedAnd )
#pragma intrinsic( _interlockedbittestandset )
#if _MSC_VER >= 1600
# pragma intrinsic( _InterlockedCompareExchange8 )
# pragma intrinsic( _InterlockedExchange8 )
# pragma intrinsic( _InterlockedExchange16 )
#endif

//@cond
namespace cds { namespace cxx11_atomic {
    namespace platform { inline namespace vc { inline namespace x86 {

        static inline void fence_before( memory_order order ) noexcept
        {
            switch(order) {
            case memory_order_relaxed:
            case memory_order_acquire:
            case memory_order_consume:
                break;
            case memory_order_release:
            case memory_order_acq_rel:
                CDS_COMPILER_RW_BARRIER;
                break;
            case memory_order_seq_cst:
                CDS_COMPILER_RW_BARRIER;
                break;
            }
        }

        static inline void fence_after( memory_order order ) noexcept
        {
            switch(order) {
            case memory_order_acquire:
            case memory_order_acq_rel:
                CDS_COMPILER_RW_BARRIER;
                break;
            case memory_order_relaxed:
            case memory_order_consume:
            case memory_order_release:
                break;
            case memory_order_seq_cst:
                CDS_COMPILER_RW_BARRIER;
                break;
            }
        }

        static inline void fence_after_load(memory_order order) noexcept
        {
            switch(order) {
            case memory_order_relaxed:
            case memory_order_release:
                break;
            case memory_order_acquire:
            case memory_order_acq_rel:
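                // x86 loads already have acquire semantics at the hardware level,
                // so only compiler reordering has to be fenced here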
CDS_COMPILER_RW_BARRIER; break; case memory_order_consume: break; case memory_order_seq_cst: __asm { mfence }; break; default:; } } //----------------------------------------------------------------------------- // fences //----------------------------------------------------------------------------- static inline void thread_fence(memory_order order) noexcept { switch(order) { case memory_order_relaxed: case memory_order_consume: break; case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: CDS_COMPILER_RW_BARRIER; break; case memory_order_seq_cst: __asm { mfence }; break; default:; } } static inline void signal_fence(memory_order order) noexcept { // C++11: 29.8.8: only compiler optimization, no hardware instructions switch(order) { case memory_order_relaxed: break; case memory_order_consume: case memory_order_release: case memory_order_acquire: case memory_order_acq_rel: case memory_order_seq_cst: CDS_COMPILER_RW_BARRIER; break; default:; } } //----------------------------------------------------------------------------- // atomic flag primitives //----------------------------------------------------------------------------- typedef unsigned char atomic_flag_type; static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) noexcept { return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0; } static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept { assert( order != memory_order_acquire && order != memory_order_acq_rel ); fence_before( order ); *pFlag = 0; fence_after( order ); } //----------------------------------------------------------------------------- // 8bit primitives //----------------------------------------------------------------------------- #if _MSC_VER >= 1600 # pragma warning(push) // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) # pragma warning( disable: 4800 ) #endif template static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 1, "Illegal operand size" ); # if _MSC_VER >= 1600 T prev = expected; expected = (T) _InterlockedCompareExchange8( reinterpret_cast(pDest), (char) desired, (char) expected ); return expected == prev; # else bool bRet = false; __asm { mov ecx, pDest; mov edx, expected; mov al, byte ptr [edx]; mov ah, desired; lock cmpxchg byte ptr [ecx], ah; mov byte ptr [edx], al; setz bRet; } return bRet; # endif } #if _MSC_VER >= 1600 # pragma warning(pop) #endif template static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); } #if _MSC_VER >= 1600 # pragma warning(push) // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) # pragma warning( disable: 4800 ) #endif template static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal operand size" ); # if _MSC_VER >= 1600 return (T) _InterlockedExchange8( reinterpret_cast(pDest), (char) v ); # else __asm { mov al, v; mov ecx, pDest; lock xchg byte ptr [ecx], al; } # endif } #if _MSC_VER >= 1600 # pragma warning(pop) #endif template static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal operand size" ); assert( order == 
memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange8( pDest, src, order ); } } template static inline T load8( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 1, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); T v = *pSrc; fence_after_load( order ); return v; } //----------------------------------------------------------------------------- // 16bit primitives //----------------------------------------------------------------------------- template static inline T exchange16( T volatile * pDest, T v, memory_order /*order*/ ) noexcept { static_assert( sizeof(T) == 2, "Illegal operand size" ); assert( cds::details::is_aligned( pDest, 2 )); # if _MSC_VER >= 1600 return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v ); # else __asm { mov ax, v; mov ecx, pDest; lock xchg word ptr [ecx], ax; } # endif } template static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 2 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange16( pDest, src, order ); } } template static inline T load16( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 2, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); assert( cds::details::is_aligned( pSrc, 2 )); T v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T) == 2, "Illegal operand size" ); assert( cds::details::is_aligned( pDest, 2 )); // _InterlockedCompareExchange behave as read-write memory barriers T prev = expected; expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected ); return expected == prev; } template static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); } //----------------------------------------------------------------------------- // 32bit primitives //----------------------------------------------------------------------------- template static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) noexcept { static_assert( sizeof(T) == 4, "Illegal operand size" ); assert( cds::details::is_aligned( pDest, 4 )); return (T) _InterlockedExchange( (long *) pDest, (long) v ); } template static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); assert( cds::details::is_aligned( pDest, 4 )); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange32( pDest, src, 
order ); } } template static inline T load32( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 4, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); assert( cds::details::is_aligned( pSrc, 4 )); T v( *pSrc ); fence_after_load( order ); return v; } template static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept { static_assert( sizeof(T) == 4, "Illegal operand size" ); assert( cds::details::is_aligned( pDest, 4 )); // _InterlockedCompareExchange behave as read-write memory barriers T prev = expected; expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected ); return expected == prev; } template static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); } // fetch_xxx may be emulated via cas32 // If the platform has special fetch_xxx instruction // then it should define CDS_ATOMIC_fetch32_xxx_defined macro # define CDS_ATOMIC_fetch32_add_defined template static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) noexcept { static_assert( sizeof(T) == 4, "Illegal operand size" ); assert( cds::details::is_aligned( pDest, 4 )); // _InterlockedExchangeAdd behave as read-write memory barriers return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v ); } //----------------------------------------------------------------------------- // 64bit primitives //----------------------------------------------------------------------------- template static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T) == 8, "Illegal operand size" ); assert( cds::details::is_aligned( pDest, 8 )); // _InterlockedCompareExchange behave as read-write memory barriers T prev = expected; expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected ); return expected == prev; } template static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); } template static inline T load64( T volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); assert( cds::details::is_aligned( pSrc, 8 )); // Atomically loads 64bit value by SSE intrinsics __m128i volatile v = _mm_loadl_epi64( (__m128i const *) pSrc ); fence_after_load( order ); return (T) v.m128i_i64[0]; } template static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal operand size" ); T cur = load64( pDest, memory_order_relaxed ); do { } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); return cur; } template static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept { static_assert( sizeof(T) == 8, "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); 
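/* Note: on 32-bit x86 an ordinary 64-bit store is not atomic. The non-seq_cst path below issues a single SSE movq store via _mm_storel_epi64(); for memory_order_seq_cst the code falls back to exchange64(), a CAS-based locked RMW that also serves as a full memory fence. */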
assert( pDest ); assert( cds::details::is_aligned( pDest, 8 )); if ( order != memory_order_seq_cst ) { __m128i v; v.m128i_i64[0] = val; fence_before( order ); _mm_storel_epi64( (__m128i *) pDest, v ); } else { exchange64( pDest, val, order ); } } //----------------------------------------------------------------------------- // pointer primitives //----------------------------------------------------------------------------- template static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); return (T *) _InterlockedExchange( (long volatile *) pDest, (uintptr_t) v ); //return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast(v)); } template static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_release || order == memory_order_seq_cst ); assert( pDest ); if ( order != memory_order_seq_cst ) { fence_before( order ); *pDest = src; } else { exchange_ptr( pDest, src, order ); } } template static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); assert( order == memory_order_relaxed || order == memory_order_consume || order == memory_order_acquire || order == memory_order_seq_cst ); assert( pSrc ); T * v = *pSrc; fence_after_load( order ); return v; } template static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); // _InterlockedCompareExchangePointer behave as read-write memory barriers T * prev = expected; expected = (T *) _InterlockedCompareExchange( (long volatile *) pDest, (uintptr_t) desired, (uintptr_t) prev ); return expected == prev; } template static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept { return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); } }} // namespace vc::x86 } // namespace platform }} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H libcds-2.3.3/cds/container/000077500000000000000000000000001341244201700155105ustar00rootroot00000000000000libcds-2.3.3/cds/container/basket_queue.h000066400000000000000000000435421341244201700203460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_BASKET_QUEUE_H #define CDSLIB_CONTAINER_BASKET_QUEUE_H #include #include #include namespace cds { namespace container { /// BasketQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace basket_queue { /// Internal statistics template ::counter_type > using stat = cds::intrusive::basket_queue::stat< Counter >; /// Dummy internal statistics typedef cds::intrusive::basket_queue::empty_stat empty_stat; /// BasketQueue default type traits struct traits { /// Node allocator typedef CDS_DEFAULT_ALLOCATOR allocator; /// Back-off strategy typedef cds::backoff::empty back_off; /// Item counting feature; by default, disabled. 
Use \p cds::atomicity::item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics (by default, disabled) /** Possible option values are: \p basket_queue::stat, \p basket_queue::empty_stat (the default), user-provided class that supports \p %basket_queue::stat interface. */ typedef basket_queue::empty_stat stat; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; }; /// Metafunction converting option list to \p basket_queue::traits /** Supported \p Options are: - \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled). To enable item counting use \p cds::atomicity::item_counter - \p opt::stat - the type to gather internal statistics. Possible statistics types are: \p basket_queue::stat, \p basket_queue::empty_stat, user-provided class that supports \p %basket_queue::stat interface. Default is \p %basket_queue::empty_stat. - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). Example: declare \p %BasketQueue with item counting and internal statistics \code typedef cds::container::BasketQueue< cds::gc::HP, Foo, typename cds::container::basket_queue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::stat< cds::intrusive::basket_queue::stat<> > >::type > myQueue; \endcode */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace basket_queue //@cond namespace details { template <typename GC, typename T, typename Traits> struct make_basket_queue { typedef GC gc; typedef T value_type; typedef Traits traits; struct node_type: public intrusive::basket_queue::node< gc > { value_type m_value; node_type( const value_type& val ) : m_value( val ) {} template <typename... Args> node_type( Args&&... args ) : m_value( std::forward<Args>(args)...)
{} }; typedef typename std::allocator_traits< typename traits::allocator >::template rebind_alloc< node_type > allocator_type; //typedef typename traits::allocator::template rebind::other allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; struct intrusive_traits : public traits { typedef cds::intrusive::basket_queue::base_hook< opt::gc > hook; typedef node_deallocator disposer; static constexpr const cds::intrusive::opt::link_check_type link_checker = cds::intrusive::basket_queue::traits::link_checker; }; typedef cds::intrusive::BasketQueue< gc, node_type, intrusive_traits > type; }; } //@endcond /// Basket lock-free queue (non-intrusive variant) /** @ingroup cds_nonintrusive_queue It is non-intrusive version of basket queue algorithm based on intrusive::BasketQueue counterpart. \par Source: [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue" Key idea In the 'basket' approach, instead of the traditional ordered list of nodes, the queue consists of an ordered list of groups of nodes (logical baskets). The order of nodes in each basket need not be specified, and in fact, it is easiest to maintain them in LIFO order. The baskets fulfill the following basic rules: - Each basket has a time interval in which all its nodes' enqueue operations overlap. - The baskets are ordered by the order of their respective time intervals. - For each basket, its nodes' dequeue operations occur after its time interval. - The dequeue operations are performed according to the order of baskets. Two properties define the FIFO order of nodes: - The order of nodes in a basket is not specified. - The order of nodes in different baskets is the FIFO-order of their respective baskets. In algorithms such as the MS-queue or optimistic queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the queue's tail pointer, and all the threads that fail on a particular CAS operation (and also the winner of that CAS) overlap in time. In particular, they share the time interval of the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of the queue may be inserted into the same basket. By integrating the basket-mechanism as the back-off mechanism, the time usually spent on backing-off before trying to link onto the new tail, can now be utilized to insert the failed operations into the basket, allowing enqueues to complete sooner. In the meantime, the next successful CAS operations by enqueues allow new baskets to be formed down the list, and these can be filled concurrently. Moreover, the failed operations don't retry their link attempt on the new tail, lowering the overall contention on it. This leads to a queue algorithm that unlike all former concurrent queue algorithms requires virtually no tuning of the backoff mechanisms to reduce contention, making the algorithm an attractive out-of-the-box queue. In order to enqueue, just as in MSQueue, a thread first tries to link the new node to the last node. If it failed to do so, then another thread has already succeeded. Thus it tries to insert the new node into the new basket that was created by the winner thread. To dequeue a node, a thread first reads the head of the queue to obtain the oldest basket. It may then dequeue any node in the oldest basket. 
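For illustration only (not part of the original documentation), a minimal usage sketch; it assumes the usual libcds setup has already been done: \p cds::Initialize(), a constructed \p cds::gc::HP instance, and the current thread attached to it:
\code
#include <cds/init.h>
#include <cds/gc/hp.h>
#include <cds/container/basket_queue.h>

struct Foo { int n; };

void example()
{
    cds::container::BasketQueue< cds::gc::HP, Foo > q;
    Foo in;
    in.n = 42;
    q.enqueue( in );        // copies in into a new queue node
    Foo out;
    if ( q.dequeue( out )) {
        // out.n == 42
    }
}
\endcode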
Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP - \p T - type of value to be stored in the queue - \p Traits - queue traits, default is \p basket_queue::traits. You can use \p basket_queue::make_traits metafunction to make your traits or just derive your traits from \p %basket_queue::traits: \code struct myTraits: public cds::container::basket_queue::traits { typedef cds::intrusive::basket_queue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::BasketQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::container::BasketQueue< cds::gc::HP, Foo, typename cds::container::basket_queue::make_traits< cds::opt::stat< cds::container::basket_queue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template class BasketQueue: #ifdef CDS_DOXYGEN_INVOKED private intrusive::BasketQueue< GC, intrusive::basket_queue::node< T >, Traits > #else protected details::make_basket_queue< GC, T, Traits >::type #endif { //@cond typedef details::make_basket_queue< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: /// Rebind template arguments template struct rebind { typedef BasketQueue< GC2, T2, Traits2> other ; ///< Rebinding result }; public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< Type of value to be stored in the queue typedef Traits traits; ///< Queue's traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename base_class::stat stat; ///< Internal statistics policy used typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm protected: typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::basket_queue::node) //@cond typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; // deallocate node typedef typename base_class::node_traits node_traits; //@endcond protected: ///@cond static node_type * alloc_node() { return cxx_allocator().New(); } static node_type * alloc_node( const value_type& val ) { return cxx_allocator().New( val ); } template static node_type * alloc_node_move( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * p ) { node_deallocator()( p ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Initializes empty queue BasketQueue() {} /// Destructor clears the queue ~BasketQueue() {} /// Enqueues \p val value into the queue. /** The function makes queue node in dynamic memory calling copy constructor for \p val and then it calls \p intrusive::BasketQueue::enqueue(). Returns \p true if success, \p false otherwise. 
*/ bool enqueue( value_type const& val ) { scoped_node_ptr p( alloc_node(val)); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p val value into the queue, move semantics bool enqueue( value_type&& val ) { scoped_node_ptr p( alloc_node_move( std::move( val ))); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p data to queue using a functor /** \p Func is a functor called to create node. The functor \p f takes one argument - a reference to a new node of type \ref value_type : \code cds::container::BasketQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); \endcode */ template bool enqueue_with( Func f ) { scoped_node_ptr p( alloc_node()); f( p->m_value ); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Synonym for \p enqueue() function bool push( value_type const& val ) { return enqueue( val ); } /// Synonym for \p enqueue() function, move semantics bool push( value_type&& val ) { return enqueue( std::move( val )); } /// Synonym for \p enqueue_with() function template bool push_with( Func f ) { return enqueue_with( f ); } /// Enqueues data of type \ref value_type constructed with std::forward(args)... template bool emplace( Args&&... args ) { scoped_node_ptr p( alloc_node_move( std::forward(args)...)); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Dequeues a value from the queue /** If queue is not empty, the function returns \p true, \p dest contains copy of dequeued value. The assignment operator for \p value_type is invoked. If queue is empty, the function returns \p false, \p dest is unchanged. */ bool dequeue( value_type& dest ) { return dequeue_with( [&dest]( value_type& src ) { // TSan finds a race between this read of \p src and node_type constructor // I think, it is wrong CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; dest = std::move( src ); CDS_TSAN_ANNOTATE_IGNORE_READS_END; }); } /// Dequeues a value using a functor /** \p Func is a functor called to copy dequeued value. The functor takes one argument - a reference to removed node: \code cds:container::BasketQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode The functor is called only if the queue is not empty. */ template bool dequeue_with( Func f ) { typename base_class::dequeue_result res; if ( base_class::do_dequeue( res, true )) { f( node_traits::to_value_ptr( *res.pNext )->m_value ); return true; } return false; } /// Synonym for \p dequeue() function bool pop( value_type& dest ) { return dequeue( dest ); } /// Synonym for \p dequeue_with() function template bool pop_with( Func f ) { return dequeue_with( f ); } /// Checks if the queue is empty /** Note that this function is not \p const. The function is based on \p dequeue() algorithm. */ bool empty() { return base_class::empty(); } /// Clear the queue /** The function repeatedly calls \ref dequeue until it returns \p nullptr. 
*/ void clear() { base_class::clear(); } /// Returns queue's item count /** \copydetails cds::intrusive::BasketQueue::size() */ size_t size() const { return base_class::size(); } /// Returns reference to internal statistics const stat& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_BASKET_QUEUE_H libcds-2.3.3/cds/container/bronson_avltree_map_rcu.h000066400000000000000000000707131341244201700226010ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_BRONSON_AVLTREE_MAP_RCU_H #define CDSLIB_CONTAINER_BRONSON_AVLTREE_MAP_RCU_H #include #include namespace cds { namespace container { namespace bronson_avltree { //@cond namespace details { template < class RCU, typename Key, typename T, typename Traits> struct make_map { typedef Key key_type; typedef T mapped_type; typedef Traits original_traits; typedef cds::details::Allocator< mapped_type, typename original_traits::allocator > cxx_allocator; struct traits : public original_traits { struct disposer { void operator()( mapped_type * p ) const { cxx_allocator().Delete( p ); } }; }; // Metafunction result typedef BronsonAVLTreeMap< RCU, Key, mapped_type *, traits > type; }; } // namespace details //@endcond } // namespace bronson_avltree /// Bronson et al AVL-tree (RCU specialization) /** @ingroup cds_nonintrusive_map @ingroup cds_nonintrusive_tree @anchor cds_container_BronsonAVLTreeMap_rcu Source: - [2010] N.Bronson, J.Casper, H.Chafi, K.Olukotun "A Practical Concurrent Binary Search Tree" - Java implementation This is a concurrent AVL tree algorithm that uses hand-over-hand optimistic validation, a concurrency control mechanism for searching and navigating a binary search tree. This mechanism minimizes spurious retries when concurrent structural changes cannot affect the correctness of the search or navigation result. The algorithm is based on partially external trees, a simple scheme that simplifies deletions by leaving a routing node in the tree when deleting a node that has two children, then opportunistically unlinking routing nodes during rebalancing. As in external trees, which store values only in leaf nodes, deletions can be performed locally while holding a fixed number of locks. Partially external trees, however, require far fewer routing nodes than an external tree for most sequences of insertions and deletions. The algorithm uses optimistic concurrency control, but carefully manage the tree in such a way that all atomic regions have fixed read and write sets that are known ahead of time. This allows to reduce practical overheads by embedding the concurrency control directly. To perform tree operations using only fixed sized atomic regions the algo uses the following mechanisms: search operations overlap atomic blocks as in the hand-over-hand locking technique; mutations perform rebalancing separately; and deletions occasionally leave a routing node in the tree. Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type - \p T - value type to be stored in tree's nodes. - \p Traits - tree traits, default is \p bronson_avltree::traits It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction instead of \p Traits template argument. 
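As an illustration (the \p general_buffered RCU flavor here is an arbitrary choice for the example, not a recommendation from the original docs), the map could be declared like this:
\code
#include <cds/urcu/general_buffered.h>   // the RCU header must come first, see the note below
#include <cds/container/bronson_avltree_map_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gc;
typedef cds::container::BronsonAVLTreeMap< rcu_gc, int, std::string > map_type;
\endcode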
There is \ref cds_container_BronsonAVLTreeMap_rcu_ptr "a specialization" for "key -> value pointer" map. @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. */ template < typename RCU, typename Key, typename T, # ifdef CDS_DOXYGEN_INVOKED typename Traits = bronson_avltree::traits #else typename Traits #endif > class BronsonAVLTreeMap< cds::urcu::gc, Key, T, Traits > #ifdef CDS_DOXYGEN_INVOKED : private BronsonAVLTreeMap< cds::urcu::gc, Key, T*, Traits > #else : private bronson_avltree::details::make_map< cds::urcu::gc, Key, T, Traits >::type #endif { //@cond typedef bronson_avltree::details::make_map< cds::urcu::gc, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc gc; ///< RCU Garbage collector typedef Key key_type; ///< type of a key stored in the map typedef T mapped_type; ///< type of value stored in the map typedef Traits traits; ///< Traits template parameter typedef typename base_class::key_comparator key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less typedef typename traits::item_counter item_counter; ///< Item counting policy typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option typedef typename traits::allocator allocator_type; ///< allocator for value typedef typename traits::node_allocator node_allocator_type;///< allocator for maintaining internal nodes typedef typename traits::stat stat; ///< internal statistics typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion" static bool const c_bRelaxedInsert = traits::relaxed_insert; /// Group of \p extract_xxx functions does not require external locking static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock /// Returned pointer to \p mapped_type of extracted node typedef typename base_class::exempt_ptr exempt_ptr; protected: //@cond typedef typename base_class::node_type node_type; typedef typename base_class::node_scoped_lock node_scoped_lock; typedef typename maker::cxx_allocator cxx_allocator; typedef typename base_class::update_flags update_flags; //@endcond public: /// Creates empty map BronsonAVLTreeMap() {} /// Destroys the map ~BronsonAVLTreeMap() {} /// Inserts new node with \p key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. - The \p mapped_type should be default-constructible. RCU \p synchronize() can be called. RCU should not be locked. Returns \p true if inserting successful, \p false otherwise. 
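A hypothetical call sequence (\p map_type as sketched in the class description above; \p std::string is default-constructible, as required):
\code
map_type m;
bool ok = m.insert( 10 );   // key 10 -> default-constructed std::string
// ok == false if key 10 is already present
\endcode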
*/ template bool insert( K const& key ) { return base_class::do_update(key, key_comparator(), []( node_type * pNode ) -> mapped_type* { assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); CDS_UNUSED( pNode ); return cxx_allocator().New(); }, update_flags::allow_insert ) == update_flags::result_inserted; } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. RCU \p synchronize() method can be called. RCU should not be locked. Returns \p true if \p val is inserted into the map, \p false otherwise. */ template bool insert( K const& key, V const& val ) { return base_class::do_update( key, key_comparator(), [&val]( node_type * pNode ) -> mapped_type* { assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); CDS_UNUSED( pNode ); return cxx_allocator().New( val ); }, update_flags::allow_insert ) == update_flags::result_inserted; } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( key_type const& key, mapped_type& item ); }; \endcode The key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. The functor is called under the node lock. RCU \p synchronize() method can be called. RCU should not be locked. */ template bool insert_with( K const& key, Func func ) { return base_class::do_update( key, key_comparator(), [&func]( node_type * pNode ) -> mapped_type* { assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); mapped_type * pVal = cxx_allocator().New(); func( pNode->m_key, *pVal ); return pVal; }, update_flags::allow_insert ) == update_flags::result_inserted; } /// For \p key inserts data of type \p mapped_type created in-place from \p args /** Returns \p true if inserting successful, \p false otherwise. RCU \p synchronize() method can be called. RCU should not be locked. */ template bool emplace( K&& key, Args&&... args ) { struct scoped_ptr { mapped_type * pVal; scoped_ptr( mapped_type * p ): pVal( p ) {} ~scoped_ptr() { if ( pVal ) cxx_allocator().Delete( pVal ); } void release() { pVal = nullptr; } }; scoped_ptr p( cxx_allocator().MoveNew( std::forward( args )... )); if ( base_class::insert( std::forward( key ), p.pVal )) { p.release(); return true; } return false; } /// Updates the value for \p key /** The operation performs inserting or changing data with lock-free manner. If the \p key not found in the map, then the new item created from \p key will be inserted into the map iff \p bAllowInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, the functor \p func is called with item found. 
The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, key_type const& key, mapped_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item's value The functor may change any fields of the \p item. The functor is called under the node lock, the caller can change any field of \p item. RCU \p synchronize() method can be called. RCU should not be locked. Returns std::pair<bool, bool> where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already exists. */ template <typename K, typename Func> std::pair<bool, bool> update( K const& key, Func func, bool bAllowInsert = true ) { int result = base_class::do_update( key, key_comparator(), [&func]( node_type * pNode ) -> mapped_type* { mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed ); if ( !pVal ) { pVal = cxx_allocator().New(); func( true, pNode->m_key, *pVal ); } else func( false, pNode->m_key, *pVal ); return pVal; }, (bAllowInsert ? update_flags::allow_insert : 0) | update_flags::allow_update ); return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 ); } /// Deletes \p key from the map /** RCU \p synchronize() method can be called. RCU should not be locked. Returns \p true if \p key is found and deleted, \p false otherwise */ template <typename K> bool erase( K const& key ) { return base_class::erase( key ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \p erase(K const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> bool erase_with( K const& key, Less pred ) { return base_class::erase_with( key, pred ); } /// Deletes \p key from the map /** \anchor cds_nonintrusive_BronsonAVLTreeMap_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(key_type const& key, mapped_type& item) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if key is found and deleted, \p false otherwise */ template <typename K, typename Func> bool erase( K const& key, Func f ) { return base_class::erase( key, f ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_BronsonAVLTreeMap_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less, typename Func> bool erase_with( K const& key, Less pred, Func f ) { return base_class::erase_with( key, pred, f ); } /// Extracts a value with minimal key from the map /** Returns \p exempt_ptr pointer to the leftmost item. If the set is empty, returns empty \p exempt_ptr. Note that the function returns only the value for the minimal key. To retrieve its key use the \p extract_min( Func ) member function. @note Due to the concurrent nature of the map, the function extracts a nearly minimal key. It means that the function gets the leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key. So, the function returns the item with the minimum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return base_class::extract_min(); } /// Extracts minimal key and corresponding value /** Returns \p exempt_ptr to the leftmost item. If the tree is empty, returns empty \p exempt_ptr. \p Func functor is used to store minimal key. \p Func has the following signature: \code struct functor { void operator()( key_type const& key ); }; \endcode If the tree is empty, \p f is not called. Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned as \p exempt_ptr. @note Due to the concurrent nature of the map, the function extracts a nearly minimal key. It means that the function gets the leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key. So, the function returns the item with the minimum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template <typename Func> exempt_ptr extract_min( Func f ) { return base_class::extract_min( f ); } /// Extracts minimal key and corresponding value /** This function is a shortcut for the following call: \code key_type key; exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } ); \endcode \p key_type should be copy-assignable. The copy of the minimal key is returned in the \p min_key argument. */ typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type extract_min_key( key_type& min_key ) { return base_class::extract_min_key( min_key ); } /// Extracts an item with maximal key from the map /** Returns \p exempt_ptr pointer to the rightmost item. If the set is empty, returns empty \p exempt_ptr. Note that the function returns only the value for the maximal key. To retrieve its key use the \p extract_max( Func ) or \p extract_max_key(key_type&) member function. @note Due to the concurrent nature of the map, the function extracts a nearly maximal key. It means that the function gets the rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key. So, the function returns the item with the maximum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() is called. */ exempt_ptr extract_max() { return base_class::extract_max(); } /// Extracts the maximal key and corresponding value /** Returns \p exempt_ptr pointer to the rightmost item. If the set is empty, returns empty \p exempt_ptr. \p Func functor is used to store maximal key. \p Func has the following signature: \code struct functor { void operator()( key_type const& key ); }; \endcode If the tree is empty, \p f is not called. Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned as \p exempt_ptr. @note Due to the concurrent nature of the map, the function extracts a nearly maximal key. It means that the function gets the rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key. So, the function returns the item with the maximum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() is called. */ template <typename Func> exempt_ptr extract_max( Func f ) { return base_class::extract_max( f ); } /// Extracts the maximal key and corresponding value /** This function is a shortcut for the following call: \code key_type key; exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } ); \endcode \p key_type should be copy-assignable. The copy of the maximal key is returned in the \p max_key argument. */ typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type extract_max_key( key_type& max_key ) { return base_class::extract_max_key( max_key ); } /// Extracts an item from the map /** The function searches an item with key equal to \p key in the tree, unlinks it, and returns \p exempt_ptr pointer to the value found. If \p key is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not destroy the value found. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template <typename Q> exempt_ptr extract( Q const& key ) { return base_class::extract( key ); } /// Extracts an item from the map using \p pred for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key compare. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. */ template <typename Q, typename Less> exempt_ptr extract_with( Q const& key, Less pred ) { return base_class::extract_with( key, pred ); } /// Finds the key \p key /** The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( key_type const& key, mapped_type& val ); }; \endcode where \p val is the item found for \p key. The functor is called under node-level lock. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template <typename K, typename Func> bool find( K const& key, Func f ) { return base_class::find( key, f ); } /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \p find(K const&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less, typename Func> bool find_with( K const& key, Less pred, Func f ) { return base_class::find_with( key, pred, f ); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template <typename K> bool contains( K const& key ) { return base_class::contains( key ); } /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map.
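An illustrative predicate (hypothetical example; \p str_map is assumed to be a Bronson AVL-tree map keyed by \p std::string):
\code
struct my_less {
    bool operator()( std::string const& lhs, std::string const& rhs ) const
    { return lhs < rhs; }
};
bool found = str_map.contains( std::string( "apple" ), my_less());
\endcode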
*/ template bool contains( K const& key, Less pred ) { return base_class::contains( key, pred ); } /// Clears the map void clear() { base_class::clear(); } /// Checks if the map is empty bool empty() const { return base_class::empty(); } /// Returns item count in the map /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use \p empty() member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns reference to \p sync_monitor object sync_monitor& monitor() { return base_class::monitor(); } //@cond sync_monitor const& monitor() const { return base_class::monitor(); } //@endcond /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. */ bool check_consistency() const { return base_class::check_consistency(); } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. The functor \p Func is called if a violation of internal tree structure is found: \code struct functor { void operator()( size_t nLevel, size_t hLeft, size_t hRight ); }; \endcode where - \p nLevel - the level where the violation is found - \p hLeft - the height of left subtree - \p hRight - the height of right subtree The functor is called for each violation found. */ template bool check_consistency( Func f ) const { return base_class::check_consistency( f ); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H libcds-2.3.3/cds/container/cuckoo_map.h000066400000000000000000000763261341244201700200170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_CUCKOO_MAP_H #define CDSLIB_CONTAINER_CUCKOO_MAP_H #include #include namespace cds { namespace container { //@cond namespace details { template struct make_cuckoo_map { typedef Key key_type; ///< key type typedef T mapped_type; ///< type of value stored in the map typedef std::pair value_type; ///< Pair type typedef Traits original_traits; typedef typename original_traits::probeset_type probeset_type; static bool const store_hash = original_traits::store_hash; static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_traits::hash::hash_tuple_type >::value) : 0; struct node_type: public intrusive::cuckoo::node { value_type m_val; template node_type( K const& key ) : m_val( std::make_pair( key_type(key), mapped_type())) {} template node_type( K const& key, Q const& v ) : m_val( std::make_pair( key_type(key), mapped_type(v))) {} template node_type( K&& key, Args&&... 
args ) : m_val( std::forward<K>(key), std::move( mapped_type(std::forward<Args>(args)...))) {} }; struct key_accessor { key_type const& operator()( node_type const& node ) const { return node.m_val.first; } }; struct intrusive_traits: public original_traits { typedef intrusive::cuckoo::base_hook< cds::intrusive::cuckoo::probeset_type< probeset_type > ,cds::intrusive::cuckoo::store_hash< store_hash_count > > hook; typedef cds::intrusive::cuckoo::traits::disposer disposer; typedef typename std::conditional< std::is_same< typename original_traits::equal_to, opt::none >::value , opt::none , cds::details::predicate_wrapper< node_type, typename original_traits::equal_to, key_accessor > >::type equal_to; typedef typename std::conditional< std::is_same< typename original_traits::compare, opt::none >::value , opt::none , cds::details::compare_wrapper< node_type, typename original_traits::compare, key_accessor > >::type compare; typedef typename std::conditional< std::is_same< typename original_traits::less, opt::none >::value ,opt::none ,cds::details::predicate_wrapper< node_type, typename original_traits::less, key_accessor > >::type less; typedef opt::details::hash_list_wrapper< typename original_traits::hash, node_type, key_accessor > hash; }; typedef intrusive::CuckooSet< node_type, intrusive_traits > type; }; } // namespace details //@endcond /// Cuckoo hash map /** @ingroup cds_nonintrusive_map Source - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" About Cuckoo hashing [From "The Art of Multiprocessor Programming"] Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set of size N = 2k we use a two-entry array of tables, and two independent hash functions, h0, h1: KeyRange -> 0,...,k-1 mapping the set of possible keys to entries in the array. To test whether a value \p x is in the set, find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is equal to \p x. Similarly, erase(x) checks whether \p x is in either table[0][h0(x)] or table[1][h1(x)], and removes it if found. The insert(x) successively "kicks out" conflicting items until every key has a slot. To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables) until it finds an empty slot. We might not find an empty slot, either because the table is full, or because the sequence of displacements forms a cycle. We therefore need an upper limit on the number of successive displacements we are willing to undertake. When this limit is exceeded, we resize the hash table, choose new hash functions and start over. For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of items, we use a two-dimensional table of probe sets, where a probe set is a constant-sized set of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm tries to ensure that when the set is quiescent (i.e., no method call is in progress) each probe set holds no more than THRESHOLD < PROBE_SET items.
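A sequential toy sketch of the displacement ("kick out") loop described above; this is an illustration only, not the library's implementation (probe sets and locking are omitted, and the \p EMPTY sentinel is an assumption of the example):
\code
#include <vector>
#include <utility>

static const int EMPTY = -1;   // sentinel; assumes keys are non-negative

// Tables t0/t1 must be pre-filled with EMPTY. Returns false when the
// displacement limit is hit: the caller must resize the tables,
// choose new hash functions and start over.
bool toy_cuckoo_insert( int x, std::vector<int>& t0, std::vector<int>& t1,
                        size_t (*h0)( int ), size_t (*h1)( int ), int nLimit )
{
    for ( int i = 0; i < nLimit; ++i ) {
        std::swap( x, t0[ h0( x ) % t0.size() ] );
        if ( x == EMPTY ) return true;   // displaced an empty slot
        std::swap( x, t1[ h1( x ) % t1.size() ] );
        if ( x == EMPTY ) return true;
    }
    return false;
}
\endcode
The concurrent algorithm below keeps the same displacement idea but replaces single slots with probe sets and adds fine-grained locking on top.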
While method calls are in-flight, a probe set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SET items. In current implementation, a probe set can be defined either as a (single-linked) list or as a fixed-sized vector, optionally ordered. In description above two-table cuckoo hashing (k = 2) has been considered. We can generalize this approach for k >= 2 when we have \p k hash functions h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. The search in probe set is linear, the complexity is O(PROBE_SET) . The probe set may be ordered or not. Ordered probe set can be a little better since the average search complexity is O(PROBE_SET/2). However, the overhead of sorting can eliminate a gain of ordered search. The probe set is ordered if \p compare or \p less is specified in \p Traits template parameter. Otherwise, the probe set is unordered and \p Traits must contain \p equal_to predicate. Template arguments: - \p Key - key type - \p T - the type stored in the map. - \p Traits - map traits, default is \p cuckoo::traits. It is possible to declare option-based set with \p cuckoo::make_traits metafunction result as \p Traits template argument. Examples Declares cuckoo mapping from \p std::string to struct \p foo. For cuckoo hashing we should provide at least two hash functions: \code struct hash1 { size_t operator()(std::string const& s) const { return cds::opt::v::hash( s ); } }; struct hash2: private hash1 { size_t operator()(std::string const& s) const { size_t h = ~( hash1::operator()(s)); return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); } }; \endcode Cuckoo-map with list-based unordered probe set and storing hash values \code #include // Declare type traits struct my_traits: public cds::container::cuckoo::traits { typedef std::equal_to< std::string > equal_to; typedef std::tuple< hash1, hash2 > hash; static bool const store_hash = true; }; // Declare CuckooMap type typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map; // Equal option-based declaration typedef cds::container::CuckooMap< std::string, foo, cds::container::cuckoo::make_traits< cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::equal_to< std::equal_to< std::string > > ,cds::container::cuckoo::store_hash< true > >::type > opt_cuckoo_map; \endcode If we provide \p less functor instead of \p equal_to we get as a result a cuckoo map with ordered probe set that may improve performance. 
Example for ordered vector-based probe-set: \code #include // Declare type traits // We use a vector of capacity 4 as probe-set container and store hash values in the node struct my_traits: public cds::container::cuckoo::traits { typedef std::less< std::string > less; typedef std::tuple< hash1, hash2 > hash; typedef cds::container::cuckoo::vector<4> probeset_type; static bool const store_hash = true; }; // Declare CuckooMap type typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map; // Equal option-based declaration typedef cds::container::CuckooMap< std::string, foo, cds::container::cuckoo::make_traits< cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::less< std::less< std::string > > ,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> > ,cds::container::cuckoo::store_hash< true > >::type > opt_cuckoo_map; \endcode */ template class CuckooMap: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::CuckooSet< std::pair< Key const, T>, Traits> #else protected details::make_cuckoo_map::type #endif { //@cond typedef details::make_cuckoo_map maker; typedef typename maker::type base_class; //@endcond public: typedef Key key_type; ///< key type typedef T mapped_type; ///< value type stored in the container typedef std::pair value_type; ///< Key-value pair type stored in the map typedef Traits traits; ///< Map traits typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use typedef typename base_class::hash_tuple_type hash_tuple_type; ///< hash tuple type typedef typename base_class::mutex_policy mutex_policy; ///< Concurrent access policy, see \p cuckoo::traits::mutex_policy typedef typename base_class::stat stat; ///< internal statistics type static bool const c_isSorted = base_class::c_isSorted; ///< whether the probe set should be ordered static size_t const c_nArity = base_class::c_nArity; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. typedef typename base_class::key_equal_to key_equal_to; ///< Key equality functor; used only for unordered probe-set typedef typename base_class::key_comparator key_comparator; ///< key comparing functor based on opt::compare and opt::less option setter. 
Used only for ordered probe set typedef typename base_class::allocator allocator; ///< allocator type used for internal bucket table allocations /// Node allocator type typedef typename std::conditional< std::is_same< typename traits::node_allocator, opt::none >::value, allocator, typename traits::node_allocator >::type node_allocator; /// item counter type typedef typename traits::item_counter item_counter; protected: //@cond typedef typename base_class::scoped_cell_lock scoped_cell_lock; typedef typename base_class::scoped_full_lock scoped_full_lock; typedef typename base_class::scoped_resize_lock scoped_resize_lock; typedef typename maker::key_accessor key_accessor; typedef typename base_class::value_type node_type; typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; //@endcond public: static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize; ///< default probeset size static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize; ///< default initial size static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit; ///< Count of attempts to relocate before giving up protected: //@cond template static node_type * alloc_node( K const& key ) { return cxx_node_allocator().New( key ); } template static node_type * alloc_node( K&& key, Args&&... args ) { return cxx_node_allocator().MoveNew( std::forward( key ), std::forward(args)... ); } static void free_node( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } //@endcond protected: //@cond struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Default constructor /** Initial size = \ref c_nDefaultInitialSize Probe set size: - \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list - \p Capacity if \p probeset_type is cuckoo::vector Probe set threshold = probe set size - 1 */ CuckooMap() {} /// Constructs an object with given probe set size and threshold /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooMap( size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 ) : base_class( nInitialSize, nProbesetSize, nProbesetThreshold ) {} /// Constructs an object with given hash functor tuple /** The probe set size and threshold are set as default, see CuckooSet() */ CuckooMap( hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( h ) {} /// Constructs a map with given probe set properties and hash functor tuple /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooMap( size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. 
If 0, nProbesetThreshold = nProbesetSize - 1 , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h ) {} /// Constructs a map with given hash functor tuple (move semantics) /** The probe set size and threshold are set as default, see CuckooSet() */ CuckooMap( hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( std::forward(h)) {} /// Constructs a map with given probe set properties and hash functor tuple (move semantics) /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooMap( size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward(h)) {} /// Destructor clears the map ~CuckooMap() { clear(); } public: /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from a value of type \p K. In trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K const& key ) { return insert_with( key, [](value_type&){} ); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref value_type should be constructible from \p val of type \p V. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( K const& key, V const& val ) { return insert_with( key, [&val](value_type& item) { item.second = val ; } ); } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. The key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. */ template bool insert_with( const K& key, Func func ) { scoped_node_ptr pNode( alloc_node( key )); if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_val ); } )) { pNode.release(); return true; } return false; } /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... 
/** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr pNode( alloc_node( std::forward(key), std::forward(args)... )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor \p func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - an item of the map for \p key Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( K const& key, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( key )); std::pair res = base_class::update( *pNode, [&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_val ); }, bAllowInsert ); if ( res.first && res.second ) pNode.release(); return res; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Delete \p key from the map /** \anchor cds_nonintrusive_CuckooMap_erase_val Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( K const& key ) { node_type * pNode = base_class::erase(key); if ( pNode ) { free_node( pNode ); return true; } return false; } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_val "erase(Q const&)" but \p pred is used for key comparing. If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less. If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. \p Predicate must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Predicate pred ) { CDS_UNUSED( pred ); node_type * pNode = base_class::erase_with(key, cds::details::predicate_wrapper()); if ( pNode ) { free_node( pNode ); return true; } return false; } /// Delete \p key from the map /** \anchor cds_nonintrusive_CuckooMap_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode Return \p true if key is found and deleted, \p false otherwise See also: \ref erase */ template bool erase( K const& key, Func f ) { node_type * pNode = base_class::erase( key ); if ( pNode ) { f( pNode->m_val ); free_node( pNode ); return true; } return false; } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less. If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. 
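For instance, for an ordered cuckoo map with \p std::string keys searched by a C-string, a suitable predicate might look like this (a hypothetical sketch, not part of the library):
\code
struct cstr_less {
    bool operator()( std::string const& s, char const* p ) const { return s.compare( p ) < 0; }
    bool operator()( char const* p, std::string const& s ) const { return s.compare( p ) > 0; }
    bool operator()( std::string const& s1, std::string const& s2 ) const { return s1 < s2; }
};
// usage sketch: m.erase_with( "apple", cstr_less());
\endcode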
\p Predicate must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Predicate pred, Func f ) { CDS_UNUSED( pred ); node_type * pNode = base_class::erase_with( key, cds::details::predicate_wrapper()); if ( pNode ) { f( pNode->m_val ); free_node( pNode ); return true; } return false; } /// Find the key \p key /** \anchor cds_nonintrusive_CuckooMap_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( K const& key, Func f ) { return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_val );}); } /// Find the key \p val using \p pred predicate for comparing /** The function is an analog of \ref cds_nonintrusive_CuckooMap_find_func "find(K const&, Func)" but \p pred is used for key comparison. If you use ordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::less. If you use unordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::equal_to. \p pred must imply the same element order as the comparator used for building the map. */ template bool find_with( K const& key, Predicate pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper(), [&f](node_type& item, K const& ) { f( item.m_val );}); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( K const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("the function is deprecated, use contains()") bool find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool contains( K const& key, Predicate pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper()); } //@cond template CDS_DEPRECATED("the function is deprecated, use contains()") bool find_with( K const& key, Predicate pred ) { return contains( key, pred ); } //@endcond /// Clears the map void clear() { base_class::clear_and_dispose( node_disposer()); } /// Checks if the map is empty /** Emptiness is checked by item counting: if item count is zero then the map is empty. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns the size of hash table /** The hash table size is non-constant and can be increased via resizing. */ size_t bucket_count() const { return base_class::bucket_count(); } /// Returns lock array size /** The lock array size is constant. 
*/ size_t lock_count() const { return base_class::lock_count(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns const reference to mutex policy internal statistics typename mutex_policy::statistics_type const& mutex_policy_statistics() const { return base_class::mutex_policy_statistics(); } }; }} // namespace cds::container #endif //#ifndef CDSLIB_CONTAINER_CUCKOO_MAP_H libcds-2.3.3/cds/container/cuckoo_set.h000066400000000000000000001016121341244201700200200ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_CUCKOO_SET_H #define CDSLIB_CONTAINER_CUCKOO_SET_H #include #include namespace cds { namespace container { //@cond namespace details { template struct make_cuckoo_set { typedef T value_type; typedef Traits original_traits; typedef typename original_traits::probeset_type probeset_type; static bool const store_hash = original_traits::store_hash; static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_traits::hash::hash_tuple_type >::value) : 0; struct node_type: public intrusive::cuckoo::node { value_type m_val; template node_type( Q const& v ) : m_val(v) {} template node_type( Args&&... args ) : m_val( std::forward(args)...) {} }; struct value_accessor { value_type const& operator()( node_type const& node ) const { return node.m_val; } }; template using predicate_wrapper = cds::details::binary_functor_wrapper< ReturnValue, Pred, node_type, value_accessor >; struct intrusive_traits: public original_traits { typedef intrusive::cuckoo::base_hook< cds::intrusive::cuckoo::probeset_type< probeset_type > ,cds::intrusive::cuckoo::store_hash< store_hash_count > > hook; typedef cds::intrusive::cuckoo::traits::disposer disposer; typedef typename std::conditional< std::is_same< typename original_traits::equal_to, opt::none >::value , opt::none , predicate_wrapper< typename original_traits::equal_to, bool > >::type equal_to; typedef typename std::conditional< std::is_same< typename original_traits::compare, opt::none >::value , opt::none , predicate_wrapper< typename original_traits::compare, int > >::type compare; typedef typename std::conditional< std::is_same< typename original_traits::less, opt::none >::value ,opt::none ,predicate_wrapper< typename original_traits::less, bool > >::type less; typedef opt::details::hash_list_wrapper< typename original_traits::hash, node_type, value_accessor > hash; }; typedef intrusive::CuckooSet< node_type, intrusive_traits > type; }; } // namespace details //@endcond /// Cuckoo hash set /** @ingroup cds_nonintrusive_set Source - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" About Cuckoo hashing [From "The Art of Multiprocessor Programming"] Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set of size N = 2k we use a two-entry array of tables, and two independent hash functions, h0, h1: KeyRange -> 0,...,k-1 mapping the set of possible keys to entries in the array. To test whether a value \p x is in the set, find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is equal to \p x.
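A minimal sequential sketch of this two-table lookup (hypothetical names; no concurrency control, whereas the real container guards each probe set with striped or refinable locks):
\code
// Two tables of k slots each; an empty slot is nullptr (assumed layout)
std::array< std::vector< int* >, 2 > table;
size_t h0( int x ); // first hash functor (assumed)
size_t h1( int x ); // second hash functor (assumed)

bool seq_find( int x )
{
    int* p0 = table[0][ h0( x ) % table[0].size() ];
    int* p1 = table[1][ h1( x ) % table[1].size() ];
    return ( p0 && *p0 == x ) || ( p1 && *p1 == x );
}
\endcode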
Similarly, erase(x) checks whether \p x is in either table[0][h0(x)] or table[1][h1(x)], and removes it if found. The insert(x) successively "kicks out" conflicting items until every key has a slot. To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables) until it finds an empty slot. We might not find an empty slot, either because the table is full, or because the sequence of displacements forms a cycle. We therefore need an upper limit on the number of successive displacements we are willing to undertake. When this limit is exceeded, we resize the hash table, choose new hash functions and start over. For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of items, we use a two-dimensional table of probe sets, where a probe set is a constant-sized set of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm tries to ensure that when the set is quiescent (i.e. no method call in progress) each probe set holds no more than THRESHOLD < PROBE_SIZE items. While method calls are in-flight, a probe set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SIZE items. In the current implementation, a probe set can be defined either as a (singly-linked) list or as a fixed-sized vector, optionally ordered. In the description above, two-table cuckoo hashing (k = 2) has been considered. We can generalize this approach for k >= 2 when we have \p k hash functions h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. The search in a probe set is linear, with complexity O(PROBE_SIZE). The probe set may be ordered or not. An ordered probe set can be a little better since the average search complexity is O(PROBE_SIZE/2). However, the overhead of sorting can eliminate the gain of ordered search. The probe set is ordered if \p compare or \p less is specified in the \p Traits template parameter. Otherwise, the probe set is unordered and \p Traits must contain an \p equal_to predicate. Template arguments: - \p T - the type stored in the set. - \p Traits - type traits. See cuckoo::traits for explanation. It is possible to declare an option-based set with the cuckoo::make_traits metafunction result as the \p Traits template argument. Examples Cuckoo-set with list-based unordered probe set and storing hash values: \code #include // Data stored in cuckoo set struct my_data { // key field std::string strKey; // other data // ...
}; // Provide equal_to functor for my_data since we will use unordered probe-set struct my_data_equal_to { bool operator()( const my_data& d1, const my_data& d2 ) const { return d1.strKey.compare( d2.strKey ) == 0; } bool operator()( const my_data& d, const std::string& s ) const { return d.strKey.compare(s) == 0; } bool operator()( const std::string& s, const my_data& d ) const { return s.compare( d.strKey ) == 0; } }; // Provide two hash functors for my_data struct hash1 { size_t operator()(std::string const& s) const { return cds::opt::v::hash( s ); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; struct hash2: private hash1 { size_t operator()(std::string const& s) const { size_t h = ~( hash1::operator()(s)); return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; // Declare type traits struct my_traits: public cds::container::cuckoo::traits { typedef my_data_equal_to equal_to; typedef std::tuple< hash1, hash2 > hash; static bool const store_hash = true; }; // Declare CuckooSet type typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set; // Equal option-based declaration typedef cds::container::CuckooSet< my_data, cds::container::cuckoo::make_traits< cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::equal_to< my_data_equal_to > ,cds::container::cuckoo::store_hash< true > >::type > opt_cuckoo_set; \endcode If we provide a \p compare functor instead of \p equal_to for \p my_data, we get a cuckoo set with an ordered probe set, which may improve performance. Example for ordered vector-based probe-set: \code #include // Data stored in cuckoo set struct my_data { // key field std::string strKey; // other data // ... }; // Provide compare functor for my_data since we want to use ordered probe-set struct my_data_compare { int operator()( const my_data& d1, const my_data& d2 ) const { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) const { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) const { return s.compare( d.strKey ); } }; // Provide two hash functors for my_data struct hash1 { size_t operator()(std::string const& s) const { return cds::opt::v::hash( s ); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; struct hash2: private hash1 { size_t operator()(std::string const& s) const { size_t h = ~( hash1::operator()(s)); return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; // Declare type traits // We use a vector of capacity 4 as probe-set container and store hash values in the node struct my_traits: public cds::container::cuckoo::traits { typedef my_data_compare compare; typedef std::tuple< hash1, hash2 > hash; typedef cds::container::cuckoo::vector<4> probeset_type; static bool const store_hash = true; }; // Declare CuckooSet type typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set; // Equal option-based declaration typedef cds::container::CuckooSet< my_data, cds::container::cuckoo::make_traits< cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::compare< my_data_compare > ,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> > ,cds::container::cuckoo::store_hash< true > >::type > opt_cuckoo_set; \endcode */ template class CuckooSet: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::CuckooSet #else protected
details::make_cuckoo_set::type #endif { //@cond typedef details::make_cuckoo_set maker; typedef typename maker::type base_class; //@endcond public: typedef T value_type ; ///< value type stored in the container typedef Traits traits ; ///< traits typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use typedef typename base_class::hash_tuple_type hash_tuple_type; ///< Type of hash tuple typedef typename base_class::mutex_policy mutex_policy; ///< Concurrent access policy, see cuckoo::traits::mutex_policy typedef typename base_class::stat stat; ///< internal statistics type static bool const c_isSorted = base_class::c_isSorted; ///< whether the probe set should be ordered static size_t const c_nArity = base_class::c_nArity; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. typedef typename base_class::key_equal_to key_equal_to; ///< Key equality functor; used only for unordered probe-set typedef typename base_class::key_comparator key_comparator; ///< key comparing functor based on \p Traits::compare and \p Traits::less option setter. Used only for ordered probe set typedef typename base_class::allocator allocator; ///< allocator type used for internal bucket table allocations /// Node allocator type typedef typename std::conditional< std::is_same< typename traits::node_allocator, opt::none >::value, allocator, typename traits::node_allocator >::type node_allocator; /// item counter type typedef typename traits::item_counter item_counter; protected: //@cond typedef typename base_class::value_type node_type; typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; //@endcond public: static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize; ///< default probeset size static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize; ///< default initial size static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit; ///< Count of attempts to relocate before giving up protected: //@cond template static node_type * alloc_node( Q const& v ) { return cxx_node_allocator().New( v ); } template static node_type * alloc_node( Args&&... args ) { return cxx_node_allocator().MoveNew( std::forward(args)... ); } static void free_node( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } //@endcond protected: //@cond struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Default constructor /** Initial size = \ref c_nDefaultInitialSize Probe set size: - \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list - \p Capacity if \p probeset_type is cuckoo::vector Probe set threshold = probe set size - 1 */ CuckooSet() {} /// Constructs the set object with given probe set size and threshold /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooSet( size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. 
If 0, nProbesetThreshold = nProbesetSize - 1 ) : base_class( nInitialSize, nProbesetSize, nProbesetThreshold ) {} /// Constructs the set object with given hash functor tuple /** The probe set size and threshold are set as default, see CuckooSet() */ CuckooSet( hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( h ) {} /// Constructs the set object with given probe set properties and hash functor tuple /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooSet( size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h ) {} /// Constructs the set object with given hash functor tuple (move semantics) /** The probe set size and threshold are set as default, see CuckooSet() */ CuckooSet( hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( std::forward(h)) {} /// Constructs the set object with given probe set properties and hash functor tuple (move semantics) /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooSet( size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward(h)) {} /// Destructor clears the set ~CuckooSet() { clear(); } public: /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain as minimum the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q const& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-field of new item . The functor signature is: \code void func( value_type& item ); \endcode where \p item is the item inserted. The type \p Q can differ from \ref value_type of items storing in the set. Therefore, the \p value_type should be constructible from type \p Q. The user-defined functor is called only if the inserting is success. */ template bool insert( Q const& val, Func f ) { scoped_node_ptr pNode( alloc_node( val )); if ( base_class::insert( *pNode, [&f]( node_type& node ) { f( node.m_val ); } )) { pNode.release(); return true; } return false; } /// Inserts data of type \ref value_type constructed with std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... 
args ) { scoped_node_ptr pNode( alloc_node( std::forward(args)... )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor \p func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( Q const& val, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( val )); std::pair res = base_class::update( *pNode, [&val,&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_val, val ); }, bAllowInsert ); if ( res.first && res.second ) pNode.release(); return res; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& val, Func func ) { return update( val, func, true ); } //@endcond /// Delete \p key from the set /** \anchor cds_nonintrusive_CuckooSet_erase Since the key of set's item type \ref value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The set item comparator should be able to compare the type \p value_type and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { node_type * pNode = base_class::erase( key ); if ( pNode ) { free_node( pNode ); return true; } return false; } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_CuckooSet_erase "erase(Q const&)" but \p pred is used for key comparing. If cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less. If cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. \p Predicate must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Predicate pred ) { CDS_UNUSED( pred ); node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper()); if ( pNode ) { free_node( pNode ); return true; } return false; } /// Delete \p key from the set /** \anchor cds_nonintrusive_CuckooSet_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. 
The functor \p Func interface is: \code struct functor { void operator()(value_type const& val); }; \endcode Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { node_type * pNode = base_class::erase( key ); if ( pNode ) { f( pNode->m_val ); free_node( pNode ); return true; } return false; } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_CuckooSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. \p Predicate must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Predicate pred, Func f ) { CDS_UNUSED( pred ); node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper()); if ( pNode ) { f( pNode->m_val ); free_node( pNode ); return true; } return false; } /// Find the key \p val /** \anchor cds_nonintrusive_CuckooSet_find_func The function searches the item with key equal to \p val and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor can change non-key fields of \p item. The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. The type \p Q can differ from \ref value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& val, Func f ) { return base_class::find( val, [&f](node_type& item, Q& v) { f( item.m_val, v );}); } //@cond template bool find( Q const& val, Func f ) { return base_class::find( val, [&f](node_type& item, Q const& v) { f( item.m_val, v );}); } //@endcond /// Find the key \p val using \p pred predicate for comparing /** The function is an analog of \ref cds_nonintrusive_CuckooSet_find_func "find(Q&, Func)" but \p pred is used for key comparison. If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. \p pred must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& val, Predicate pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( val, typename maker::template predicate_wrapper(), [&f](node_type& item, Q& v) { f( item.m_val, v );}); } //@cond template bool find_with( Q const& val, Predicate pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( val, typename maker::template predicate_wrapper(), [&f](node_type& item, Q const& v) { f( item.m_val, v );}); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. 
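A usage sketch, assuming the \p my_cuckoo_set type and \p my_data struct from the class-level example above:
\code
my_cuckoo_set s;
my_data d;
d.strKey = "apple";
s.insert( d );
assert( s.contains( std::string( "apple" ))); // the equal_to functor compares my_data with std::string
\endcode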
*/ template bool contains( Q const& key ) { return base_class::find( key, [](node_type&, Q const&) {}); } //@cond template CDS_DEPRECATED("the function is deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Predicate pred ) { CDS_UNUSED( pred ); return base_class::find_with( key, typename maker::template predicate_wrapper(), [](node_type&, Q const&) {}); } //@cond template CDS_DEPRECATED("the function is deprecated, use contains()") bool find_with( Q const& key, Predicate pred ) { return contains( key, pred ); } //@endcond /// Clears the set /** The function erases all items from the set. */ void clear() { return base_class::clear_and_dispose( node_disposer()); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. */ bool empty() const { return base_class::empty(); } /// Returns item count in the set size_t size() const { return base_class::size(); } /// Returns the size of hash table /** The hash table size is non-constant and can be increased via resizing. */ size_t bucket_count() const { return base_class::bucket_count(); } /// Returns lock array size size_t lock_count() const { return base_class::lock_count(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns const reference to mutex policy internal statistics typename mutex_policy::statistics_type const& mutex_policy_statistics() const { return base_class::mutex_policy_statistics(); } }; }} // namespace cds::container #endif //#ifndef CDSLIB_CONTAINER_CUCKOO_SET_H libcds-2.3.3/cds/container/details/000077500000000000000000000000001341244201700171355ustar00rootroot00000000000000libcds-2.3.3/cds/container/details/base.h000066400000000000000000000050251341244201700202220ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_BASE_H #define CDSLIB_CONTAINER_DETAILS_BASE_H #include namespace cds { /// Standard (non-intrusive) containers /** @ingroup cds_nonintrusive_containers This namespace contains implementations of non-intrusive (std-like) lock-free containers. */ namespace container { /// Common options for non-intrusive containers /** @ingroup cds_nonintrusive_helper This namespace contains options for non-intrusive containers that are, in general, the same as for the intrusive containers.
It imports all definitions from cds::opt and cds::intrusive::opt namespaces */ namespace opt { using namespace cds::intrusive::opt; } // namespace opt /// @defgroup cds_nonintrusive_containers Non-intrusive containers /** @defgroup cds_nonintrusive_helper Helper structs for non-intrusive containers @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_stack Stack @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_queue Queue @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_deque Deque @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_priority_queue Priority queue @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_map Map @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_set Set @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_list List @ingroup cds_nonintrusive_containers */ /** @defgroup cds_nonintrusive_tree Tree @ingroup cds_nonintrusive_containers */ // Tag for selecting iterable list implementation /** @ingroup cds_nonintrusive_helper This struct is empty and it is used only as a tag for selecting \p IterableList as ordered list implementation in declaration of some classes. See \p split_list::traits::ordered_list as an example. */ typedef intrusive::iterable_list_tag iterable_list_tag; //@cond template struct is_iterable_list: public cds::intrusive::is_iterable_list< List > {}; //@endcond } // namespace container } // namespace cds #endif // #ifndef CDSLIB_CONTAINER_DETAILS_BASE_H libcds-2.3.3/cds/container/details/bronson_avltree_base.h000066400000000000000000000576571341244201700235260ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H #define CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H #include #include #include #include #include namespace cds { namespace container { /// BronsonAVLTree related declarations namespace bronson_avltree { template struct node; //@cond template struct link_node { typedef Node node_type; typedef T mapped_type; typedef uint32_t version_type; ///< version type (internal) enum { shrinking = 1, unlinked = 2, version_flags = shrinking | unlinked // the rest is version counter }; atomics::atomic< int > m_nHeight; ///< Node height atomics::atomic m_nVersion; ///< Version bits atomics::atomic m_pParent; ///< Parent node atomics::atomic m_pLeft; ///< Left child atomics::atomic m_pRight; ///< Right child typename SyncMonitor::node_injection m_SyncMonitorInjection; ///< @ref cds_sync_monitor "synchronization monitor" injected data atomics::atomic m_pValue; ///< Value public: link_node() : m_nHeight( 0 ) , m_nVersion( 0 ) , m_pParent( nullptr ) , m_pLeft( nullptr ) , m_pRight( nullptr ) { m_pValue.store( nullptr, atomics::memory_order_release ); } link_node( int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight ) : m_nHeight( nHeight ) , m_nVersion( version ) , m_pParent( pParent ) , m_pLeft( pLeft ) , m_pRight( pRight ) { m_pValue.store( nullptr, atomics::memory_order_release ); } node_type * parent( atomics::memory_order order ) const { return m_pParent.load( order ); } void parent( node_type * p, atomics::memory_order order ) { m_pParent.store( p, order ); } node_type * child( int nDirection, atomics::memory_order order ) const { assert( nDirection != 0 ); return nDirection < 0 ? m_pLeft.load( order ) : m_pRight.load( order ); } void child( node_type * pChild, int nDirection, atomics::memory_order order ) { assert( nDirection != 0 ); if ( nDirection < 0 ) m_pLeft.store( pChild, order ); else m_pRight.store( pChild, order ); } version_type version( atomics::memory_order order ) const { return m_nVersion.load( order ); } void version( version_type ver, atomics::memory_order order ) { m_nVersion.store( ver, order ); } void exchange_version( version_type ver, atomics::memory_order order ) { m_nVersion.exchange( ver, order ); } int height( atomics::memory_order order ) const { return m_nHeight.load( order ); } void height( int h, atomics::memory_order order ) { m_nHeight.store( h, order ); } template void wait_until_shrink_completed( atomics::memory_order order ) const { BackOff bkoff; while ( is_shrinking( order )) bkoff(); } bool is_unlinked( atomics::memory_order order ) const { return m_nVersion.load( order ) == unlinked; } bool is_shrinking( atomics::memory_order order ) const { return (m_nVersion.load( order ) & shrinking) != 0; } mapped_type * value( atomics::memory_order order ) const { return m_pValue.load( order ); } bool is_valued( atomics::memory_order order ) const { return value( order ) != nullptr; } }; //@endcond /// BronsonAVLTree internal node template struct node: public link_node< node, T, SyncMonitor > { //@cond typedef link_node< node, T, SyncMonitor > base_class; //@endcond typedef Key key_type; ///< key type typedef T mapped_type; ///< value type //@cond typedef typename base_class::version_type version_type; //@endcond key_type const m_key; ///< Key node * m_pNextRemoved; ///< thread-local list of removed node public: //@cond template node( Q&& key ) : base_class() , m_key( std::forward( key )) , 
m_pNextRemoved( nullptr ) {} template node( Q&& key, int nHeight, version_type version, node * pParent, node * pLeft, node * pRight ) : base_class( nHeight, version, pParent, pLeft, pRight ) , m_key( std::forward( key )) , m_pNextRemoved( nullptr ) {} //@endcond }; /// BronsonAVLTreeMap internal statistics template struct stat { typedef Counter event_counter; ///< Event counter type event_counter m_nFindSuccess; ///< Count of successful \p find() call event_counter m_nFindFailed; ///< Count of failed \p find() call event_counter m_nFindRetry; ///< Count of retries during \p find() event_counter m_nFindWaitShrinking; ///< Count of waiting until shrinking completed during \p find() call event_counter m_nInsertSuccess; ///< Count of inserting data node event_counter m_nInsertFailed; ///< Count of insert failures event_counter m_nRelaxedInsertFailed; ///< Count of false creating of data nodes (only if @ref bronson_avltree::relaxed_insert "relaxed insertion" is enabled) event_counter m_nInsertRetry; ///< Count of insert retries via concurrent operations event_counter m_nUpdateWaitShrinking; ///< Count of waiting until shrinking completed during \p update() call event_counter m_nUpdateRetry; ///< Count of update retries via concurrent operations event_counter m_nUpdateRootWaitShrinking; ///< Count of waiting until root shrinking completed during \p update() call event_counter m_nUpdateSuccess; ///< Count of updating data node event_counter m_nUpdateUnlinked; ///< Count of attempts to update unlinked node event_counter m_nDisposedNode; ///< Count of disposed node event_counter m_nDisposedValue; ///< Count of disposed value event_counter m_nExtractedValue; ///< Count of extracted value event_counter m_nRemoveSuccess; ///< Count of successful \p erase() call event_counter m_nRemoveFailed; ///< Count of failed \p erase() call event_counter m_nRemoveRetry; ///< Count of erase/extract retries event_counter m_nExtractSuccess; ///< Count of successful \p extract() call event_counter m_nExtractFailed; ///< Count of failed \p extract() call event_counter m_nRemoveWaitShrinking; ///< Count of waiting until shrinking completed during \p erase() or \p extract() call event_counter m_nRemoveRootWaitShrinking; ///< Count of waiting until root shrinking completed during \p erase() or \p extract() call event_counter m_nMakeRoutingNode; ///< How many nodes were converted to routing (valueless) nodes event_counter m_nRightRotation; ///< Count of single right rotation event_counter m_nLeftRotation; ///< Count of single left rotation event_counter m_nLeftRightRotation; ///< Count of double left-over-right rotation event_counter m_nRightLeftRotation; ///< Count of double right-over-left rotation event_counter m_nRotateAfterRightRotation; ///< Count of rotation required after single right rotation event_counter m_nRemoveAfterRightRotation; ///< Count of removal required after single right rotation event_counter m_nDamageAfterRightRotation; ///< Count of damaged node after single right rotation event_counter m_nRotateAfterLeftRotation; ///< Count of rotation required after single left rotation event_counter m_nRemoveAfterLeftRotation; ///< Count of removal required after single left rotation event_counter m_nDamageAfterLeftRotation; ///< Count of damaged node after single left rotation event_counter m_nRotateAfterRLRotation; ///< Count of rotation required after right-over-left rotation event_counter m_nRemoveAfterRLRotation; ///< Count of removal required after right-over-left rotation event_counter
m_nRotateAfterLRRotation; ///< Count of rotation required after left-over-right rotation event_counter m_nRemoveAfterLRRotation; ///< Count of removal required after left-over-right rotation event_counter m_nInsertRebalanceReq; ///< Count of rebalance required after inserting event_counter m_nRemoveRebalanceReq; ///< Count of rebalance required after removing //@cond void onFindSuccess() { ++m_nFindSuccess ; } void onFindFailed() { ++m_nFindFailed ; } void onFindRetry() { ++m_nFindRetry ; } void onFindWaitShrinking() { ++m_nFindWaitShrinking; } void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onRelaxedInsertFailed() { ++m_nRelaxedInsertFailed; } void onInsertRetry() { ++m_nInsertRetry ; } void onUpdateWaitShrinking() { ++m_nUpdateWaitShrinking; } void onUpdateRetry() { ++m_nUpdateRetry; } void onUpdateRootWaitShrinking() { ++m_nUpdateRootWaitShrinking; } void onUpdateSuccess() { ++m_nUpdateSuccess; } void onUpdateUnlinked() { ++m_nUpdateUnlinked; } void onDisposeNode() { ++m_nDisposedNode; } void onDisposeValue() { ++m_nDisposedValue; } void onExtractValue() { ++m_nExtractedValue; } void onRemove(bool bSuccess) { if ( bSuccess ) ++m_nRemoveSuccess; else ++m_nRemoveFailed; } void onExtract( bool bSuccess ) { if ( bSuccess ) ++m_nExtractSuccess; else ++m_nExtractFailed; } void onRemoveRetry() { ++m_nRemoveRetry; } void onRemoveWaitShrinking() { ++m_nRemoveWaitShrinking; } void onRemoveRootWaitShrinking() { ++m_nRemoveRootWaitShrinking; } void onMakeRoutingNode() { ++m_nMakeRoutingNode; } void onRotateRight() { ++m_nRightRotation; } void onRotateLeft() { ++m_nLeftRotation; } void onRotateRightOverLeft() { ++m_nRightLeftRotation; } void onRotateLeftOverRight() { ++m_nLeftRightRotation; } void onRotateAfterRightRotation() { ++m_nRotateAfterRightRotation; } void onRemoveAfterRightRotation() { ++m_nRemoveAfterRightRotation; } void onDamageAfterRightRotation() { ++m_nDamageAfterRightRotation; } void onRotateAfterLeftRotation() { ++m_nRotateAfterLeftRotation; } void onRemoveAfterLeftRotation() { ++m_nRemoveAfterLeftRotation; } void onDamageAfterLeftRotation() { ++m_nDamageAfterLeftRotation; } void onRotateAfterRLRotation() { ++m_nRotateAfterRLRotation; } void onRemoveAfterRLRotation() { ++m_nRemoveAfterRLRotation; } void onRotateAfterLRRotation() { ++m_nRotateAfterLRRotation; } void onRemoveAfterLRRotation() { ++m_nRemoveAfterLRRotation; } void onInsertRebalanceRequired() { ++m_nInsertRebalanceReq; } void onRemoveRebalanceRequired() { ++m_nRemoveRebalanceReq; } //@endcond }; /// BronsonAVLTreeMap empty statistics struct empty_stat { //@cond void onFindSuccess() const {} void onFindFailed() const {} void onFindRetry() const {} void onFindWaitShrinking() const {} void onInsertSuccess() const {} void onInsertFailed() const {} void onRelaxedInsertFailed() const {} void onInsertRetry() const {} void onUpdateWaitShrinking() const {} void onUpdateRetry() const {} void onUpdateRootWaitShrinking() const {} void onUpdateSuccess() const {} void onUpdateUnlinked() const {} void onDisposeNode() const {} void onDisposeValue() const {} void onExtractValue() const {} void onRemove(bool /*bSuccess*/) const {} void onExtract(bool /*bSuccess*/) const {} void onRemoveRetry() const {} void onRemoveWaitShrinking() const {} void onRemoveRootWaitShrinking() const {} void onMakeRoutingNode() const {} void onRotateRight() const {} void onRotateLeft() const {} void onRotateRightOverLeft() const {} void onRotateLeftOverRight() const {} void onRotateAfterRightRotation() const {} 
void onRemoveAfterRightRotation() const {} void onDamageAfterRightRotation() const {} void onRotateAfterLeftRotation() const {} void onRemoveAfterLeftRotation() const {} void onDamageAfterLeftRotation() const {} void onRotateAfterRLRotation() const {} void onRemoveAfterRLRotation() const {} void onRotateAfterLRRotation() const {} void onRemoveAfterLRRotation() const {} void onInsertRebalanceRequired() const {} void onRemoveRebalanceRequired() const {} //@endcond }; /// Option to allow relaxed insert into \ref cds_container_BronsonAVLTreeMap_rcu "Bronson et al AVL-tree" /** By default, this option is disabled and the new node is created under its parent lock. In this case, it is guaranteed the new node will be attached to its parent. On the other hand, constructing the new node can be too expensive to perform under the lock, which can lead to lock contention. When this option is enabled, the new node is created before locking the parent node. After that, the parent is locked and it is checked whether the new node can be attached to it. In this case, the node may be created in vain, but the locked section can be significantly smaller. */ template struct relaxed_insert { //@cond template struct pack : public Base { enum { relaxed_insert = Enable }; }; //@endcond }; /// \p BronsonAVLTreeMap traits /** Note that there are two main specializations of Bronson et al AVL-tree: - \ref cds_container_BronsonAVLTreeMap_rcu_ptr "pointer-oriented" - the tree node stores a user-provided pointer to value - \ref cds_container_BronsonAVLTreeMap_rcu "data-oriented" - the tree node contains a copy of values Depending on the tree specialization, different traits members can be used. */ struct traits { /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. See \p cds::opt::compare option description for functor interface. You should provide \p compare or \p less functor. */ typedef opt::none compare; /// Specifies binary predicate used for key compare. /** See \p cds::opt::less option description for predicate interface. You should provide \p compare or \p less functor. */ typedef opt::none less; /// Allocator for internal node typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// Allocator for node's value (not used in \p BronsonAVLTreeMap specialisation) typedef CDS_DEFAULT_ALLOCATOR allocator; /// Disposer (only for pointer-oriented tree specialization) /** The functor used to dispose of removed values. The user-provided disposer is used only for pointer-oriented tree specialization like \p BronsonAVLTreeMap. When the node becomes the routing node without value, the disposer will be called to signal that the memory for the value can be safely freed. Default is \ref cds::intrusive::opt::delete_disposer "cds::container::opt::v::delete_disposer<>" which calls \p delete operator. */ typedef opt::v::delete_disposer<> disposer; /// @ref cds_sync_monitor "Synchronization monitor" type for node-level locking typedef cds::sync::injecting_monitor sync_monitor; /// Enable relaxed insertion. /** For details about relaxed insertion, see the \p bronson_avltree::relaxed_insert option. By default, this option is disabled. */ static bool const relaxed_insert = false; /// Item counter /** The type for item counter, by default it is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter.
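For example, item counting can be switched on following the traits-derivation pattern used throughout the library (a sketch):
\code
struct my_traits: public cds::container::bronson_avltree::traits {
    typedef cds::atomicity::item_counter item_counter; // enable item counting
};
\endcode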
*/ typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** For the list of available memory orderings, see \p opt::memory_model */ typedef opt::v::relaxed_ordering memory_model; /// Internal statistics /** By default, internal statistics is disabled (\p bronson_avltree::empty_stat). To enable it use \p bronson_avltree::stat. */ typedef empty_stat stat; /// Back-off strategy typedef cds::backoff::empty back_off; /// RCU deadlock checking policy /** For the list of available options, see \p opt::rcu_check_deadlock */ typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to BronsonAVLTreeMap traits /** Note that there are two main specializations of Bronson et al AVL-tree: - \ref cds_container_BronsonAVLTreeMap_rcu_ptr "pointer-oriented" - the tree node stores a user-provided pointer to value - \ref cds_container_BronsonAVLTreeMap_rcu "data-oriented" - the tree node contains a copy of values Depending on the tree specialization, different options can be specified. \p Options are: - \p opt::compare - key compare functor. No default functor is provided. If the option is not specified, \p %opt::less is used. - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::allocator - the allocator for node's value. Default is \ref CDS_DEFAULT_ALLOCATOR. This option is not used in \p BronsonAVLTreeMap specialisation - \p cds::intrusive::opt::disposer - the functor used to dispose of removed values. The user-provided disposer is used only for pointer-oriented tree specialization like \p BronsonAVLTreeMap. When the node becomes the routing node without value, the disposer will be called to signal that the memory for the value can be safely freed. Default is \p cds::intrusive::opt::delete_disposer which calls \p delete operator. Due to the nature of the GC schema, the disposer may be called asynchronously. - \p opt::sync_monitor - @ref cds_sync_monitor "synchronization monitor" type for node-level locking, default is \p cds::sync::injecting_monitor - \p bronson_avltree::relaxed_insert - enable (\p true) or disable (\p false, the default) @ref bronson_avltree::relaxed_insert "relaxed insertion" - \p opt::item_counter - the type of item counting feature, by default it is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::stat - internal statistics, by default it is disabled (\p bronson_avltree::empty_stat). To enable statistics use \p bronson_avltree::stat - \p opt::back_off - back-off strategy, by default no strategy is used (\p cds::backoff::empty) - \p opt::rcu_check_deadlock - a deadlock checking policy for RCU-based tree, default is \p opt::v::rcu_throw_deadlock */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options...
>::type type; # endif }; } // namespace bronson_avltree // Forwards template < class GC, typename Key, typename T, class Traits = bronson_avltree::traits > class BronsonAVLTreeMap; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H libcds-2.3.3/cds/container/details/cuckoo_base.h000066400000000000000000000241661341244201700215740ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H #define CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H #include namespace cds { namespace container { /// CuckooSet and CuckooMap related definitions /** @ingroup cds_nonintrusive_helper */ namespace cuckoo { #ifdef CDS_DOXYGEN_INVOKED /// Lock striping concurrent access policy. This is typedef for intrusive::cuckoo::striping template class striping {}; #else using intrusive::cuckoo::striping; #endif #ifdef CDS_DOXYGEN_INVOKED /// Refinable concurrent access policy. This is typedef for intrusive::cuckoo::refinable template class refinable {}; #else using intrusive::cuckoo::refinable; #endif #ifdef CDS_DOXYGEN_INVOKED /// Striping internal statistics. This is typedef for intrusive::cuckoo::striping_stat class striping_stat {}; #else using intrusive::cuckoo::striping_stat; #endif #ifdef CDS_DOXYGEN_INVOKED /// Empty striping internal statistics. This is typedef for intrusive::cuckoo::empty_striping_stat class empty_striping_stat {}; #else using intrusive::cuckoo::empty_striping_stat; #endif #ifdef CDS_DOXYGEN_INVOKED /// Refinable internal statistics. This is typedef for intrusive::cuckoo::refinable_stat class refinable_stat {}; #else using intrusive::cuckoo::refinable_stat; #endif #ifdef CDS_DOXYGEN_INVOKED /// Empty refinable internal statistics. This is typedef for intrusive::cuckoo::empty_refinable_stat class empty_refinable_stat {}; #else using intrusive::cuckoo::empty_refinable_stat; #endif #ifdef CDS_DOXYGEN_INVOKED /// Cuckoo statistics. This is typedef for intrusive::cuckoo::stat class stat {}; #else using intrusive::cuckoo::stat; #endif #ifdef CDS_DOXYGEN_INVOKED /// Cuckoo empty statistics.This is typedef for intrusive::cuckoo::empty_stat class empty_stat {}; #else using intrusive::cuckoo::empty_stat; #endif /// Option specifying whether to store hash values in the node /** This option reserves additional space in the hook to store the hash value of the object once it's introduced in the container. When this option is used, the unordered container will store the calculated hash value in the hook and rehashing operations won't need to recalculate the hash of the value. 
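For example, the option can be combined with other options via \p cuckoo::make_traits, mirroring the class-level examples (here \p hash1 and \p hash2 stand for user-provided hash functors):
\code
typedef cds::container::cuckoo::make_traits<
    cds::opt::hash< std::tuple< hash1, hash2 > >
    ,cds::opt::less< std::less< std::string > >
    ,cds::container::cuckoo::store_hash< true > // keep hash values in the node
>::type my_traits;
\endcode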
This option will improve the performance of unordered containers when rehashing is frequent or hashing the value is a slow operation. The \p Enable template parameter toggles the feature: - the value \p true enables storing the hash values - the value \p false disables storing the hash values */ template <bool Enable> struct store_hash { //@cond template <typename Base> struct pack: public Base { static bool const store_hash = Enable; }; //@endcond }; #ifdef CDS_DOXYGEN_INVOKED /// Probe set type option /** @copydetails cds::intrusive::cuckoo::probeset_type */ template <typename Type> struct probeset_type {}; #else using intrusive::cuckoo::probeset_type; #endif using intrusive::cuckoo::list; using intrusive::cuckoo::vector; /// Type traits for CuckooSet and CuckooMap classes struct traits { /// Hash functors tuple /** This is a mandatory type and has no predefined one. At least two hash functors should be provided. All hash functors should be orthogonal (pairwise different): for each i,j: i != j => h[i](x) != h[j](x). The hash functors are defined as std::tuple< H1, H2, ... Hn > : \@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. To specify hash tuple in traits you should use \p cds::opt::hash_tuple: \code struct my_traits: public cds::container::cuckoo::traits { typedef cds::opt::hash_tuple< hash1, hash2 > hash; }; \endcode */ typedef cds::opt::none hash; /// Concurrent access policy /** Available opt::mutex_policy types: - cuckoo::striping - simple, but the lock array is not resizable - cuckoo::refinable - resizable lock array, but more complex access to set data. Default is cuckoo::striping. */ typedef cuckoo::striping<> mutex_policy; /// Key equality functor /** Default is std::equal_to */ typedef opt::none equal_to; /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for key comparison. /** Default is \p std::less. */ typedef opt::none less; /// Item counter /** The type for item counting feature. Default is cds::atomicity::item_counter. Only atomic item counter type is allowed. */ typedef cds::intrusive::cuckoo::traits::item_counter item_counter; /// Allocator type /** The allocator type for allocating bucket tables. Default is \p CDS_DEFAULT_ALLOCATOR that is \p std::allocator. */ typedef CDS_DEFAULT_ALLOCATOR allocator; /// Node allocator type /** If this type is not set explicitly, the \ref allocator type is used. */ typedef opt::none node_allocator; /// Store hash value into items. See cuckoo::store_hash for explanation static bool const store_hash = false; /// Probe-set type. See \ref probeset_type option for explanation typedef cuckoo::list probeset_type; /// Internal statistics typedef empty_stat stat; }; /// Metafunction converting option list to CuckooSet/CuckooMap traits /** Template argument list \p Options... are: - \p opt::hash - hash functor tuple, mandatory option. At least two hash functors should be provided. All hash functors should be orthogonal (pairwise different): for each i,j: i != j => h[i](x) != h[j](x). The hash functors are passed as std::tuple< H1, H2, ... Hn > . The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. - \p opt::mutex_policy - concurrent access policy. Available policies: \p cuckoo::striping, \p cuckoo::refinable. Default is \p %cuckoo::striping. - \p opt::equal_to - key equality functor like \p std::equal_to.
If this functor is defined then the probe-set will be unordered. If \p %opt::compare or \p %opt::less option is specified too, then the probe-set will be ordered and \p %opt::equal_to will be ignored. - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p %opt::less is used. If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. - \p opt::item_counter - the type of item counting feature. Default is \p opt::v::sequential_item_counter. - \p opt::allocator - the allocator type used for allocating bucket tables. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::node_allocator - the allocator type used for allocating the set's items. If this option is not specified then the type defined in \p %opt::allocator option is used. - \p cuckoo::store_hash - this option reserves additional space in the node to store the hash value of the object once it's introduced in the container. When this option is used, the unordered container will store the calculated hash value in the node and rehashing operations won't need to recalculate the hash of the value. This option will improve the performance of unordered containers when rehashing is frequent or hashing the value is a slow operation. Default value is \p false. - \ref intrusive::cuckoo::probeset_type "cuckoo::probeset_type" - type of probe set, may be \p cuckoo::list or \p cuckoo::vector. Default is \p cuckoo::list. - \p opt::stat - internal statistics. Possible types: \p cuckoo::stat, \p cuckoo::empty_stat. Default is \p %cuckoo::empty_stat
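An illustrative example (an editorial sketch; the user-defined hash functors \p hash1 and \p hash2 are assumptions): \code typedef cds::container::cuckoo::make_traits< cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::mutex_policy< cds::container::cuckoo::striping<> > >::type set_traits; \endcode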
*/ template <typename... Options> struct make_traits { typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< cuckoo::traits, Options... >::type ,Options... >::type type ; ///< Result of metafunction }; } // namespace cuckoo }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H libcds-2.3.3/cds/container/details/ellen_bintree_base.h000066400000000000000000000474201341244201700231160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H #define CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H #include #include #include #include namespace cds { namespace container { /// EllenBinTree related definitions /** @ingroup cds_nonintrusive_helper */ namespace ellen_bintree { #ifdef CDS_DOXYGEN_INVOKED /// Typedef for \p cds::intrusive::ellen_bintree::update_desc typedef cds::intrusive::ellen_bintree::update_desc update_desc; /// Typedef for \p cds::intrusive::ellen_bintree::internal_node typedef cds::intrusive::ellen_bintree::internal_node internal_node; /// Typedef for \p cds::intrusive::ellen_bintree::key_extractor typedef cds::intrusive::ellen_bintree::key_extractor key_extractor; /// Typedef for \p cds::intrusive::ellen_bintree::update_desc_allocator typedef cds::intrusive::ellen_bintree::update_desc_allocator update_desc_allocator; #else using cds::intrusive::ellen_bintree::update_desc; using cds::intrusive::ellen_bintree::internal_node; using cds::intrusive::ellen_bintree::key_extractor; using cds::intrusive::ellen_bintree::update_desc_allocator; using cds::intrusive::ellen_bintree::node_types; #endif /// EllenBinTree internal statistics template <typename Counter = cds::intrusive::ellen_bintree::stat<>::event_counter > using stat = cds::intrusive::ellen_bintree::stat< Counter >; /// EllenBinTree empty internal statistics typedef cds::intrusive::ellen_bintree::empty_stat empty_stat; /// EllenBinTree leaf node template <typename GC, typename T> struct node: public cds::intrusive::ellen_bintree::node< GC > { typedef T value_type ; ///< Value type T m_Value ; ///< Value /// Default ctor node() {} /// Initializing ctor template <typename Q> node(Q const& v) : m_Value(v) {} /// Initializing ctor, copies \p args template <typename... Args> node( Args const&... args ) : m_Value( args... ) {} /// Initializing ctor, moves \p args template <typename... Args> node( Args&&... args ) : m_Value( std::forward<Args>( args )... ) {} }; /// EllenBinTreeMap leaf node template <typename GC, typename Key, typename T> struct map_node: public cds::intrusive::ellen_bintree::node< GC > { typedef Key key_type ; ///< key type typedef T mapped_type ; ///< value type typedef std::pair< key_type const, mapped_type > value_type ; ///< key-value pair stored in the map value_type m_Value ; ///< Key-value pair stored in map leaf node /// Initializes the key field; the value is default-constructed template <typename K> map_node( K const& key ) : m_Value( std::make_pair( key_type(key), mapped_type())) {} /// Initializes key and value fields template <typename K, typename Q> map_node( K const& key, Q const& v ) : m_Value( std::make_pair(key_type(key), mapped_type(v))) {} }; /// Type traits for \p EllenBinTreeSet and \p EllenBinTreeMap struct traits { /// Key extracting functor (only for \p EllenBinTreeSet) /** This is a mandatory functor for \p %EllenBinTreeSet. It has the following prototype: \code struct key_extractor { void operator ()( Key& dest, T const& src ); }; \endcode It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes of \p %EllenBinTreeSet. */ typedef opt::none key_extractor; /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. See \p cds::opt::compare option description for functor interface. You should provide \p compare or \p less functor. See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". */ typedef opt::none compare; /// Specifies binary predicate used for key compare. /** See \p cds::opt::less option description. You should provide \p compare or \p less functor. See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements".
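For instance, a conforming predicate is an ordinary strict-ordering functor (an editorial sketch for an \p int key): \code struct int_less { bool operator()( int lhs, int rhs ) const { return lhs < rhs; } }; \endcode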
*/ typedef opt::none less; /// Item counter /** The type for item counter, by default it is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter */ typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** List of available memory ordering see \p opt::memory_model */ typedef opt::v::relaxed_ordering memory_model; /// Allocator for update descriptors /** The allocator type is used for \p ellen_bintree::update_desc. The update descriptor is a helper data structure with a short lifetime and is a good candidate for pooling. The number of simultaneously existing descriptors is a small number limited by the number of threads working with the tree. Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good choice for the free-list of update descriptors, see \p cds::memory::vyukov_queue_pool free-list implementation. Also notice that the size of the update descriptor does not depend on the type of data stored in the tree, so a single free-list object can be used for several \p EllenBinTree objects. */ typedef CDS_DEFAULT_ALLOCATOR update_desc_allocator; /// Allocator for internal nodes /** The allocator type is used for \p ellen_bintree::internal_node. */ typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// Allocator for leaf nodes /** Each leaf node contains data stored in the container. */ typedef CDS_DEFAULT_ALLOCATOR allocator; /// Internal statistics /** By default, internal statistics is disabled (\p ellen_bintree::empty_stat). To enable it use \p ellen_bintree::stat. */ typedef empty_stat stat; /// Back-off strategy typedef cds::backoff::empty back_off; /// RCU deadlock checking policy (only for RCU-based EllenBinTreeXXX classes) /** List of available options see \p opt::rcu_check_deadlock */ typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; /// Key copy policy (for \p EllenBinTreeMap) /** The key copy policy defines a functor to copy a leaf node's key to an internal node. This policy is used only in \p EllenBinTreeMap. By default, assignment operator is used. The copy functor interface is: \code struct copy_functor { void operator()( Key& dest, Key const& src ); }; \endcode */ typedef opt::none copy_policy; }; /// Metafunction converting option list to \p EllenBinTreeSet traits /** \p Options are: - \p ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: \code struct key_extractor { void operator ()( Key& dest, T const& src ); }; \endcode It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. - \p opt::compare - key compare functor. No default functor is provided. If the option is not specified, \p %opt::less is used. - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. - \p opt::item_counter - the type of item counter, default is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::allocator - the allocator for \ref ellen_bintree::node "leaf nodes" which contain data. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", default is \ref CDS_DEFAULT_ALLOCATOR. Note that the update descriptor is a helper data structure with a short lifetime and is a good candidate for pooling. The number of simultaneously existing descriptors is a relatively small number limited by the number of threads working with the tree and by the RCU buffer size. Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good choice for the free-list of update descriptors, see \p cds::memory::vyukov_queue_pool free-list implementation. Also notice that the size of the update descriptor does not depend on the type of data stored in the tree, so a single free-list object can be used for several EllenBinTree-based objects. - \p opt::stat - internal statistics, by default disabled (\p ellen_bintree::empty_stat). To enable it use \p ellen_bintree::stat. - \p opt::back_off - back-off strategy, by default no strategy is used (\p cds::backoff::empty) - \p opt::rcu_check_deadlock - a deadlock checking policy, only for RCU-based tree. Default is \p opt::v::rcu_throw_deadlock.
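An illustrative example (an editorial sketch; the item type and the key extractor are assumptions): \code struct my_item { int key; // ... other data fields }; struct my_key_extractor { void operator()( int& dest, my_item const& src ) { dest = src.key; } }; typedef cds::container::ellen_bintree::make_set_traits< cds::container::ellen_bintree::key_extractor< my_key_extractor > ,cds::opt::less< std::less<int> > >::type set_traits; \endcode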
*/ template <typename... Options> struct make_set_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; /// Metafunction converting option list to \p EllenBinTreeMap traits /** \p Options are: - \p opt::compare - key compare functor. No default functor is provided. If the option is not specified, \p %opt::less is used. - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. - \p opt::item_counter - the type of item counter, default is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::allocator - the allocator used for \ref ellen_bintree::map_node "leaf nodes" which contain data. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::node_allocator - the allocator used for \ref ellen_bintree::internal_node "internal nodes". Default is \ref CDS_DEFAULT_ALLOCATOR. - \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", default is \ref CDS_DEFAULT_ALLOCATOR. Note that the update descriptor is a helper data structure with a short lifetime and is a good candidate for pooling. The number of simultaneously existing descriptors is a relatively small number limited by the number of threads working with the tree and by the RCU buffer size. Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good choice for the free-list of update descriptors, see \p cds::memory::vyukov_queue_pool free-list implementation. Also notice that the size of the update descriptor does not depend on the type of data stored in the tree, so a single free-list object can be used for several EllenBinTree-based objects. - \p opt::stat - internal statistics, by default disabled (\p ellen_bintree::empty_stat). To enable it use \p ellen_bintree::stat.
- \p opt::back_off - back-off strategy, by default no strategy is used (\p cds::backoff::empty) - \p opt::rcu_check_deadlock - a deadlock checking policy, only for RCU-based tree. Default is \p opt::v::rcu_throw_deadlock. - \p opt::copy_policy - key copy policy: defines a functor to copy a leaf node's key to an internal node. By default, assignment operator is used. The copy functor interface is: \code struct copy_functor { void operator()( Key& dest, Key const& src ); }; \endcode */ template <typename... Options> struct make_map_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond namespace details { template < class GC, typename Key, typename T, class Traits> struct make_ellen_bintree_set { typedef GC gc; typedef Key key_type; typedef T value_type; typedef Traits original_traits; typedef node< gc, value_type > leaf_node; struct intrusive_key_extractor { void operator()( key_type& dest, leaf_node const& src ) const { typename original_traits::key_extractor()( dest, src.m_Value ); } }; struct value_accessor { value_type const& operator()( leaf_node const& node ) const { return node.m_Value; } }; typedef typename cds::opt::details::make_comparator< value_type, original_traits, false >::type key_comparator; typedef cds::details::Allocator< leaf_node, typename original_traits::allocator> cxx_leaf_node_allocator; struct leaf_deallocator { void operator()( leaf_node * p ) const { cxx_leaf_node_allocator().Delete( p ); } }; struct intrusive_traits: public original_traits { typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc >> hook; typedef intrusive_key_extractor key_extractor; typedef leaf_deallocator disposer; typedef cds::details::compare_wrapper< leaf_node, key_comparator, value_accessor > compare; }; // Metafunction result typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_traits > type; }; template < class GC, typename Key, typename T, class Traits> struct make_ellen_bintree_map { typedef GC gc; typedef Key key_type; typedef T mapped_type; typedef map_node< gc, key_type, mapped_type > leaf_node; typedef typename leaf_node::value_type value_type; typedef Traits original_traits; struct assignment_copy_policy { void operator()( key_type& dest, key_type const& src ) { dest = src; } }; typedef typename std::conditional< std::is_same< typename original_traits::copy_policy, opt::none >::value, assignment_copy_policy, typename original_traits::copy_policy >::type copy_policy; struct intrusive_key_extractor { void operator()( key_type& dest, leaf_node const& src ) const { copy_policy()( dest, src.m_Value.first ); } }; struct key_accessor { key_type const& operator()( leaf_node const& node ) const { return node.m_Value.first; } }; typedef typename cds::opt::details::make_comparator< key_type, original_traits, false >::type key_comparator; typedef cds::details::Allocator< leaf_node, typename original_traits::allocator> cxx_leaf_node_allocator; struct leaf_deallocator { void operator()( leaf_node * p ) const { cxx_leaf_node_allocator().Delete( p ); } }; struct intrusive_traits: public original_traits { typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc > > hook; typedef intrusive_key_extractor key_extractor; typedef leaf_deallocator disposer; typedef cds::details::compare_wrapper< leaf_node, key_comparator, key_accessor > compare; }; // Metafunction result typedef
cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_traits > type; }; } // namespace details //@endcond } // namespace ellen_bintree // Forward declarations //@cond template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits > class EllenBinTreeSet; template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits > class EllenBinTreeMap; //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H libcds-2.3.3/cds/container/details/feldman_hashmap_base.h000066400000000000000000000342711341244201700234160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H #define CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H #include #include #include namespace cds { namespace container { /// \p FeldmanHashMap related definitions /** @ingroup cds_nonintrusive_helper */ namespace feldman_hashmap { /// \p FeldmanHashMap internal statistics, see cds::intrusive::feldman_hashset::stat template <typename EventCounter = cds::atomicity::event_counter> using stat = cds::intrusive::feldman_hashset::stat< EventCounter >; /// \p FeldmanHashMap empty internal statistics typedef cds::intrusive::feldman_hashset::empty_stat empty_stat; /// Bit-wise memcmp-based comparator for hash value \p T template <typename T> using bitwise_compare = cds::intrusive::feldman_hashset::bitwise_compare< T >; /// \p FeldmanHashMap level statistics typedef cds::intrusive::feldman_hashset::level_statistics level_statistics; /// Key size option /** @copydetails cds::container::feldman_hashmap::traits::hash_size */ template <size_t Size> using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >; /// Hash splitter option /** @copydetails cds::container::feldman_hashmap::traits::hash_splitter */ template <typename Splitter> using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >; /// \p FeldmanHashMap traits struct traits { /// Hash functor, default is \p opt::none /** \p FeldmanHashMap may use any hash functor converting a key to a fixed-sized bit-string, for example, SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash. If you use a fixed-sized key you can use it directly instead of a hash. In that case \p %traits::hash should be specified as \p opt::none. However, if you want to use the hash values or if your key type is not fixed-sized you must specify a proper hash functor in your traits. For example: fixed-sized key - IP4 address map @code // Key - IP address struct ip4_address { uint8_t ip[4]; }; // IP compare struct ip4_cmp { int operator()( ip4_address const& lhs, ip4_address const& rhs ) const { return memcmp( &lhs, &rhs, sizeof(lhs)); } }; // Value - statistics for the IP address struct statistics { // ... }; // Traits // Key type (ip4_address) is fixed-sized so we may use the map without any hash functor struct ip4_map_traits: public cds::container::feldman_hashmap::traits { typedef ip4_cmp compare; }; // IP4 address - statistics map typedef cds::container::FeldmanHashMap< cds::gc::HP, ip4_address, statistics, ip4_map_traits > ip4_map; @endcode A variable-sized key requires a hash functor: URL map @code // Value - statistics for the URL struct statistics { // ...
}; // Traits // Key type (std::string) is variable-sized so we must provide a hash functor in our traits // We do not specify any comparing predicate (less or compare) so std::less will be used by default struct url_map_traits: public cds::container::feldman_hashmap::traits { typedef std::hash<std::string> hash; }; // URL statistics map typedef cds::container::FeldmanHashMap< cds::gc::HP, std::string, statistics, url_map_traits > url_map; @endcode */ typedef opt::none hash; /// The size of hash value in bytes /** By default, the size of hash value is sizeof( hash_type ) where \p hash_type is the type of the \p hash() result, or sizeof( key ) if you use a fixed-sized key. Sometimes that size is wrong, for example, for this 6-byte key: \code struct key_type { uint32_t key; uint16_t subkey; }; static_assert( sizeof( key_type ) == 6, "Key type size mismatch" ); \endcode Here sizeof( key_type ) == 8 because of padding, so the \p static_assert fails. In that case you can specify \p hash_size explicitly. Value \p 0 means auto-calculated sizeof( key_type ). */ static constexpr size_t const hash_size = 0; /// Hash splitter /** @copydetails cds::intrusive::feldman_hashset::traits::hash_splitter */ typedef cds::opt::none hash_splitter; /// Hash comparing functor /** @copydetails cds::intrusive::feldman_hashset::traits::compare */ typedef cds::opt::none compare; /// Specifies binary predicate used for hash compare. /** @copydetails cds::intrusive::feldman_hashset::traits::less */ typedef cds::opt::none less; /// Item counter /** @copydetails cds::intrusive::feldman_hashset::traits::item_counter */ typedef cds::atomicity::item_counter item_counter; /// Item allocator /** Default is \ref CDS_DEFAULT_ALLOCATOR */ typedef CDS_DEFAULT_ALLOCATOR allocator; /// Array node allocator /** @copydetails cds::intrusive::feldman_hashset::traits::node_allocator */ typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// C++ memory ordering model /** @copydetails cds::intrusive::feldman_hashset::traits::memory_model */ typedef cds::opt::v::relaxed_ordering memory_model; /// Back-off strategy typedef cds::backoff::Default back_off; /// Internal statistics /** @copydetails cds::intrusive::feldman_hashset::traits::stat */ typedef empty_stat stat; /// RCU deadlock checking policy (only for \ref cds_container_FeldmanHashMap_rcu "RCU-based FeldmanHashMap") /** @copydetails cds::intrusive::feldman_hashset::traits::rcu_check_deadlock */ typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to \p feldman_hashmap::traits /** Supported \p Options are: - \p opt::hash - a hash functor, default is \p std::hash @copydetails traits::hash - \p feldman_hashmap::hash_size - the size of hash value in bytes. @copydetails traits::hash_size - \p opt::allocator - item allocator @copydetails traits::allocator - \p opt::node_allocator - array node allocator. @copydetails traits::node_allocator - \p opt::compare - hash comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for hash comparison. @copydetails cds::container::feldman_hashmap::traits::less - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::item_counter - the type of item counting feature. @copydetails cds::container::feldman_hashmap::traits::item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashmap::empty_stat). To enable it use \p feldman_hashmap::stat. - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet". Default is \p opt::v::rcu_throw_deadlock
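An illustrative example (an editorial sketch; the \p std::string key and the exact option list are assumptions): \code typedef cds::container::feldman_hashmap::make_traits< cds::opt::hash< std::hash<std::string> > ,cds::opt::item_counter< cds::atomicity::item_counter > >::type map_traits; \endcode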
*/ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace feldman_hashmap //@cond // Forward declaration template < class GC, typename Key, typename T, class Traits = feldman_hashmap::traits > class FeldmanHashMap; //@endcond //@cond namespace details { template <typename Key, typename Value, typename Hash> struct hash_selector { typedef Key key_type; typedef Value mapped_type; typedef Hash hasher; typedef typename std::decay< typename std::remove_reference< decltype(hasher()(std::declval<key_type>())) >::type >::type hash_type; struct node_type { std::pair< key_type const, mapped_type> m_Value; hash_type const m_hash; node_type() = delete; node_type(node_type const&) = delete; template <typename Q> node_type(hasher& h, Q const& key) : m_Value( std::move( std::make_pair( key_type( key ), mapped_type()))) , m_hash( h( m_Value.first )) {} template <typename Q, typename U> node_type(hasher& h, Q const& key, U const& val) : m_Value( std::move( std::make_pair( key_type( key ), mapped_type(val)))) , m_hash( h( m_Value.first )) {} template <typename Q, typename... Args> node_type(hasher& h, Q&& key, Args&&... args) : m_Value( std::move(std::make_pair( key_type( std::forward<Q>(key)), std::move( mapped_type(std::forward<Args>(args)...))))) , m_hash( h( m_Value.first )) {} }; struct hash_accessor { hash_type const& operator()(node_type const& node) const { return node.m_hash; } }; }; template <typename Key, typename Value> struct hash_selector< Key, Value, opt::none > { typedef Key key_type; typedef Value mapped_type; struct hasher { key_type const& operator()(key_type const& k) const { return k; } }; typedef key_type hash_type; struct node_type { std::pair< key_type const, mapped_type> m_Value; node_type() = delete; node_type(node_type const&) = delete; template <typename Q, typename... Args> node_type( hasher /*h*/, Q&& key, Args&&...
args ) : m_Value( std::make_pair( key_type( std::forward( key )), mapped_type( std::forward(args)...))) {} }; struct hash_accessor { hash_type const& operator()(node_type const& node) const { return node.m_Value.first; } }; }; template struct make_feldman_hashmap { typedef GC gc; typedef Key key_type; typedef T mapped_type; typedef Traits original_traits; typedef hash_selector< key_type, mapped_type, typename original_traits::hash > select; typedef typename select::hasher hasher; typedef typename select::hash_type hash_type; typedef typename select::node_type node_type; typedef cds::details::Allocator< node_type, typename original_traits::allocator > cxx_node_allocator; struct node_disposer { void operator()( node_type * p ) const { cxx_node_allocator().Delete( p ); } }; struct intrusive_traits: public original_traits { typedef typename select::hash_accessor hash_accessor; typedef node_disposer disposer; }; // Metafunction result typedef cds::intrusive::FeldmanHashSet< GC, node_type, intrusive_traits > type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H libcds-2.3.3/cds/container/details/feldman_hashset_base.h000066400000000000000000000176221341244201700234350ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H #define CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H #include #include namespace cds { namespace container { /// \p FeldmanHashSet related definitions /** @ingroup cds_nonintrusive_helper */ namespace feldman_hashset { /// Hash accessor option /** @copydetails cds::intrusive::feldman_hashset::traits::hash_accessor */ template using hash_accessor = cds::intrusive::feldman_hashset::hash_accessor< Accessor >; /// Hash size option /** @copydetails cds::intrusive::feldman_hashset::traits::hash_size */ template using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >; /// Hash splitter /** @copydetails cds::intrusive::feldman_hashset::traits::hash_splitter */ template using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >; /// \p FeldmanHashSet internal statistics, see cds::intrusive::feldman_hashset::stat template using stat = cds::intrusive::feldman_hashset::stat< EventCounter >; /// \p FeldmanHashSet empty internal statistics typedef cds::intrusive::feldman_hashset::empty_stat empty_stat; /// Bit-wise memcmp-based comparator for hash value \p T template using bitwise_compare = cds::intrusive::feldman_hashset::bitwise_compare< T >; /// \p FeldmanHashSet level statistics typedef cds::intrusive::feldman_hashset::level_statistics level_statistics; /// \p FeldmanHashSet traits struct traits { /// Mandatory functor to get hash value from data node /** @copydetails cds::intrusive::feldman_hashset::traits::hash_accessor */ typedef cds::opt::none hash_accessor; /// The size of hash value in bytes /** @copydetails cds::intrusive::feldman_hashset::traits::hash_size */ static constexpr size_t const hash_size = 0; /// Hash splitter /** @copydetails cds::intrusive::feldman_hashset::traits::hash_splitter */ typedef cds::opt::none hash_splitter; /// Hash comparing functor /** @copydetails cds::intrusive::feldman_hashset::traits::compare */ typedef cds::opt::none compare; /// Specifies binary predicate used for hash compare. 
/** @copydetails cds::intrusive::feldman_hashset::traits::less */ typedef cds::opt::none less; /// Item counter /** @copydetails cds::intrusive::feldman_hashset::traits::item_counter */ typedef cds::atomicity::item_counter item_counter; /// Item allocator /** Default is \ref CDS_DEFAULT_ALLOCATOR */ typedef CDS_DEFAULT_ALLOCATOR allocator; /// Array node allocator /** @copydetails cds::intrusive::feldman_hashset::traits::node_allocator */ typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// C++ memory ordering model /** @copydetails cds::intrusive::feldman_hashset::traits::memory_model */ typedef cds::opt::v::relaxed_ordering memory_model; /// Back-off strategy typedef cds::backoff::Default back_off; /// Internal statistics /** @copydetails cds::intrusive::feldman_hashset::traits::stat */ typedef empty_stat stat; /// RCU deadlock checking policy (only for \ref cds_container_FeldmanHashSet_rcu "RCU-based FeldmanHashSet") /** @copydetails cds::intrusive::feldman_hashset::traits::rcu_check_deadlock */ typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to \p feldman_hashset::traits /** Supported \p Options are: - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor. @copydetails traits::hash_accessor - \p feldman_hashset::hash_size - the size of hash value in bytes. @copydetails traits::hash_size - \p feldman_hashset::hash_splitter - a hash splitter algorithm @copydetails traits::hash_splitter - \p opt::allocator - item allocator @copydetails traits::allocator - \p opt::node_allocator - array node allocator. @copydetails traits::node_allocator - \p opt::compare - hash comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for hash comparison. @copydetails cds::container::feldman_hashset::traits::less - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::item_counter - the type of item counting feature. @copydetails cds::intrusive::feldman_hashset::traits::item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat). To enable it use \p feldman_hashset::stat. - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet". Default is \p opt::v::rcu_throw_deadlock
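An illustrative example (an editorial sketch; the item type storing its own hash value is an assumption): \code struct my_item { size_t nHash; // stored hash value // ... data fields }; struct my_hash_accessor { size_t const& operator()( my_item const& item ) const { return item.nHash; } }; typedef cds::container::feldman_hashset::make_traits< cds::container::feldman_hashset::hash_accessor< my_hash_accessor > >::type set_traits; \endcode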
*/ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace feldman_hashset //@cond // Forward declaration template < class GC, typename T, class Traits = cds::container::feldman_hashset::traits > class FeldmanHashSet; //@endcond //@cond namespace details { template <typename GC, typename T, typename Traits> struct make_feldman_hashset { typedef GC gc; typedef T value_type; typedef Traits original_traits; typedef cds::details::Allocator< value_type, typename original_traits::allocator > cxx_node_allocator; struct node_disposer { void operator()( value_type * p ) const { cxx_node_allocator().Delete( p ); } }; struct intrusive_traits: public original_traits { typedef node_disposer disposer; }; // Metafunction result typedef cds::intrusive::FeldmanHashSet< GC, T, intrusive_traits > type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H libcds-2.3.3/cds/container/details/guarded_ptr_cast.h000066400000000000000000000016001341244201700226170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H #define CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H //@cond #include namespace cds { namespace container { namespace details { template <typename Node, typename T> struct guarded_ptr_cast_set { T * operator()(Node* pNode ) const noexcept { return &(pNode->m_Value); } }; template <typename Node, typename T> struct guarded_ptr_cast_map { T * operator()(Node* pNode ) const noexcept { return &(pNode->m_Data); } }; }}} // namespace cds::container::details //@endcond #endif // #ifndef CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H libcds-2.3.3/cds/container/details/iterable_list_base.h000066400000000000000000000127161341244201700231270ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H #define CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H #include #include #include namespace cds { namespace container { /// \p IterableList ordered list related definitions /** @ingroup cds_nonintrusive_helper */ namespace iterable_list { /// \p IterableList internal statistics, see \p cds::intrusive::iterable_list::stat template <typename EventCounter = cds::intrusive::iterable_list::stat<>::event_counter > using stat = cds::intrusive::iterable_list::stat< EventCounter >; /// \p IterableList empty internal statistics, see \p cds::intrusive::iterable_list::empty_stat typedef cds::intrusive::iterable_list::empty_stat empty_stat; //@cond template <typename Stat = cds::intrusive::iterable_list::traits::stat_type > using wrapped_stat = cds::intrusive::iterable_list::wrapped_stat< Stat >; //@endcond /// \p IterableList traits struct traits { /// Allocator used to allocate new data typedef CDS_DEFAULT_ALLOCATOR allocator; /// Node allocator typedef intrusive::iterable_list::traits::node_allocator node_allocator; /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for key comparison. /** Default is \p std::less. */ typedef opt::none less; /// Back-off strategy typedef intrusive::iterable_list::traits::back_off back_off; /// Item counting feature; by default, disabled.
Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting typedef intrusive::iterable_list::traits::item_counter item_counter; /// Internal statistics /** By default, internal statistics is disabled (\p iterable_list::empty_stat). Use \p iterable_list::stat to enable it. */ typedef intrusive::iterable_list::traits::stat stat; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") /** List of available options see opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; //@cond // IterableKVList: support for split-ordered list // key accessor (opt::none = internal key type is equal to user key type) typedef opt::none key_accessor; //@endcond }; /// Metafunction converting option list to \p iterable_list::traits /** Supported \p Options are: - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - \p opt::allocator - an allocator for data, default is \p CDS_DEFAULT_ALLOCATOR. - \p opt::node_allocator - node allocator, default is \p std::allocator. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter. - \p opt::stat - internal statistics. By default, it is disabled (\p iterable_list::empty_stat). To enable it use \p iterable_list::stat. - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; #endif }; } // namespace iterable_list // Forward declarations template < class GC, typename T, class Traits = iterable_list::traits > class IterableList; template < class GC, typename Key, typename Value, class Traits = iterable_list::traits > class IterableKVList; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H libcds-2.3.3/cds/container/details/lazy_list_base.h000066400000000000000000000161771341244201700223240ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H #define CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H #include #include #include namespace cds { namespace container { /// \p LazyList ordered list related definitions /** @ingroup cds_nonintrusive_helper */ namespace lazy_list { /// \p LazyList internal statistics, see \p cds::intrusive::lazy_list::stat template <typename EventCounter = cds::intrusive::lazy_list::stat<>::event_counter> using stat = cds::intrusive::lazy_list::stat< EventCounter >; /// \p LazyList empty internal statistics, see \p cds::intrusive::lazy_list::empty_stat typedef cds::intrusive::lazy_list::empty_stat empty_stat; //@cond template <typename Stat = cds::intrusive::lazy_list::traits::stat_type> using wrapped_stat = cds::intrusive::lazy_list::wrapped_stat< Stat >; //@endcond /// LazyList traits /** Either \p compare or \p less or both must be specified. */ struct traits { /// allocator used to allocate new node typedef CDS_DEFAULT_ALLOCATOR allocator; /// Key comparing functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for key comparing /** Default is \p std::less. */ typedef opt::none less; /// Specifies binary functor used for comparing keys for equality /** No default functor is provided. If \p equal_to option is not specified, \p compare is used; if \p compare is not specified, \p less is used. */ typedef opt::none equal_to; /// Specifies list ordering policy. /** If \p sort is \p true, then the list maintains items in sorted order, otherwise items are unordered. Default is \p true. Note that if \p sort is \p false then lookup operations scan the entire list. */ static const bool sort = true; /// Lock type used to lock modifying items /** Default is cds::sync::spin */ typedef cds::sync::spin lock_type; /// back-off strategy used typedef cds::backoff::Default back_off; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics /** By default, internal statistics is disabled (\p lazy_list::empty_stat). Use \p lazy_list::stat to enable it. */ typedef empty_stat stat; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") /** List of available options see \p opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; //@cond // LazyKVList: support for split-ordered list // key accessor (opt::none = internal key type is equal to user key type) typedef opt::none key_accessor; //@endcond }; /// Metafunction converting option list to \p lazy_list::traits /** \p Options are: - \p opt::lock_type - lock type for node-level locking. Default is \p cds::sync::spin. Note that each node of the list has a member of type \p lock_type; therefore, a heavyweight locking primitive is not acceptable as a candidate for \p lock_type. - \p opt::compare - key compare functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key compare. Default is \p std::less. - \p opt::equal_to - specifies binary functor for comparing keys for equality. This option is applicable only for unordered list. No default is provided.
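An illustrative sketch of such a functor (assuming \p std::string keys): \code struct string_equal_to { bool operator()( std::string const& lhs, std::string const& rhs ) const { return lhs == rhs; } }; \endcode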
If \p equal_to is not specified, \p compare is used; if \p compare is not specified, \p less is used. - \p opt::sort - specifies ordering policy. Default value is \p true, i.e. the list is ordered. Note: the unordered mode is not fully supported yet. - \p opt::back_off - back-off strategy used. If the option is not specified, \p cds::backoff::Default is used. - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::stat - internal statistics. By default, it is disabled (\p lazy_list::empty_stat). To enable it use \p lazy_list::stat - \p opt::allocator - the allocator used for creating and freeing list items. Default is \ref CDS_DEFAULT_ALLOCATOR macro. - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; #endif }; } // namespace lazy_list // Forward declarations template < class GC, typename T, class Traits = lazy_list::traits > class LazyList; template < class GC, typename Key, typename Value, class Traits = lazy_list::traits > class LazyKVList; // Tag for selecting lazy list implementation /** This empty struct is used only as a tag for selecting \p LazyList as ordered list implementation in declaration of some classes. See \p split_list::traits::ordered_list as an example. */ struct lazy_list_tag {}; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H libcds-2.3.3/cds/container/details/make_iterable_kvlist.h000066400000000000000000000052221341244201700234670ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_KVLIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_KVLIST_H #include #include #include namespace cds { namespace container { //@cond namespace details { template struct make_iterable_kvlist { typedef Traits original_type_traits; typedef GC gc; typedef K key_type; typedef T mapped_type; typedef std::pair value_type; typedef typename std::allocator_traits< typename original_type_traits::allocator >::template rebind_alloc data_allocator_type; typedef cds::details::Allocator< value_type, data_allocator_type > cxx_data_allocator; typedef typename original_type_traits::memory_model memory_model; struct data_disposer { void operator ()( value_type * pData ) { cxx_data_allocator().Delete( pData ); } }; struct key_field_accessor { key_type const& operator()( value_type const& data ) { return data.first; } }; template struct less_wrapper { template bool operator()( value_type const& lhs, Q const& rhs ) const { return Less()( lhs.first, rhs ); } template bool operator()( Q const& lhs, value_type const& rhs ) const { return Less()( lhs, rhs.first ); } }; typedef typename opt::details::make_comparator< key_type, original_type_traits >::type key_comparator; struct base_traits: public original_type_traits { typedef data_disposer disposer; typedef cds::details::compare_wrapper< value_type, key_comparator, key_field_accessor > compare; }; typedef container::IterableList type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_KVLIST_H libcds-2.3.3/cds/container/details/make_iterable_list.h000066400000000000000000000034601341244201700231300ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_LIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_LIST_H #include #include #include namespace cds { namespace container { //@cond namespace details { template struct make_iterable_list { typedef GC gc; typedef T value_type; typedef Traits original_traits; typedef typename std::allocator_traits< typename original_traits::allocator >::template rebind_alloc< value_type > data_allocator_type; typedef cds::details::Allocator< value_type, data_allocator_type > cxx_data_allocator; typedef typename original_traits::memory_model memory_model; struct data_disposer { void operator ()( value_type* data ) { cxx_data_allocator().Delete( data ); } }; template using less_wrapper = cds::opt::details::make_comparator_from_less; struct intrusive_traits: public original_traits { typedef data_disposer disposer; }; typedef intrusive::IterableList type; typedef typename type::key_comparator key_comparator; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_LIST_H libcds-2.3.3/cds/container/details/make_lazy_kvlist.h000066400000000000000000000121731341244201700226620ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H #include #include namespace cds { namespace container { //@cond namespace details { template struct make_lazy_kvlist { typedef Traits original_type_traits; typedef GC gc; typedef K key_type; typedef T mapped_type; typedef std::pair value_type; struct node_type: public intrusive::lazy_list::node { value_type m_Data; node_type( key_type const& key ) : m_Data( key, mapped_type()) {} template node_type( Q const& key ) : m_Data( key_type( key ), mapped_type()) {} template explicit node_type( std::pair const& pair ) : m_Data( pair ) {} node_type( key_type const& key, mapped_type const& value ) : m_Data( key, value ) {} template node_type( key_type const& key, R const& value ) : m_Data( key, mapped_type( value )) {} template node_type( Q const& key, mapped_type const& value ) : m_Data( key_type( key ), value ) {} template node_type( Q const& key, R const& value ) : m_Data( key_type( key ), mapped_type( value )) {} template node_type( Ky&& key, Args&&... args ) : m_Data( key_type( std::forward( key )), std::move( mapped_type( std::forward( args )... ))) {} }; typedef typename std::allocator_traits< typename original_type_traits::allocator >::template rebind_alloc< node_type > allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; struct key_field_accessor { key_type const& operator()( node_type const& pair ) { return pair.m_Data.first; } }; typedef typename std::conditional< original_type_traits::sort, typename opt::details::make_comparator< value_type, original_type_traits >::type, typename opt::details::make_equal_to< value_type, original_type_traits >::type >::type key_comparator; template using less_wrapper = cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_field_accessor >; template using equal_to_wrapper = cds::details::predicate_wrapper< node_type, Equal, key_field_accessor >; struct intrusive_traits: public original_type_traits { typedef intrusive::lazy_list::base_hook< opt::gc, opt::lock_type< typename original_type_traits::lock_type >> hook; typedef node_deallocator disposer; typedef typename std::conditional< std::is_same< typename original_type_traits::equal_to, cds::opt::none >::value, cds::opt::none, equal_to_wrapper< typename original_type_traits::equal_to > >::type equal_to; typedef typename std::conditional< original_type_traits::sort || !std::is_same< typename original_type_traits::compare, cds::opt::none >::value || !std::is_same< typename original_type_traits::less, cds::opt::none >::value, cds::details::compare_wrapper< node_type, typename opt::details::make_comparator< value_type, original_type_traits >::type, key_field_accessor >, cds::opt::none >::type compare; static const opt::link_check_type link_checker = cds::intrusive::lazy_list::traits::link_checker; }; typedef intrusive::LazyList type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H libcds-2.3.3/cds/container/details/make_lazy_list.h000066400000000000000000000077271341244201700223320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_LIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_LIST_H #include #include namespace cds { namespace container { //@cond namespace details { template <typename GC, typename T, typename Traits> struct make_lazy_list { typedef GC gc; typedef T value_type; typedef Traits original_type_traits; struct node_type : public intrusive::lazy_list::node< gc, typename original_type_traits::lock_type > { value_type m_Value; node_type() {} template <typename Q> node_type( Q const& v ) : m_Value(v) {} template <typename... Args> node_type( Args&&... args ) : m_Value( std::forward<Args>(args)...) {} }; typedef typename std::allocator_traits< typename original_type_traits::allocator >::template rebind_alloc< node_type > allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; typedef typename std::conditional< original_type_traits::sort, typename opt::details::make_comparator< value_type, original_type_traits >::type, typename opt::details::make_equal_to< value_type, original_type_traits >::type >::type key_comparator; struct value_accessor { value_type const & operator()( node_type const & node ) const { return node.m_Value; } }; template <typename Less> using less_wrapper = cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less<Less>, value_accessor >; template <typename Equal> using equal_to_wrapper = cds::details::predicate_wrapper< node_type, Equal, value_accessor >; struct intrusive_traits: public original_type_traits { typedef intrusive::lazy_list::base_hook< opt::gc< gc >, cds::opt::lock_type< typename original_type_traits::lock_type >> hook; typedef node_deallocator disposer; static constexpr const opt::link_check_type link_checker = cds::intrusive::lazy_list::traits::link_checker; typedef typename std::conditional< std::is_same< typename original_type_traits::equal_to, cds::opt::none >::value, cds::opt::none, equal_to_wrapper< typename original_type_traits::equal_to > >::type equal_to; typedef typename std::conditional< original_type_traits::sort || !std::is_same< typename original_type_traits::compare, cds::opt::none >::value || !std::is_same< typename original_type_traits::less, cds::opt::none >::value, cds::details::compare_wrapper< node_type, typename opt::details::make_comparator< value_type, original_type_traits >::type, value_accessor >, cds::opt::none >::type compare; }; typedef intrusive::LazyList< gc, node_type, intrusive_traits > type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_LIST_H libcds-2.3.3/cds/container/details/make_michael_kvlist.h000066400000000000000000000077121341244201700233060ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H #include #include namespace cds { namespace container { //@cond namespace details { template struct make_michael_kvlist { typedef Traits original_type_traits; typedef GC gc; typedef K key_type; typedef T value_type; typedef std::pair pair_type; struct node_type: public intrusive::michael_list::node { pair_type m_Data; node_type( key_type const& key ) : m_Data( key, value_type()) {} template node_type( Q const& key ) : m_Data( key_type(key), value_type()) {} template explicit node_type( std::pair const& pair ) : m_Data( pair ) {} node_type( key_type const& key, value_type const& value ) : m_Data( key, value ) {} template node_type( key_type const& key, R const& value ) : m_Data( key, value_type( value )) {} template node_type( Q const& key, value_type const& value ) : m_Data( key_type( key ), value ) {} template node_type( Q const& key, R const& value ) : m_Data( key_type( key ), value_type( value )) {} template< typename Ky, typename... Args> node_type( Ky&& key, Args&&... args ) : m_Data( key_type( std::forward(key)), std::move( value_type( std::forward(args)...))) {} }; typedef typename std::allocator_traits< typename original_type_traits::allocator >::template rebind_alloc< node_type > allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; struct key_field_accessor { key_type const& operator()( node_type const& pair ) { return pair.m_Data.first; } }; typedef typename opt::details::make_comparator< key_type, original_type_traits >::type key_comparator; template using less_wrapper = cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_field_accessor >; struct intrusive_traits: public original_type_traits { typedef intrusive::michael_list::base_hook< opt::gc > hook; typedef node_deallocator disposer; typedef cds::details::compare_wrapper< node_type, key_comparator, key_field_accessor > compare; static const opt::link_check_type link_checker = intrusive::michael_list::traits::link_checker; }; typedef intrusive::MichaelList type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H libcds-2.3.3/cds/container/details/make_michael_list.h000066400000000000000000000054031341244201700227420ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H #include #include namespace cds { namespace container { //@cond namespace details { template struct make_michael_list { typedef GC gc; typedef T value_type; struct node_type : public intrusive::michael_list::node { value_type m_Value; node_type() {} template node_type( Q const& v ) : m_Value(v) {} template node_type( Args&&... args ) : m_Value( std::forward(args)... 
) {} }; typedef Traits original_traits; typedef typename std::allocator_traits< typename original_traits::allocator >::template rebind_alloc< node_type > allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; typedef typename opt::details::make_comparator< value_type, original_traits >::type key_comparator; struct value_accessor { value_type const & operator()( node_type const& node ) const { return node.m_Value; } }; template using less_wrapper = cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor >; struct intrusive_traits: public original_traits { typedef intrusive::michael_list::base_hook< opt::gc > hook; typedef node_deallocator disposer; typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; static constexpr const opt::link_check_type link_checker = cds::intrusive::michael_list::traits::link_checker; }; typedef intrusive::MichaelList type; }; } // namespace details //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H libcds-2.3.3/cds/container/details/make_skip_list_map.h000066400000000000000000000115311341244201700231420ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H #define CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H #include #include //@cond namespace cds { namespace container { namespace details { template struct make_skip_list_map { typedef GC gc; typedef K key_type; typedef T mapped_type; typedef std::pair< key_type const, mapped_type> value_type; typedef Traits traits; typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; struct node_type: public intrusive_node_type { typedef intrusive_node_type base_class; typedef typename base_class::atomic_marked_ptr atomic_marked_ptr; typedef value_type stored_value_type; value_type m_Value; //atomic_marked_ptr m_arrTower[] ; // allocated together with node_type in single memory block template node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& key ) : m_Value( std::make_pair( std::forward( key ), mapped_type())) { init_tower( nHeight, pTower ); } template node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& key, Args&&... args ) : m_Value( std::forward(key), mapped_type( std::forward(args)... )) { init_tower( nHeight, pTower ); } node_type() = delete; private: void init_tower( unsigned int nHeight, atomic_marked_ptr * pTower ) { if ( nHeight > 1 ) { new (pTower) atomic_marked_ptr[ nHeight - 1 ]; base_class::make_tower( nHeight, pTower ); } } }; class node_allocator : public skip_list::details::node_allocator< node_type, traits> { typedef skip_list::details::node_allocator< node_type, traits> base_class; public: template node_type * New( unsigned int nHeight, Q const& key ) { return base_class::New( nHeight, key_type( key )); } template node_type * New( unsigned int nHeight, Q const& key, U const& val ) { unsigned char * pMem = base_class::alloc_space( nHeight ); return new( pMem ) node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) : nullptr, key_type( key ), mapped_type( val ) ); } template node_type * New( unsigned int nHeight, Args&&... 
args ) { unsigned char * pMem = base_class::alloc_space( nHeight ); return new( pMem ) node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) : nullptr, std::forward(args)... ); } }; struct node_deallocator { void operator ()( node_type * pNode ) { node_allocator().Delete( pNode ); } }; typedef skip_list::details::dummy_node_builder dummy_node_builder; struct key_accessor { key_type const & operator()( node_type const& node ) const { return node.m_Value.first; } }; typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator; class intrusive_type_traits: public cds::intrusive::skip_list::make_traits< cds::opt::type_traits< traits > ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > ,cds::intrusive::opt::disposer< node_deallocator > ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, key_accessor > > >::type {}; typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; }; }}} // namespace cds::container::details //@endcond #endif // CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H libcds-2.3.3/cds/container/details/make_skip_list_set.h000066400000000000000000000070161341244201700231630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H #define CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H #include #include //@cond namespace cds { namespace container { namespace details { template struct make_skip_list_set { typedef GC gc; typedef T value_type; typedef Traits traits; typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; struct node_type: public intrusive_node_type { typedef intrusive_node_type base_class; typedef typename base_class::atomic_marked_ptr atomic_marked_ptr; typedef value_type stored_value_type; value_type m_Value; //atomic_marked_ptr m_arrTower[] ; // allocated together with node_type in single memory block template node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& v ) : m_Value( std::forward( v )) { init_tower( nHeight, pTower ); } template node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& q, Args&&... args ) : m_Value( std::forward(q), std::forward(args)... 
) { init_tower( nHeight, pTower ); } node_type() = delete; private: void init_tower( unsigned nHeight, atomic_marked_ptr* pTower ) { if ( nHeight > 1 ) { new ( pTower ) atomic_marked_ptr[nHeight - 1]; base_class::make_tower( nHeight, pTower ); } } }; typedef skip_list::details::node_allocator< node_type, traits> node_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { node_allocator().Delete( pNode ); } }; typedef skip_list::details::dummy_node_builder dummy_node_builder; struct value_accessor { value_type const& operator()( node_type const& node ) const { return node.m_Value; } }; typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; template using less_wrapper = cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor >; class intrusive_traits: public cds::intrusive::skip_list::make_traits< cds::opt::type_traits< traits > ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > ,cds::intrusive::opt::disposer< node_deallocator > ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, value_accessor > > >::type {}; typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_traits> type; }; }}} // namespace cds::container::details //@endcond #endif //#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H libcds-2.3.3/cds/container/details/make_split_list_set.h000066400000000000000000000020531341244201700233440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H #define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H #include #include #include //@cond namespace cds { namespace container { // Forward declaration struct michael_list_tag; struct lazy_list_tag; }} // namespace cds::container //@endcond #ifdef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H # include #endif #ifdef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H # include #endif #ifdef CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H # include #endif #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H libcds-2.3.3/cds/container/details/make_split_list_set_iterable_list.h000066400000000000000000000075571341244201700262640ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_ITERABLE_LIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_ITERABLE_LIST_H //@cond namespace cds { namespace container { namespace details { template struct make_split_list_set< GC, T, iterable_list_tag, Traits > { typedef GC gc; typedef T value_type; typedef Traits original_traits; typedef typename cds::opt::select_default< typename original_traits::ordered_list_traits, cds::container::iterable_list::traits >::type original_ordered_list_traits; struct node_type: public cds::intrusive::split_list::node< void > { value_type m_Value; template explicit node_type( Q&& v ) : m_Value( std::forward( v )) {} template explicit node_type( Q&& q, Args&&... args ) : m_Value( std::forward(q), std::forward(args)... 
) {} node_type() = delete; }; typedef typename cds::opt::select_default< typename original_traits::ordered_list_traits, typename original_traits::allocator, typename cds::opt::select_default< typename original_traits::ordered_list_traits::allocator, typename original_traits::allocator >::type >::type node_allocator_; typedef typename std::allocator_traits< node_allocator_ >::template rebind_alloc< node_type > node_allocator_type; typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } }; typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; typedef typename original_traits::key_accessor key_accessor; struct value_accessor { typename key_accessor::key_type const& operator()( node_type const& node ) const { return key_accessor()(node.m_Value); } }; template using predicate_wrapper = cds::details::predicate_wrapper< node_type, Predicate, value_accessor >; struct ordered_list_traits: public original_ordered_list_traits { typedef cds::atomicity::empty_item_counter item_counter; typedef node_deallocator disposer; typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; }; struct traits: public original_traits { struct hash: public original_traits::hash { typedef typename original_traits::hash base_class; size_t operator()(node_type const& v ) const { return base_class::operator()( key_accessor()( v.m_Value )); } template size_t operator()( Q const& k ) const { return base_class::operator()( k ); } }; }; class ordered_list: public cds::intrusive::IterableList< gc, node_type, ordered_list_traits > {}; typedef cds::intrusive::SplitListSet< gc, ordered_list, traits > type; }; }}} // namespace cds::container::details //@endcond #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_ITERABLE_LIST_H libcds-2.3.3/cds/container/details/make_split_list_set_lazy_list.h000066400000000000000000000106561341244201700254460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_LAZY_LIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_LAZY_LIST_H #include //@cond namespace cds { namespace container { namespace details { template struct make_split_list_set< GC, T, lazy_list_tag, Traits > { typedef GC gc; typedef T value_type; typedef Traits original_traits; typedef typename cds::opt::select_default< typename original_traits::ordered_list_traits, cds::container::lazy_list::traits >::type original_ordered_list_traits; typedef typename cds::opt::select_default< typename original_ordered_list_traits::lock_type, typename cds::container::lazy_list::traits::lock_type >::type lock_type; typedef cds::intrusive::split_list::node< cds::intrusive::lazy_list::node > primary_node_type; struct node_type: public primary_node_type { value_type m_Value; template explicit node_type( Q&& v ) : m_Value( std::forward( v )) {} template explicit node_type( Q&& q, Args&&... args ) : m_Value( std::forward(q), std::forward(args)... 
) {} node_type() = delete; }; typedef typename cds::opt::select_default< typename original_traits::ordered_list_traits, typename original_traits::allocator, typename cds::opt::select_default< typename original_traits::ordered_list_traits::allocator, typename original_traits::allocator >::type >::type node_allocator_; typedef typename std::allocator_traits::template rebind_alloc node_allocator_type; typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } }; typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; typedef typename original_traits::key_accessor key_accessor; struct value_accessor { typename key_accessor::key_type const & operator()( node_type const & node ) const { return key_accessor()(node.m_Value); } }; template using predicate_wrapper = cds::details::predicate_wrapper< node_type, Predicate, value_accessor >; struct ordered_list_traits: public original_ordered_list_traits { typedef cds::intrusive::lazy_list::base_hook< opt::gc ,opt::lock_type< lock_type > > hook; typedef cds::atomicity::empty_item_counter item_counter; typedef node_deallocator disposer; typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; static constexpr const opt::link_check_type link_checker = cds::intrusive::lazy_list::traits::link_checker; }; struct traits: public original_traits { struct hash: public original_traits::hash { typedef typename original_traits::hash base_class; size_t operator()(node_type const& v ) const { return base_class::operator()( key_accessor()( v.m_Value )); } template size_t operator()( Q const& k ) const { return base_class::operator()( k ); } }; }; class ordered_list: public cds::intrusive::LazyList< gc, node_type, ordered_list_traits > {}; typedef cds::intrusive::SplitListSet< gc, ordered_list, traits > type; }; }}} // namespace cds::container::details //@endcond #endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_LAZY_LIST_H libcds-2.3.3/cds/container/details/make_split_list_set_michael_list.h000066400000000000000000000102331341244201700260600ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_MICHAEL_LIST_H #define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_MICHAEL_LIST_H //@cond namespace cds { namespace container { namespace details { template struct make_split_list_set< GC, T, michael_list_tag, Traits > { typedef GC gc; typedef T value_type; typedef Traits original_traits; typedef typename cds::opt::select_default< typename original_traits::ordered_list_traits, cds::container::michael_list::traits >::type original_ordered_list_traits; typedef cds::intrusive::split_list::node< cds::intrusive::michael_list::node > primary_node_type; struct node_type: public primary_node_type { value_type m_Value; template explicit node_type( Q&& v ) : m_Value( std::forward( v )) {} template explicit node_type( Q&& q, Args&&... args ) : m_Value( std::forward(q), std::forward(args)... 
) {}
                node_type() = delete;
            };

            typedef typename cds::opt::select_default<
                typename original_traits::ordered_list_traits,
                typename original_traits::allocator,
                typename cds::opt::select_default<
                    typename original_traits::ordered_list_traits::allocator,
                    typename original_traits::allocator
                >::type
            >::type node_allocator_;

            typedef typename std::allocator_traits< node_allocator_ >::template rebind_alloc< node_type > node_allocator_type;

            typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator;
            struct node_deallocator
            {
                void operator ()( node_type * pNode )
                {
                    cxx_node_allocator().Delete( pNode );
                }
            };

            typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator;

            typedef typename original_traits::key_accessor key_accessor;

            struct value_accessor
            {
                typename key_accessor::key_type const& operator()( node_type const& node ) const
                {
                    return key_accessor()( node.m_Value );
                }
            };

            template <typename Predicate>
            using predicate_wrapper = cds::details::predicate_wrapper< node_type, Predicate, value_accessor >;

            struct ordered_list_traits: public original_ordered_list_traits
            {
                typedef cds::intrusive::michael_list::base_hook< opt::gc< gc >> hook;
                typedef cds::atomicity::empty_item_counter item_counter;
                typedef node_deallocator disposer;
                typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare;
                static constexpr const opt::link_check_type link_checker = cds::intrusive::michael_list::traits::link_checker;
            };

            struct traits: public original_traits
            {
                struct hash: public original_traits::hash
                {
                    typedef typename original_traits::hash base_class;

                    size_t operator()( node_type const& v ) const
                    {
                        return base_class::operator()( key_accessor()( v.m_Value ));
                    }
                    template <typename Q>
                    size_t operator()( Q const& k ) const
                    {
                        return base_class::operator()( k );
                    }
                };
            };

            class ordered_list: public cds::intrusive::MichaelList< gc, node_type, ordered_list_traits >
            {};

            typedef cds::intrusive::SplitListSet< gc, ordered_list, traits > type;
        };
}}} // namespace cds::container::details
//@endcond

#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_MICHAEL_LIST_H

libcds-2.3.3/cds/container/details/michael_list_base.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H
#define CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H

#include <cds/container/details/base.h>
#include <cds/intrusive/details/michael_list_base.h>
#include <cds/urcu/options.h>

namespace cds { namespace container {

    /// MichaelList ordered list related definitions
    /** @ingroup cds_nonintrusive_helper
    */
    namespace michael_list {

        /// \p MichaelList internal statistics, see \p cds::intrusive::michael_list::stat
        template <typename EventCounter = cds::atomicity::event_counter>
        using stat = cds::intrusive::michael_list::stat< EventCounter >;

        /// \p MichaelList empty internal statistics, see \p cds::intrusive::michael_list::empty_stat
        typedef cds::intrusive::michael_list::empty_stat empty_stat;

        //@cond
        template ::stat_type>
        using wrapped_stat = cds::intrusive::michael_list::wrapped_stat< Stat >;
        //@endcond

        /// MichaelList traits
        struct traits
        {
            typedef CDS_DEFAULT_ALLOCATOR allocator;    ///< allocator used to allocate new node

            /// Key comparison functor
            /**
                No default functor is provided. If the option is not specified, the \p less is used.
            */
            typedef opt::none compare;

            /// Specifies binary predicate used for key comparison.
            /** Default is \p std::less.
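                For example, a custom predicate can be supplied via \p opt::less
                (an illustrative sketch; the \p int key type is an assumption of the example):
                \code
                struct int_less {
                    bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
                };
                typedef cds::container::michael_list::make_traits<
                    cds::opt::less< int_less >
                >::type my_list_traits;
                \endcode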
            */
            typedef opt::none less;

            /// Back-off strategy
            typedef cds::backoff::empty back_off;

            /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting
            typedef atomicity::empty_item_counter item_counter;

            /// Internal statistics
            /**
                By default, internal statistics is disabled (\p michael_list::empty_stat).
                Use \p michael_list::stat to enable it.
            */
            typedef empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList")
            /**
                For the list of available options, see \p opt::rcu_check_deadlock
            */
            typedef opt::v::rcu_throw_deadlock rcu_check_deadlock;

            //@cond
            // MichaelKVList: support for split-ordered list
            // key accessor (opt::none = internal key type is equal to user key type)
            typedef opt::none key_accessor;
            //@endcond
        };

        /// Metafunction converting option list to \p michael_list::traits
        /**
            Supported \p Options are:
            - \p opt::compare - key comparison functor. No default functor is provided.
              If the option is not specified, the \p opt::less is used.
            - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less.
            - \p opt::allocator - an allocator, default is \p CDS_DEFAULT_ALLOCATOR
            - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
            - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter).
              To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter
            - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat).
              To enable it use \p michael_list::stat
            - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
              or \p opt::v::sequential_consistent (sequentially consistent memory model).
            - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList"
              Default is \p opt::v::rcu_throw_deadlock
        */
        template <typename... Options>
        struct make_traits {
#   ifdef CDS_DOXYGEN_INVOKED
            typedef implementation_defined type;    ///< Metafunction result
#   else
            typedef typename cds::opt::make_options<
                typename cds::opt::find_type_traits< traits, Options... >::type
                , Options...
            >::type type;
#   endif
        };

    } // namespace michael_list

    // Forward declarations
    template <typename GC, typename T, typename Traits = michael_list::traits>
    class MichaelList;

    template <typename GC, typename Key, typename Value, typename Traits = michael_list::traits>
    class MichaelKVList;

    // Tag for selecting Michael's list implementation
    /**
        This struct is empty and it is used only as a tag for selecting \p MichaelList
        as ordered list implementation in declaration of some classes.
        See \p split_list::traits::ordered_list as an example.
    */
    struct michael_list_tag
    {};

}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H

libcds-2.3.3/cds/container/details/michael_map_base.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
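//
// Illustrative sketch: the michael_map traits defined below simply reuse the
// michael_set traits; a hash map is assembled from a key-value ordered list
// plus a hash functor. The cds::gc::HP garbage collector, the key/value
// types, std::hash and the header names are assumptions of this example:
//
//   #include <cds/container/michael_kvlist_hp.h>
//   #include <cds/container/michael_map.h>
//
//   typedef cds::container::MichaelKVList< cds::gc::HP, int, std::string,
//       cds::container::michael_list::make_traits<
//           cds::opt::less< std::less<int> >
//       >::type
//   > bucket_list;
//
//   typedef cds::container::MichaelHashMap< cds::gc::HP, bucket_list,
//       cds::container::michael_map::make_traits<
//           cds::opt::hash< std::hash<int> >,
//           cds::opt::item_counter< cds::atomicity::item_counter >
//       >::type
//   > map_type;
//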
// (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_MAP_BASE_H
#define CDSLIB_CONTAINER_DETAILS_MICHAEL_MAP_BASE_H

#include <cds/container/details/michael_set_base.h>

namespace cds { namespace container {

    /// MichaelHashMap related definitions
    /** @ingroup cds_nonintrusive_helper
    */
    namespace michael_map {

        /// \p MichaelHashMap traits
        typedef container::michael_set::traits traits;

        /// Metafunction converting option list to \p michael_map::traits
        template <typename... Options>
        using make_traits = cds::intrusive::michael_set::make_traits< Options... >;

        //@cond
        namespace details {
            using michael_set::details::init_hash_bitmask;
        }
        //@endcond

    }   // namespace michael_map

    //@cond
    // Forward declarations
    template < class GC, class OrderedList, class Traits = michael_map::traits>
    class MichaelHashMap;
    //@endcond

}}  // namespace cds::container

#endif // ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_MAP_BASE_H

libcds-2.3.3/cds/container/details/michael_set_base.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_SET_BASE_H
#define CDSLIB_CONTAINER_DETAILS_MICHAEL_SET_BASE_H

#include <cds/intrusive/details/michael_set_base.h>

namespace cds { namespace container {

    /// MichaelHashSet related definitions
    /** @ingroup cds_nonintrusive_helper
    */
    namespace michael_set {

        /// MichaelHashSet traits
        typedef cds::intrusive::michael_set::traits traits;

        /// Metafunction converting option list to \p michael_set::traits
        template <typename... Options>
        using make_traits = cds::intrusive::michael_set::make_traits< Options... >;

        //@cond
        namespace details {
            using cds::intrusive::michael_set::details::init_hash_bitmask;
            using cds::intrusive::michael_set::details::list_iterator_selector;
            using cds::intrusive::michael_set::details::iterator;
        }
        //@endcond
    }

    //@cond
    // Forward declarations
    template < class GC, class OrderedList, class Traits = michael_set::traits >
    class MichaelHashSet;
    //@endcond

}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_SET_BASE_H

libcds-2.3.3/cds/container/details/skip_list_base.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
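//
// Illustrative sketch: a SkipListSet built from the helper definitions in the
// skip_list_base.h header that follows. The cds::gc::HP garbage collector,
// the int element type and the header name are assumptions of this example:
//
//   #include <cds/container/skip_list_set_hp.h>
//
//   typedef cds::container::SkipListSet< cds::gc::HP, int,
//       cds::container::skip_list::make_traits<
//           cds::opt::less< std::less<int> >,
//           cds::container::skip_list::random_level_generator<
//               cds::container::skip_list::turbo32 >
//       >::type
//   > set_type;
//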
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_SKIP_LIST_BASE_H #define CDSLIB_CONTAINER_DETAILS_SKIP_LIST_BASE_H #include #include #include namespace cds { namespace container { /// SkipListSet related definitions /** @ingroup cds_nonintrusive_helper */ namespace skip_list { /// Option specifying random level generator template using random_level_generator = cds::intrusive::skip_list::random_level_generator; /// Xor-shift random level generator template using xor_shift = cds::intrusive::skip_list::xor_shift; /// Xor-shift random level generator, max height 32 typedef cds::intrusive::skip_list::xorshift32 xorshift32; /// Xor-shift random level generator, max height 24 typedef cds::intrusive::skip_list::xorshift24 xorshift24; /// Xor-shift random level generator, max height 16 typedef cds::intrusive::skip_list::xorshift16 xorshift16; //@cond // for backward compatibility using cds::intrusive::skip_list::xorshift; //@endcond /// Turbo-pascal random level generator template using turbo = cds::intrusive::skip_list::turbo; /// Turbo-pascal random level generator, max height 32 typedef cds::intrusive::skip_list::turbo32 turbo32; /// Turbo-pascal random level generator, max height 24 typedef cds::intrusive::skip_list::turbo24 turbo24; /// Turbo-pascal random level generator, max height 16 typedef cds::intrusive::skip_list::turbo16 turbo16; //@cond // for backward compatibility using cds::intrusive::skip_list::turbo_pascal; //@endcond /// Skip list internal statistics template using stat = cds::intrusive::skip_list::stat < EventCounter >; /// Skip list empty internal statistics typedef cds::intrusive::skip_list::empty_stat empty_stat; /// SkipListSet traits struct traits { /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// specifies binary predicate used for key compare. /** Default is \p std::less. */ typedef opt::none less; /// Item counter /** The type for item counting feature, by default disabled (\p atomicity::empty_item_counter) */ typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** List of available memory ordering see \p opt::memory_model */ typedef opt::v::relaxed_ordering memory_model; /// Random level generator /** The random level generator is an important part of skip-list algorithm. The node height in the skip-list have a probabilistic distribution where half of the nodes that have level \p i also have level i+1 (i = 0..30). The height of a node is in range [0..31]. See \p skip_list::random_level_generator option setter. */ typedef turbo32 random_level_generator; /// Allocator for skip-list nodes, \p std::allocator interface typedef CDS_DEFAULT_ALLOCATOR allocator; /// back-off strategy, default is \p cds::backoff::Default typedef cds::backoff::Default back_off; /// Internal statistics, by default disabled. To enable, use \p split_list::stat typedef empty_stat stat; /// RCU deadlock checking policy (for \ref cds_nonintrusive_SkipListSet_rcu "RCU-based SkipListSet") /** List of available options see opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; //@cond // For internal use only typedef opt::none key_accessor; //@endcond }; /// Metafunction converting option list to SkipListSet traits /** \p Options are: - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. 
- \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - \p opt::item_counter - the type of item counting feature. Default is \p atomicity::empty_item_counter that is no item counting. - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). - \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo or user-provided one. Default is \p %skip_list::turbo32. - \p opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::stat - internal statistics. Available types: \p skip_list::stat, \p skip_list::empty_stat (the default) - \p opt::rcu_check_deadlock - a deadlock checking policy for RCU-based skip-list. Default is \p opt::v::rcu_throw_deadlock */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond namespace details { template class node_allocator { protected: typedef Node node_type; typedef Traits traits; typedef typename node_type::tower_item_type node_tower_item; typedef typename std::allocator_traits::template rebind_alloc tower_allocator_type; typedef typename std::allocator_traits::template rebind_alloc node_allocator_type; static size_t const c_nTowerItemSize = sizeof(node_tower_item); static size_t const c_nNodePadding = sizeof(node_type) % c_nTowerItemSize; static size_t const c_nNodeSize = sizeof(node_type) + (c_nNodePadding ? (c_nTowerItemSize - c_nNodePadding) : 0); static constexpr size_t node_size( unsigned int nHeight ) noexcept { return c_nNodeSize + (nHeight - 1) * c_nTowerItemSize; } static unsigned char * alloc_space( unsigned int nHeight ) { unsigned char * pMem; size_t const sz = node_size( nHeight ); if ( nHeight > 1 ) { pMem = tower_allocator_type().allocate( sz ); // check proper alignments assert( (((uintptr_t) pMem) & (alignof(node_type) - 1)) == 0 ); assert( (((uintptr_t) (pMem + c_nNodeSize)) & (alignof(node_tower_item) - 1)) == 0 ); return pMem; } else pMem = reinterpret_cast( node_allocator_type().allocate( 1 )); return pMem; } static void free_space( unsigned char * p, unsigned int nHeight ) { assert( p != nullptr ); if ( nHeight == 1 ) node_allocator_type().deallocate( reinterpret_cast(p), 1 ); else tower_allocator_type().deallocate( p, node_size(nHeight)); } public: template node_type * New( unsigned int nHeight, Q const& v ) { unsigned char * pMem = alloc_space( nHeight ); node_type * p = new( pMem ) node_type( nHeight, nHeight > 1 ? reinterpret_cast(pMem + c_nNodeSize) : nullptr, v ); return p; } template node_type * New( unsigned int nHeight, Args&&... args ) { unsigned char * pMem = alloc_space( nHeight ); node_type * p = new( pMem ) node_type( nHeight, nHeight > 1 ? reinterpret_cast(pMem + c_nNodeSize) : nullptr, std::forward(args)... 
); return p; } void Delete( node_type * p ) { assert( p != nullptr ); unsigned int nHeight = p->height(); node_allocator_type a; std::allocator_traits::destroy( a, p ); free_space( reinterpret_cast(p), nHeight ); } }; template struct dummy_node_builder { typedef IntrusiveNode intrusive_node_type; template static intrusive_node_type * make_tower( intrusive_node_type * pNode, RandomGen& /*gen*/ ) { return pNode ; } static intrusive_node_type * make_tower( intrusive_node_type * pNode, unsigned int /*nHeight*/ ) { return pNode ; } static void dispose_tower( intrusive_node_type * pNode ) { pNode->release_tower(); } struct node_disposer { void operator()( intrusive_node_type * /*pNode*/ ) const {} }; }; template class iterator { typedef ForwardIterator intrusive_iterator; typedef typename intrusive_iterator::value_type node_type; typedef typename node_type::stored_value_type value_type; static bool const c_isConst = intrusive_iterator::c_isConst; typedef typename std::conditional< c_isConst, value_type const&, value_type&>::type value_ref; template friend class iterator; intrusive_iterator m_It; public: // for internal use only!!! iterator( intrusive_iterator const& it ) : m_It( it ) {} public: iterator() : m_It() {} iterator( iterator const& s) : m_It( s.m_It ) {} value_type * operator ->() const { return &( m_It.operator->()->m_Value ); } value_ref operator *() const { return m_It.operator*().m_Value; } /// Pre-increment iterator& operator ++() { ++m_It; return *this; } iterator& operator = (iterator const& src) { m_It = src.m_It; return *this; } template bool operator ==(iterator const& i ) const { return m_It == i.m_It; } template bool operator !=(iterator const& i ) const { return !( *this == i ); } }; } // namespace details //@endcond } // namespace skip_list // Forward declaration template class SkipListSet; // Forward declaration template class SkipListMap; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_SKIP_LIST_BASE_H libcds-2.3.3/cds/container/details/split_list_base.h000066400000000000000000000157021341244201700224730ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
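//
// Illustrative sketch: a SplitListSet combines the options defined in the
// split_list_base.h header that follows with an ordered-list tag. Note that
// the chosen list header must be included before the split-list header so the
// matching specialization is available. The cds::gc::HP garbage collector,
// the int element type, std::hash and the header names are assumptions of
// this example:
//
//   #include <cds/container/michael_list_hp.h>
//   #include <cds/container/split_list_set.h>
//
//   typedef cds::container::SplitListSet< cds::gc::HP, int,
//       cds::container::split_list::make_traits<
//           cds::container::split_list::ordered_list< cds::container::michael_list_tag >,
//           cds::opt::hash< std::hash<int> >
//       >::type
//   > set_type;
//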
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_DETAILS_SPLIT_LIST_BASE_H #define CDSLIB_CONTAINER_DETAILS_SPLIT_LIST_BASE_H #include namespace cds { namespace container { // forward declaration struct michael_list_tag; /// SplitListSet related definitions /** @ingroup cds_nonintrusive_helper */ namespace split_list { /// Internal statistics, see \p cds::intrusive::split_list::stat template ::counter_type > using stat = cds::intrusive::split_list::stat; /// Disabled internal statistics, see \p cds::intrusive::split_list::empty_stat typedef cds::intrusive::split_list::empty_stat empty_stat; /// Selector of bucket table implementation = typedef for \p intrusive::split_list::dynamic_bucket_table template using dynamic_bucket_table = cds::intrusive::split_list::dynamic_bucket_table; /// @copydoc cds::intrusive::split_list::bit_reversal template using bit_reversal = cds::intrusive::split_list::bit_reversal; using cds::intrusive::split_list::static_bucket_table; using cds::intrusive::split_list::expandable_bucket_table; //@cond namespace details { template struct wrap_map_traits_helper { typedef Opt key_accessor; }; template struct wrap_map_traits_helper { struct key_accessor { typedef Key key_type; key_type const & operator()( std::pair const & val ) const { return val.first; } }; }; template struct wrap_map_traits: public Traits { typedef typename wrap_map_traits_helper::key_accessor key_accessor; }; template struct wrap_set_traits_helper { typedef Opt key_accessor; }; template struct wrap_set_traits_helper { struct key_accessor { typedef Value key_type; key_type const& operator()( Value const& val ) const { return val; } }; }; template struct wrap_set_traits: public Traits { typedef typename wrap_set_traits_helper::key_accessor key_accessor; }; } // namespace details //@endcond /// \p SplitListSet traits struct traits: public intrusive::split_list::traits { // Ordered list implementation /** Selects appropriate ordered-list implementation for split-list. Supported types are: - \p michael_list_tag - for \p MichaelList - \p lazy_list_tag - for \p LazyList - \p iterable_list_tag - for \p IterableList */ typedef michael_list_tag ordered_list; // Ordered list traits /** Specifyes traits for selected ordered list type, default type: - for \p michael_list_tag: \p container::michael_list::traits. - for \p lazy_list_tag: \p container::lazy_list::traits. - for \p iterable_list_tag: \p container::iterable_list::traits. If this type is \p opt::none, the ordered list traits is combined with default ordered list traits and split-list traits. */ typedef opt::none ordered_list_traits; //@cond typedef opt::none key_accessor; //@endcond }; /// Option to select ordered list class for split-list /** This option selects appropriate ordered list class for containers based on split-list. Template parameter \p Type may be \p michael_list_tag or \p lazy_list_tag. */ template struct ordered_list { //@cond template struct pack: public Base { typedef Type ordered_list; }; //@endcond }; /// Option to specify ordered list type traits /** The \p Type template parameter specifies ordered list type traits. It depends on type of ordered list selected. */ template struct ordered_list_traits { //@cond template struct pack: public Base { typedef Type ordered_list_traits; }; //@endcond }; /// Metafunction converting option list to traits struct /** Available \p Options: - \p split_list::ordered_list - a tag for ordered list implementation. 
- \p split_list::ordered_list_traits - type traits for ordered list implementation. For \p MichaelList use \p container::michael_list::traits or derivatives, for \p LazyList use \p container::lazy_list::traits or derivatives. - plus any option from \p intrusive::split_list::make_traits */ template struct make_traits { typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction }; } // namespace split_list //@cond // Forward declarations template class SplitListSet; template class SplitListMap; //@endcond //@cond // Forward declaration namespace details { template struct make_split_list_set; template struct make_split_list_map; } //@endcond }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_DETAILS_SPLIT_LIST_BASE_H libcds-2.3.3/cds/container/ellen_bintree_map_dhp.h000066400000000000000000000006521341244201700221630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_DHP_H #define CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_DHP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_DHP_H libcds-2.3.3/cds/container/ellen_bintree_map_hp.h000066400000000000000000000006461341244201700220220ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_HP_H #define CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_HP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_HP_H libcds-2.3.3/cds/container/ellen_bintree_map_rcu.h000066400000000000000000000614601341244201700222050ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_RCU_H #define CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_RCU_H #include #include namespace cds { namespace container { /// Map based on Ellen's et al binary search tree (RCU specialization) /** @ingroup cds_nonintrusive_map @ingroup cds_nonintrusive_tree @anchor cds_container_EllenBinTreeMap_rcu Source: - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" %EllenBinTreeMap is an unbalanced leaf-oriented binary search tree that implements the map abstract data type. Nodes maintains child pointers but not parent pointers. Every internal node has exactly two children, and all data of type std::pair currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes may or may not be in the map. Unlike \ref cds_container_EllenBinTreeSet_rcu "EllenBinTreeSet" keys are not a part of \p T type. The map can be represented as a set containing std::pair< Key const, T> values. Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeMap can act as a priority queue. In this case you should provide unique compound key, for example, the priority value plus some uniformly distributed random value. @warning Recall the tree is unbalanced. 
The complexity of operations is O(log N) for uniformly distributed random keys, but in the worst case the complexity is O(N). @note In the current implementation we do not use helping technique described in original paper. So, the current implementation is near to fine-grained lock-based tree. Helping will be implemented in future release Template arguments : - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type - \p T - value type to be stored in tree's leaf nodes. - \p Traits - map traits, default is \p ellen_bintree::traits. It is possible to declare option-based tree with \p ellen_bintree::make_map_traits metafunction instead of \p Traits template argument. @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. */ template < class RCU, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = ellen_bintree::traits #else class Traits #endif > class EllenBinTreeMap< cds::urcu::gc, Key, T, Traits > #ifdef CDS_DOXYGEN_INVOKED : public cds::intrusive::EllenBinTree< cds::urcu::gc, Key, T, Traits > #else : public ellen_bintree::details::make_ellen_bintree_map< cds::urcu::gc, Key, T, Traits >::type #endif { //@cond typedef ellen_bintree::details::make_ellen_bintree_map< cds::urcu::gc, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc gc; ///< RCU Garbage collector typedef Key key_type; ///< type of a key stored in the map typedef T mapped_type; ///< type of value stored in the map typedef std::pair< key_type const, mapped_type > value_type; ///< Key-value pair stored in leaf node of the mp typedef Traits traits; ///< Traits template parameter static_assert( std::is_default_constructible::value, "Key should be default constructible type" ); # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key compare functor based on \p Traits::compare and \p Traits::less # else typedef typename maker::intrusive_traits::compare key_comparator; # endif typedef typename base_class::item_counter item_counter; ///< Item counting policy typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option typedef typename base_class::node_allocator node_allocator_type; ///< allocator for maintaining internal node typedef typename base_class::stat stat; ///< internal statistics typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename traits::copy_policy copy_policy; ///< key copy policy typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::allocator allocator_type; ///< Allocator for leaf nodes typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking protected: //@cond typedef typename base_class::value_type leaf_node; typedef typename base_class::internal_node internal_node; typedef typename base_class::update_desc update_desc; typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; //@endcond public: typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock 
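        /// Usage sketch (illustrative; the \p general_buffered RCU flavor and the key/value types are assumptions of this example, not requirements of the class):
        /**
            \code
            #include <cds/urcu/general_buffered.h>
            #include <cds/container/ellen_bintree_map_rcu.h>

            typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
            typedef cds::container::EllenBinTreeMap< rcu_type, int, std::string > map_type;

            map_type m;
            m.insert( 1, "one" );
            {
                map_type::rcu_lock l;   // enter RCU critical section
                map_type::value_type* p = m.get( 1 );
                if ( p ) {
                    // p->second may be read here; the pointer is valid only while RCU is locked
                }
            }
            \endcode
        */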
/// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr < gc, leaf_node, value_type, typename maker::intrusive_traits::disposer, cds::urcu::details::conventional_exempt_member_cast < leaf_node, value_type > >; public: /// Default constructor EllenBinTreeMap() : base_class() {} /// Clears the map ~EllenBinTreeMap() {} /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. - The \p mapped_type should be default-constructible. RCU \p synchronize() can be called. RCU should not be locked. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K const& key ) { return insert_with( key, [](value_type&){} ); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p value_type should be constructible from \p val of type \p V. RCU \p synchronize() method can be called. RCU should not be locked. Returns \p true if \p val is inserted into the map, \p false otherwise. */ template bool insert( K const& key, V const& val ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key, val )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. The key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. RCU \p synchronize() method can be called. RCU should not be locked. */ template bool insert_with( K const& key, Func func ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); if ( base_class::insert( *pNode, [&func]( leaf_node& item ) { func( item.m_Value ); } )) { pNode.release(); return true; } return false; } /// For key \p key inserts data of type \p value_type created in-place from \p args /** Returns \p true if inserting successful, \p false otherwise. RCU \p synchronize() method can be called. RCU should not be locked. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().MoveNew( key_type( std::forward(key)), mapped_type( std::forward(args)... ))); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the map, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. 
The functor \p func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the map The functor may change any fields of the \p item.second that is \p mapped_type; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. RCU \p synchronize() method can be called. RCU should not be locked. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K const& key, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); std::pair res = base_class::update( *pNode, [&func](bool bNew, leaf_node& item, leaf_node const& ){ func( bNew, item.m_Value ); }, bAllowInsert ); if ( res.first && res.second ) pNode.release(); return res; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Delete \p key from the map /**\anchor cds_nonintrusive_EllenBinTreeMap_rcu_erase_val RCU \p synchronize() method can be called. RCU should not be locked. Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( K const& key ) { return base_class::erase(key); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); } /// Delete \p key from the map /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return base_class::erase( key, [&f]( leaf_node& node) { f( node.m_Value ); } ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), [&f]( leaf_node& node) { f( node.m_Value ); } ); } /// Extracts an item with minimal key from the map /** Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item. 
If the set is empty, returns empty \p exempt_ptr. @note Due the concurrent nature of the map, the function extracts nearly minimum key. It means that the function gets leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. So, the function returns the item with minimum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return exempt_ptr( base_class::extract_min_()); } /// Extracts an item with maximal key from the map /** Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item. If the set is empty, returns empty \p exempt_ptr. @note Due the concurrent nature of the map, the function extracts nearly maximal key. It means that the function gets rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. So, the function returns the item with maximum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() is called. @note Before reusing \p result object you should call its \p release() method. */ exempt_ptr extract_max() { return exempt_ptr( base_class::extract_max_()); } /// Extracts an item from the map /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_extract The function searches an item with key equal to \p key in the tree, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. If \p key is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not destroy the item found. The dealloctor will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( base_class::extract_( key, typename base_class::node_compare())); } /// Extracts an item from the map using \p pred for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key compare. \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". \p pred must imply the same element order as the comparator used for building the map. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( base_class::extract_with_( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >())); } /// Find the key \p key /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. 
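            Example (an illustrative sketch; \p map_type stands for a suitable \p %EllenBinTreeMap specialization with a \p std::string mapped type):
            \code
            map_type m;
            // ... fill the map ...
            bool found = m.find( 10, []( map_type::value_type& item ) {
                item.second = "ten";    // the functor may change item.second
            });
            \endcode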
*/ template bool find( K const& key, Func f ) { return base_class::find( key, [&f](leaf_node& item, K const& ) { f( item.m_Value );}); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), [&f](leaf_node& item, K const& ) { f( item.m_Value );}); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( K const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "Predicate requirements". \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. RCU should be locked before call the function. Returned pointer is valid while RCU is locked. */ template value_type * get( Q const& key ) const { leaf_node * pNode = base_class::get( key ); return pNode ? &pNode->m_Value : nullptr; } /// Finds \p key with \p pred predicate and return the item found /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the map. */ template value_type * get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); leaf_node * pNode = base_class::get_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); return pNode ? &pNode->m_Value : nullptr; } /// Clears the map void clear() { base_class::clear(); } /// Checks if the map is empty bool empty() const { return base_class::empty(); } /// Returns item count in the map /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use \p empty() member function for this purpose. 
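            For example (an illustrative sketch; \p map_type stands for a suitable \p %EllenBinTreeMap typedef):
            \code
            map_type m;
            if ( m.empty()) {
                // reliable emptiness check
            }
            size_t n = m.size();    // always 0 if atomicity::empty_item_counter is used
            \endcode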
*/ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. */ bool check_consistency() const { return base_class::check_consistency(); } }; }} // namespace cds::container #endif //#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_RCU_H libcds-2.3.3/cds/container/ellen_bintree_set_dhp.h000066400000000000000000000006521341244201700222010ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_DHP_H #define CDSLIB_CONTAINER_ELLEN_BINTREE_SET_DHP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_DHP_H libcds-2.3.3/cds/container/ellen_bintree_set_hp.h000066400000000000000000000006461341244201700220400ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_HP_H #define CDSLIB_CONTAINER_ELLEN_BINTREE_SET_HP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_HP_H libcds-2.3.3/cds/container/ellen_bintree_set_rcu.h000066400000000000000000000656101341244201700222240ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_RCU_H #define CDSLIB_CONTAINER_ELLEN_BINTREE_SET_RCU_H #include #include namespace cds { namespace container { /// Set based on Ellen's et al binary search tree (RCU specialization) /** @ingroup cds_nonintrusive_set @ingroup cds_nonintrusive_tree @anchor cds_container_EllenBinTreeSet_rcu Source: - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" %EllenBinTreeSet is an unbalanced leaf-oriented binary search tree that implements the set abstract data type. Nodes maintains child pointers but not parent pointers. Every internal node has exactly two children, and all data of type \p T currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes may or may not be in the set. \p Key type is a subset of \p T type. There should be exactly defined a key extracting functor for converting object of type \p T to object of type \p Key. Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeSet can act as a priority queue. In this case you should provide unique compound key, for example, the priority value plus some uniformly distributed random value. @warning Recall the tree is unbalanced. The complexity of operations is O(log N) for uniformly distributed random keys, but in the worst case the complexity is O(N). @note In the current implementation we do not use helping technique described in original paper. So, the current implementation is near to fine-grained lock-based tree. 
Helping will be implemented in a future release. Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type, a subset of \p T - \p T - type to be stored in tree's leaf nodes. - \p Traits - set traits, default is \p ellen_bintree::traits. It is possible to declare an option-based tree with \p ellen_bintree::make_set_traits metafunction instead of \p Traits template argument. @note Before including <cds/container/ellen_bintree_set_rcu.h> you should include the appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files. @anchor cds_container_EllenBinTreeSet_rcu_less Predicate requirements opt::less, opt::compare and other predicates used with member functions should accept at least parameters of type \p T and \p Key in any combination. For example, for \p Foo struct with \p std::string key field the appropriate \p less functor is: \code struct Foo { std::string m_strKey; ... }; struct less { bool operator()( Foo const& v1, Foo const& v2 ) const { return v1.m_strKey < v2.m_strKey ; } bool operator()( Foo const& v, std::string const& s ) const { return v.m_strKey < s ; } bool operator()( std::string const& s, Foo const& v ) const { return s < v.m_strKey ; } // Support comparing std::string and char const * bool operator()( std::string const& s, char const * p ) const { return s.compare(p) < 0 ; } bool operator()( Foo const& v, char const * p ) const { return v.m_strKey.compare(p) < 0 ; } bool operator()( char const * p, std::string const& s ) const { return s.compare(p) > 0; } bool operator()( char const * p, Foo const& v ) const { return v.m_strKey.compare(p) > 0; } }; \endcode */ template < class RCU, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = ellen_bintree::traits #else class Traits #endif > class EllenBinTreeSet< cds::urcu::gc< RCU >, Key, T, Traits > #ifdef CDS_DOXYGEN_INVOKED : public cds::intrusive::EllenBinTree< cds::urcu::gc< RCU >, Key, T, Traits > #else : public ellen_bintree::details::make_ellen_bintree_set< cds::urcu::gc< RCU >, Key, T, Traits >::type #endif { //@cond typedef ellen_bintree::details::make_ellen_bintree_set< cds::urcu::gc< RCU >, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< RCU Garbage collector typedef Key key_type; ///< type of a key stored in internal nodes; key is a part of \p value_type typedef T value_type; ///< type of value stored in the binary tree typedef Traits traits; ///< Traits template parameter # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less # else typedef typename maker::intrusive_traits::compare key_comparator; # endif typedef typename base_class::item_counter item_counter; ///< Item counting policy typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model typedef typename base_class::stat stat; ///< internal statistics type typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename traits::key_extractor key_extractor; ///< key extracting functor typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::allocator allocator_type; ///< Allocator for leaf nodes typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator static constexpr const bool c_bExtractLockExternal =
base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking protected: //@cond typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; typedef typename base_class::value_type leaf_node; typedef typename base_class::internal_node internal_node; typedef std::unique_ptr< leaf_node, typename maker::intrusive_traits::disposer > scoped_node_ptr; //@endcond public: typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr < gc, leaf_node, value_type, typename maker::intrusive_traits::disposer, cds::urcu::details::conventional_exempt_member_cast < leaf_node, value_type > >; public: /// Default constructor EllenBinTreeSet() : base_class() {} /// Clears the set ~EllenBinTreeSet() {} /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain at least the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. RCU \p synchronize() method can be called. RCU should not be locked. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q const& val ) { scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Inserts new node /** The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this set's item by concurrent threads. The user-defined functor is called only if the inserting is success. RCU \p synchronize() can be called. RCU should not be locked. */ template bool insert( Q const& val, Func f ) { scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); if ( base_class::insert( *sp.get(), [&f]( leaf_node& v ) { f( v.m_Value ); } )) { sp.release(); return true; } return false; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor \p func signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function The functor can change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. RCU \p synchronize method can be called. RCU should not be locked. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. 
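A brief sketch of typical usage; \p set_type and the \p Foo value type with an integer \p nCounter field are assumptions of this example:
\code
set_type s;
// Insert Foo(5) if absent; otherwise increment the counter of the existing item
s.update( Foo( 5 ), []( bool bNew, Foo& item, Foo const& /*val*/ ) {
    if ( !bNew )
        ++item.nCounter;
});
\endcode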
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template <typename Q, typename Func> std::pair<bool, bool> update( Q const& val, Func func, bool bAllowInsert = true ) { scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); std::pair<bool, bool> bRes = base_class::update( *sp, [&func, &val](bool bNew, leaf_node& node, leaf_node&){ func( bNew, node.m_Value, val ); }, bAllowInsert ); if ( bRes.first && bRes.second ) sp.release(); return bRes; } //@cond template <typename Q, typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( const Q& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts data of type \p value_type created in-place from \p args /** Returns \p true if inserting is successful, \p false otherwise. RCU \p synchronize method can be called. RCU should not be locked. */ template <typename... Args> bool emplace( Args&&... args ) { scoped_node_ptr sp( cxx_leaf_node_allocator().MoveNew( std::forward<Args>(args)... )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Deletes \p key from the set /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_erase_val The item comparator should be able to compare the type \p value_type and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if the key is found and deleted, \p false otherwise. */ template <typename Q> bool erase( Q const& key ) { return base_class::erase( key ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_erase_val "erase(Q const&)" but \p pred is used for key comparison. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); } /// Deletes \p key from the set /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_erase_func The function searches an item with key \p key, calls the \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Since the key of %EllenBinTreeSet's \p value_type is not explicitly specified, template parameter \p Q defines the key type to search in the set. The item comparator should be able to compare the type \p T of a set item and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if the key is found and deleted, \p false otherwise. See also: \ref erase */ template <typename Q, typename Func> bool erase( Q const& key, Func f ) { return base_class::erase( key, [&f]( leaf_node const& node) { f( node.m_Value ); } ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparison. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set.
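An illustrative sketch that reuses the \p less functor from the class description (the set object \p s is an assumption):
\code
// Erase by a std::string key; the functor observes the item being deleted
s.erase_with( std::string( "foo" ), less(), []( Foo const& item ) {
    std::cout << "erased " << item.m_strKey << '\n';
});
\endcode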
*/ template <typename Q, typename Less, typename Func> bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), [&f]( leaf_node const& node) { f( node.m_Value ); } ); } /// Extracts an item with minimal key from the set /** Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item. If the set is empty, returns empty \p exempt_ptr. @note Due to the concurrent nature of the set, the function extracts an item whose key is close to the minimum. It means that the function gets the leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key. So, the function returns the item with the minimum key at the moment of tree traversal. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return exempt_ptr( base_class::extract_min_()); } /// Extracts an item with maximal key from the set /** Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item. If the set is empty, returns empty \p exempt_ptr. @note Due to the concurrent nature of the set, the function extracts an item whose key is close to the maximum. It means that the function gets the rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key. So, the function returns the item with the maximum key at the moment of tree traversal. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_max() { return exempt_ptr( base_class::extract_max_()); } /// Extracts an item from the set /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_extract The function searches an item with key equal to \p key in the tree, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If \p key is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not destroy the item found. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template <typename Q> exempt_ptr extract( Q const& key ) { return exempt_ptr( base_class::extract_( key, typename base_class::node_compare())); } /// Extracts an item from the set using \p pred for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key comparison. \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( base_class::extract_with_( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >())); } /// Find the key \p key /** @anchor cds_nonintrusive_EllenBinTreeSet_rcu_find_func The function searches the item with key equal to \p key and calls the functor \p f for the item found.
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization scheme at the item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as the \p f functor destination, i.e. the functor can modify both arguments. Note that the comparator specified via the \p Traits template parameter should accept a parameter of type \p Q that may be not the same as \p value_type. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q& key, Func f ) const { return base_class::find( key, [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); }); } //@cond template <typename Q, typename Func> bool find( Q const& key, Func f ) const { return base_class::find( key, [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparison. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool find_with( Q& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); } ); } //@cond template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template <typename Q> bool contains( Q const& key ) const { return base_class::contains( key ); } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparison. \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "Predicate requirements". \p Less must imply the same element order as the comparator used for building the set. \p pred should accept arguments of type \p Q, \p key_type, \p value_type in any combination.
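For example, assuming the \p Foo set \p s and the \p less functor from the class description:
\code
// Membership test by a C-string key through the user-supplied predicate
bool in_set = s.contains( "alpha", less());
\endcode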
*/ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. RCU should be locked before call the function. Returned pointer is valid while RCU is locked. */ template value_type * get( Q const& key ) const { leaf_node * pNode = base_class::get( key ); return pNode ? &pNode->m_Value : nullptr; } /// Finds \p key with \p pred predicate and return the item found /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template value_type * get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); leaf_node * pNode = base_class::get_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); return pNode ? &pNode->m_Value : nullptr; } /// Clears the set (non-atomic) /** The function unlink all items from the tree. The function is not atomic, thus, in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each leaf the \ref disposer will be called after unlinking. RCU \p synchronize method can be called. RCU should not be locked. */ void clear() { base_class::clear(); } /// Checks if the set is empty bool empty() const { return base_class::empty(); } /// Returns item count in the set /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter \p %size() always returns 0. Therefore, the function is not suitable for checking the tree emptiness, use \p empty() member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. */ bool check_consistency() const { return base_class::check_consistency(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_RCU_H libcds-2.3.3/cds/container/fcdeque.h000066400000000000000000000537061341244201700173100ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FCDEQUE_H #define CDSLIB_CONTAINER_FCDEQUE_H #include #include #include namespace cds { namespace container { /// FCDeque related definitions /** @ingroup cds_nonintrusive_helper */ namespace fcdeque { /// FCDeque internal statistics template struct stat: public cds::algo::flat_combining::stat { typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type counter_type m_nPushFront ; ///< Count of push_front operations counter_type m_nPushFrontMove ; ///< Count of push_front operations with move semantics counter_type m_nPushBack ; ///< Count of push_back operations counter_type m_nPushBackMove ; ///< Count of push_back operations with move semantics counter_type m_nPopFront ; ///< Count of success pop_front operations counter_type m_nFailedPopFront; ///< Count of failed pop_front operations (pop from empty deque) counter_type m_nPopBack ; ///< Count of success pop_back operations counter_type m_nFailedPopBack ; ///< Count of failed pop_back operations (pop from empty deque) counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled //@cond void onPushFront() { ++m_nPushFront; } void onPushFrontMove() { ++m_nPushFrontMove; } void onPushBack() { ++m_nPushBack; } void onPushBackMove() { ++m_nPushBackMove; } void onPopFront( bool bFailed ) { if ( bFailed ) ++m_nFailedPopFront; else ++m_nPopFront; } void onPopBack( bool bFailed ) { if ( bFailed ) ++m_nFailedPopBack; else ++m_nPopBack; } void onCollide() { ++m_nCollided; } //@endcond }; /// FCDeque dummy statistics, no overhead struct empty_stat: public cds::algo::flat_combining::empty_stat { //@cond void onPushFront() {} void onPushFrontMove() {} void onPushBack() {} void onPushBackMove() {} void onPopFront(bool) {} void onPopBack(bool) {} void onCollide() {} //@endcond }; /// FCDeque type traits struct traits: public cds::algo::flat_combining::traits { typedef empty_stat stat; ///< Internal statistics static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" }; /// Metafunction converting option list to traits /** \p Options are: - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. For queue, the elimination is possible if the queue is empty. */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace fcqueue /// Flat-combining deque /** @ingroup cds_nonintrusive_deque @ingroup cds_flat_combining_container \ref cds_flat_combining_description "Flat combining" sequential deque. The class can be considered as a concurrent FC-based wrapper for \p std::deque. Template parameters: - \p T - a value type stored in the deque - \p Deque - sequential deque implementation, for example, \p std::deque (the default) or \p boost::container::deque - \p Trats - type traits of flat combining, default is \p fcdeque::traits. 
\p fcdeque::make_traits metafunction can be used to construct specialized \p %fcdeque::traits */ template , typename Traits = fcdeque::traits > class FCDeque #ifndef CDS_DOXYGEN_INVOKED : public cds::algo::flat_combining::container #endif { public: typedef T value_type; ///< Value type typedef Deque deque_type; ///< Sequential deque class typedef Traits traits; ///< Deque type traits typedef typename traits::stat stat; ///< Internal statistics type static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled protected: //@cond /// Deque operation IDs enum fc_operation { op_push_front = cds::algo::flat_combining::req_Operation, ///< Push front op_push_front_move, ///< Push front (move semantics) op_push_back, ///< Push back op_push_back_move, ///< Push back (move semantics) op_pop_front, ///< Pop front op_pop_back, ///< Pop back op_clear ///< Clear }; /// Flat combining publication list record struct fc_record: public cds::algo::flat_combining::publication_record { union { value_type const * pValPush; ///< Value to push value_type * pValPop; ///< Pop destination }; bool bEmpty; ///< \p true if the deque is empty }; //@endcond /// Flat combining kernel typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; protected: //@cond mutable fc_kernel m_FlatCombining; deque_type m_Deque; //@endcond public: /// Initializes empty deque object FCDeque() {} /// Initializes empty deque object and gives flat combining parameters FCDeque( unsigned int nCompactFactor ///< Flat combining: publication list compacting factor ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread ) : m_FlatCombining( nCompactFactor, nCombinePassCount ) {} /// Inserts a new element at the beginning of the deque container /** The function always returns \p true */ bool push_front( value_type const& val ///< Value to be copied to inserted element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push_front, pRec, *this ); else m_FlatCombining.combine( op_push_front, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPushFront(); return true; } /// Inserts a new element at the beginning of the deque container (move semantics) /** The function always returns \p true */ bool push_front( value_type&& val ///< Value to be moved to inserted element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push_front_move, pRec, *this ); else m_FlatCombining.combine( op_push_front_move, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPushFrontMove(); return true; } /// Inserts a new element at the end of the deque container /** The function always returns \p true */ bool push_back( value_type const& val ///< Value to be copied to inserted element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push_back, pRec, *this ); else m_FlatCombining.combine( op_push_back, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPushBack(); return true; } /// Inserts a new element at the end of the deque container (move semantics) /** The function 
always returns \p true */ bool push_back( value_type&& val ///< Value to be moved to inserted element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push_back_move, pRec, *this ); else m_FlatCombining.combine( op_push_back_move, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPushBackMove(); return true; } /// Removes the first element in the deque container /** The function returns \p false if the deque is empty, \p true otherwise. If the deque is empty \p val is not changed. */ bool pop_front( value_type& val ///< Target to be received the copy of removed element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_pop_front, pRec, *this ); else m_FlatCombining.combine( op_pop_front, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPopFront( pRec->bEmpty ); return !pRec->bEmpty; } /// Removes the last element in the deque container /** The function returns \p false if the deque is empty, \p true otherwise. If the deque is empty \p val is not changed. */ bool pop_back( value_type& val ///< Target to be received the copy of removed element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_pop_back, pRec, *this ); else m_FlatCombining.combine( op_pop_back, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPopBack( pRec->bEmpty ); return !pRec->bEmpty; } /// Clears the deque void clear() { auto pRec = m_FlatCombining.acquire_record(); constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_clear, pRec, *this ); else m_FlatCombining.combine( op_clear, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); } /// Exclusive access to underlying deque object /** The functor \p f can do any operation with underlying \p deque_type in exclusive mode. For example, you can iterate over the deque. \p Func signature is: \code void f( deque_type& deque ); \endcode */ template void apply( Func f ) { auto& deque = m_Deque; m_FlatCombining.invoke_exclusive( [&deque, &f]() { f( deque ); } ); } /// Exclusive access to underlying deque object /** The functor \p f can do any operation with underlying \p deque_type in exclusive mode. For example, you can iterate over the deque. \p Func signature is: \code void f( deque_type const& deque ); \endcode */ template void apply( Func f ) const { auto const& deque = m_Deque; m_FlatCombining.invoke_exclusive( [&deque, &f]() { f( deque ); } ); } /// Returns the number of elements in the deque. /** Note that size() == 0 is not mean that the deque is empty because combining record can be in process. To check emptiness use \ref empty function. */ size_t size() const { return m_Deque.size(); } /// Checks if the deque is empty /** If the combining is in process the function waits while combining done. */ bool empty() const { bool bRet = false; auto const& deq = m_Deque; m_FlatCombining.invoke_exclusive( [&deq, &bRet]() { bRet = deq.empty(); } ); return bRet; } /// Internal statistics stat const& statistics() const { return m_FlatCombining.statistics(); } public: // flat combining cooperation, not for direct use! 
//@cond /// Flat combining supporting function. Do not call it directly! /** The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" object if the current thread becomes a combiner. Invocation of the function means that the deque should perform an action recorded in \p pRec. */ void fc_apply( fc_record * pRec ) { assert( pRec ); switch ( pRec->op()) { case op_push_front: assert( pRec->pValPush ); m_Deque.push_front( *(pRec->pValPush)); break; case op_push_front_move: assert( pRec->pValPush ); m_Deque.push_front( std::move( *(pRec->pValPush ))); break; case op_push_back: assert( pRec->pValPush ); m_Deque.push_back( *(pRec->pValPush)); break; case op_push_back_move: assert( pRec->pValPush ); m_Deque.push_back( std::move( *(pRec->pValPush ))); break; case op_pop_front: assert( pRec->pValPop ); pRec->bEmpty = m_Deque.empty(); if ( !pRec->bEmpty ) { *(pRec->pValPop) = std::move( m_Deque.front()); m_Deque.pop_front(); } break; case op_pop_back: assert( pRec->pValPop ); pRec->bEmpty = m_Deque.empty(); if ( !pRec->bEmpty ) { *(pRec->pValPop) = std::move( m_Deque.back()); m_Deque.pop_back(); } break; case op_clear: while ( !m_Deque.empty()) m_Deque.pop_front(); break; default: assert(false); break; } } /// Batch-processing flat combining void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) { typedef typename fc_kernel::iterator fc_iterator; for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { switch ( it->op( atomics::memory_order_acquire )) { case op_push_front: if ( itPrev != itEnd && (itPrev->op() == op_pop_front || (m_Deque.empty() && itPrev->op() == op_pop_back))) { collide( *it, *itPrev ); itPrev = itEnd; } else itPrev = it; break; case op_push_front_move: if ( itPrev != itEnd && (itPrev->op() == op_pop_front || ( m_Deque.empty() && itPrev->op() == op_pop_back ))) { collide_move( *it, *itPrev ); itPrev = itEnd; } else itPrev = it; break; case op_push_back: if ( itPrev != itEnd && (itPrev->op() == op_pop_back || (m_Deque.empty() && itPrev->op() == op_pop_front))) { collide( *it, *itPrev ); itPrev = itEnd; } else itPrev = it; break; case op_push_back_move: if ( itPrev != itEnd && (itPrev->op() == op_pop_back || ( m_Deque.empty() && itPrev->op() == op_pop_front ))) { collide_move( *it, *itPrev ); itPrev = itEnd; } else itPrev = it; break; case op_pop_front: if ( itPrev != itEnd ) { if ( m_Deque.empty()) { switch ( itPrev->op()) { case op_push_back: collide( *itPrev, *it ); itPrev = itEnd; break; case op_push_back_move: collide_move( *itPrev, *it ); itPrev = itEnd; break; default: itPrev = it; break; } } else { switch ( itPrev->op()) { case op_push_front: collide( *itPrev, *it ); itPrev = itEnd; break; case op_push_front_move: collide_move( *itPrev, *it ); itPrev = itEnd; break; default: itPrev = it; break; } } } else itPrev = it; break; case op_pop_back: if ( itPrev != itEnd ) { if ( m_Deque.empty()) { switch ( itPrev->op()) { case op_push_front: collide( *itPrev, *it ); itPrev = itEnd; break; case op_push_front_move: collide_move( *itPrev, *it ); itPrev = itEnd; break; default: itPrev = it; break; } } else { switch ( itPrev->op()) { case op_push_back: collide( *itPrev, *it ); itPrev = itEnd; break; case op_push_back_move: collide_move( *itPrev, *it ); itPrev = itEnd; break; default: itPrev = it; break; } } } else itPrev = it; break; } } } //@endcond private: //@cond void collide( fc_record& recPush, fc_record& recPop ) { *(recPop.pValPop) = *(recPush.pValPush); recPop.bEmpty = false; 
m_FlatCombining.operation_done( recPush ); m_FlatCombining.operation_done( recPop ); m_FlatCombining.internal_statistics().onCollide(); } void collide_move( fc_record& recPush, fc_record& recPop ) { *(recPop.pValPop) = std::move( *(recPush.pValPush)); recPop.bEmpty = false; m_FlatCombining.operation_done( recPush ); m_FlatCombining.operation_done( recPop ); m_FlatCombining.internal_statistics().onCollide(); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_FCDEQUE_H libcds-2.3.3/cds/container/fcpriority_queue.h000066400000000000000000000261351341244201700212660ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FCPRIORITY_QUEUE_H #define CDSLIB_CONTAINER_FCPRIORITY_QUEUE_H #include #include #include namespace cds { namespace container { /// FCPriorityQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace fcpqueue { /// FCPriorityQueue internal statistics template struct stat: public cds::algo::flat_combining::stat { typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type counter_type m_nPush ; ///< Count of push operations counter_type m_nPushMove ; ///< Count of push operations with move semantics counter_type m_nPop ; ///< Count of success pop operations counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty queue) //@cond void onPush() { ++m_nPush; } void onPushMove() { ++m_nPushMove; } void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } //@endcond }; /// FCPriorityQueue dummy statistics, no overhead struct empty_stat: public cds::algo::flat_combining::empty_stat { //@cond void onPush() {} void onPushMove() {} void onPop(bool) {} //@endcond }; /// FCPriorityQueue traits struct traits: public cds::algo::flat_combining::traits { typedef empty_stat stat; ///< Internal statistics }; /// Metafunction converting option list to traits /** \p Options are: - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \p fcpqueue::stat, \p fcpqueue::empty_stat (the default) */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace fcpqueue /// Flat-combining priority queue /** @ingroup cds_nonintrusive_priority_queue @ingroup cds_flat_combining_container \ref cds_flat_combining_description "Flat combining" sequential priority queue. The class can be considered as a concurrent FC-based wrapper for \p std::priority_queue. Template parameters: - \p T - a value type stored in the queue - \p PriorityQueue - sequential priority queue implementation, default is \p std::priority_queue - \p Traits - type traits of flat combining, default is \p fcpqueue::traits. 
\p fcpqueue::make_traits metafunction can be used to construct specialized \p %fcpqueue::traits */ template , typename Traits = fcpqueue::traits > class FCPriorityQueue #ifndef CDS_DOXYGEN_INVOKED : public cds::algo::flat_combining::container #endif { public: typedef T value_type; ///< Value type typedef PriorityQueue priority_queue_type; ///< Sequential priority queue class typedef Traits traits; ///< Priority queue type traits typedef typename traits::stat stat; ///< Internal statistics type protected: //@cond // Priority queue operation IDs enum fc_operation { op_push = cds::algo::flat_combining::req_Operation, op_push_move, op_pop, op_clear }; // Flat combining publication list record struct fc_record: public cds::algo::flat_combining::publication_record { union { value_type const * pValPush; // Value to push value_type * pValPop; // Pop destination }; bool bEmpty; // true if the queue is empty }; //@endcond /// Flat combining kernel typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; protected: //@cond mutable fc_kernel m_FlatCombining; priority_queue_type m_PQueue; //@endcond public: /// Initializes empty priority queue object FCPriorityQueue() {} /// Initializes empty priority queue object and gives flat combining parameters FCPriorityQueue( unsigned int nCompactFactor ///< Flat combining: publication list compacting factor ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread ) : m_FlatCombining( nCompactFactor, nCombinePassCount ) {} /// Inserts a new element in the priority queue /** The function always returns \p true */ bool push( value_type const& val ///< Value to be copied to inserted element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; m_FlatCombining.combine( op_push, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPush(); return true; } /// Inserts a new element in the priority queue (move semantics) /** The function always returns \p true */ bool push( value_type&& val ///< Value to be moved to inserted element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; m_FlatCombining.combine( op_push_move, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPushMove(); return true; } /// Removes the top element from priority queue /** The function returns \p false if the queue is empty, \p true otherwise. If the queue is empty \p val is not changed. */ bool pop( value_type& val ///< Target to be received the copy of top element ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; m_FlatCombining.combine( op_pop, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); return !pRec->bEmpty; } /// Exclusive access to underlying priority queue object /** The functor \p f can do any operation with underlying \p priority_queue_type in exclusive mode. For example, you can iterate over the queue. \p Func signature is: \code void f( priority_queue_type& deque ); \endcode */ template void apply( Func f ) { auto& pqueue = m_PQueue; m_FlatCombining.invoke_exclusive( [&pqueue, &f]() { f( pqueue ); } ); } /// Exclusive access to underlying priority queue object /** The functor \p f can do any operation with underlying \p proiprity_queue_type in exclusive mode. For example, you can iterate over the queue. 
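An illustrative sketch (assuming \p pq is an \p FCPriorityQueue<int>):
\code
// Read-only inspection of the underlying container in exclusive mode
pq.apply( []( std::priority_queue<int> const& q ) {
    if ( !q.empty())
        std::cout << "top: " << q.top() << '\n';
});
\endcode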
\p Func signature is: \code void f( priority_queue_type const& queue ); \endcode */ template void apply( Func f ) const { auto const& pqueue = m_PQueue; m_FlatCombining.invoke_exclusive( [&pqueue, &f]() { f( pqueue ); } ); } /// Clears the priority queue void clear() { auto pRec = m_FlatCombining.acquire_record(); m_FlatCombining.combine( op_clear, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); } /// Returns the number of elements in the priority queue. /** Note that size() == 0 does not mean that the queue is empty because combining record can be in process. To check emptiness use \ref empty function. */ size_t size() const { return m_PQueue.size(); } /// Checks if the priority queue is empty /** If the combining is in process the function waits while combining done. */ bool empty() { bool bRet = false; auto const& pq = m_PQueue; m_FlatCombining.invoke_exclusive( [&pq, &bRet]() { bRet = pq.empty(); } ); return bRet; } /// Internal statistics stat const& statistics() const { return m_FlatCombining.statistics(); } public: // flat combining cooperation, not for direct use! //@cond /* The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" object if the current thread becomes a combiner. Invocation of the function means that the priority queue should perform an action recorded in \p pRec. */ void fc_apply( fc_record * pRec ) { assert( pRec ); // this function is called under FC mutex, so switch TSan off //CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; switch ( pRec->op()) { case op_push: assert( pRec->pValPush ); m_PQueue.push( *(pRec->pValPush)); break; case op_push_move: assert( pRec->pValPush ); m_PQueue.push( std::move( *(pRec->pValPush ))); break; case op_pop: assert( pRec->pValPop ); pRec->bEmpty = m_PQueue.empty(); if ( !pRec->bEmpty ) { *(pRec->pValPop) = std::move( m_PQueue.top()); m_PQueue.pop(); } break; case op_clear: while ( !m_PQueue.empty()) m_PQueue.pop(); break; default: assert(false); break; } //CDS_TSAN_ANNOTATE_IGNORE_RW_END; } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_FCPRIORITY_QUEUE_H libcds-2.3.3/cds/container/fcqueue.h000066400000000000000000000350421341244201700173220ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FCQUEUE_H #define CDSLIB_CONTAINER_FCQUEUE_H #include #include #include namespace cds { namespace container { /// FCQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace fcqueue { /// FCQueue internal statistics template struct stat: public cds::algo::flat_combining::stat { typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type counter_type m_nEnqueue ; ///< Count of enqueue operations counter_type m_nEnqMove ; ///< Count of enqueue operations with move semantics counter_type m_nDequeue ; ///< Count of success dequeue operations counter_type m_nFailedDeq ; ///< Count of failed dequeue operations (pop from empty queue) counter_type m_nCollided ; ///< How many pairs of enqueue/dequeue were collided, if elimination is enabled //@cond void onEnqueue() { ++m_nEnqueue; } void onEnqMove() { ++m_nEnqMove; } void onDequeue( bool bFailed ) { if ( bFailed ) ++m_nFailedDeq; else ++m_nDequeue; } void onCollide() { ++m_nCollided; } //@endcond }; /// FCQueue dummy statistics, no overhead struct empty_stat: public cds::algo::flat_combining::empty_stat { //@cond void onEnqueue() {} void onEnqMove() {} void onDequeue(bool) {} void onCollide() {} //@endcond }; /// FCQueue type traits struct traits: public cds::algo::flat_combining::traits { typedef empty_stat stat; ///< Internal statistics static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" }; /// Metafunction converting option list to traits /** \p Options are: - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default) - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. For queue, the elimination is possible if the queue is empty. */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace fcqueue /// Flat-combining queue /** @ingroup cds_nonintrusive_queue @ingroup cds_flat_combining_container \ref cds_flat_combining_description "Flat combining" sequential queue. The class can be considered as a concurrent FC-based wrapper for \p std::queue. Template parameters: - \p T - a value type stored in the queue - \p Queue - sequential queue implementation, default is \p std::queue - \p Trats - type traits of flat combining, default is \p fcqueue::traits. \p fcqueue::make_traits metafunction can be used to construct \p %fcqueue::traits specialization. 
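A minimal usage sketch with the default \p std::queue backend:
\code
#include <cds/container/fcqueue.h>

cds::container::FCQueue<int> q;
q.enqueue( 42 );
int v;
if ( q.dequeue( v )) {
    // v == 42
}
\endcode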
*/ template , typename Traits = fcqueue::traits > class FCQueue #ifndef CDS_DOXYGEN_INVOKED : public cds::algo::flat_combining::container #endif { public: typedef T value_type; ///< Value type typedef Queue queue_type; ///< Sequential queue class typedef Traits traits; ///< Queue type traits typedef typename traits::stat stat; ///< Internal statistics type static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled protected: //@cond /// Queue operation IDs enum fc_operation { op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue op_enq_move, ///< Enqueue (move semantics) op_deq, ///< Dequeue op_clear ///< Clear }; /// Flat combining publication list record struct fc_record: public cds::algo::flat_combining::publication_record { union { value_type const * pValEnq; ///< Value to enqueue value_type * pValDeq; ///< Dequeue destination }; bool bEmpty; ///< \p true if the queue is empty }; //@endcond /// Flat combining kernel typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; protected: //@cond mutable fc_kernel m_FlatCombining; queue_type m_Queue; //@endcond public: /// Initializes empty queue object FCQueue() {} /// Initializes empty queue object and gives flat combining parameters FCQueue( unsigned int nCompactFactor ///< Flat combining: publication list compacting factor ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread ) : m_FlatCombining( nCompactFactor, nCombinePassCount ) {} /// Inserts a new element at the end of the queue /** The content of the new element initialized to a copy of \p val. The function always returns \p true */ bool enqueue( value_type const& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValEnq = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_enq, pRec, *this ); else m_FlatCombining.combine( op_enq, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onEnqueue(); return true; } /// Inserts a new element at the end of the queue (a synonym for \ref enqueue) bool push( value_type const& val ) { return enqueue( val ); } /// Inserts a new element at the end of the queue (move semantics) /** \p val is moved to inserted element */ bool enqueue( value_type&& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValEnq = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_enq_move, pRec, *this ); else m_FlatCombining.combine( op_enq_move, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onEnqMove(); return true; } /// Inserts a new element at the end of the queue (move semantics, synonym for \p enqueue) bool push( value_type&& val ) { return enqueue( val ); } /// Removes the next element from the queue /** \p val takes a copy of the element */ bool dequeue( value_type& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValDeq = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_deq, pRec, *this ); else m_FlatCombining.combine( op_deq, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onDequeue( pRec->bEmpty ); return !pRec->bEmpty; } /// Removes the next element from the queue (a synonym for \ref dequeue) bool pop( value_type& val ) { return dequeue( val ); } /// Clears the queue void clear() { auto pRec = 
m_FlatCombining.acquire_record(); constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_clear, pRec, *this ); else m_FlatCombining.combine( op_clear, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); } /// Exclusive access to underlying queue object /** The functor \p f can do any operation with underlying \p queue_type in exclusive mode. For example, you can iterate over the queue. \p Func signature is: \code void f( queue_type& queue ); \endcode */ template void apply( Func f ) { auto& queue = m_Queue; m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); } /// Exclusive access to underlying queue object /** The functor \p f can do any operation with underlying \p queue_type in exclusive mode. For example, you can iterate over the queue. \p Func signature is: \code void f( queue_type const& queue ); \endcode */ template void apply( Func f ) const { auto const& queue = m_Queue; m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); } /// Returns the number of elements in the queue. /** Note that size() == 0 is not mean that the queue is empty because combining record can be in process. To check emptiness use \ref empty function. */ size_t size() const { return m_Queue.size(); } /// Checks if the queue is empty /** If the combining is in process the function waits while combining done. */ bool empty() const { bool bRet = false; auto const& queue = m_Queue; m_FlatCombining.invoke_exclusive( [&queue, &bRet]() { bRet = queue.empty(); } ); return bRet; } /// Internal statistics stat const& statistics() const { return m_FlatCombining.statistics(); } public: // flat combining cooperation, not for direct use! //@cond /// Flat combining supporting function. Do not call it directly! /** The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" object if the current thread becomes a combiner. Invocation of the function means that the queue should perform an action recorded in \p pRec. 
*/ void fc_apply( fc_record * pRec ) { assert( pRec ); switch ( pRec->op()) { case op_enq: assert( pRec->pValEnq ); m_Queue.push( *(pRec->pValEnq )); break; case op_enq_move: assert( pRec->pValEnq ); m_Queue.push( std::move( *(pRec->pValEnq ))); break; case op_deq: assert( pRec->pValDeq ); pRec->bEmpty = m_Queue.empty(); if ( !pRec->bEmpty ) { *(pRec->pValDeq) = std::move( m_Queue.front()); m_Queue.pop(); } break; case op_clear: while ( !m_Queue.empty()) m_Queue.pop(); break; default: assert(false); break; } } /// Batch-processing flat combining void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) { typedef typename fc_kernel::iterator fc_iterator; for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { switch ( it->op( atomics::memory_order_acquire )) { case op_enq: case op_enq_move: case op_deq: if ( m_Queue.empty()) { if ( itPrev != itEnd && collide( *itPrev, *it )) itPrev = itEnd; else itPrev = it; } break; } } } //@endcond private: //@cond bool collide( fc_record& rec1, fc_record& rec2 ) { switch ( rec1.op()) { case op_enq: if ( rec2.op() == op_deq ) { assert(rec1.pValEnq); assert(rec2.pValDeq); *rec2.pValDeq = *rec1.pValEnq; rec2.bEmpty = false; goto collided; } break; case op_enq_move: if ( rec2.op() == op_deq ) { assert(rec1.pValEnq); assert(rec2.pValDeq); *rec2.pValDeq = std::move( *rec1.pValEnq ); rec2.bEmpty = false; goto collided; } break; case op_deq: switch ( rec2.op()) { case op_enq: case op_enq_move: return collide( rec2, rec1 ); } } return false; collided: m_FlatCombining.operation_done( rec1 ); m_FlatCombining.operation_done( rec2 ); m_FlatCombining.internal_statistics().onCollide(); return true; } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_FCQUEUE_H libcds-2.3.3/cds/container/fcstack.h000066400000000000000000000337441341244201700173120ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FCSTACK_H #define CDSLIB_CONTAINER_FCSTACK_H #include #include #include namespace cds { namespace container { /// FCStack related definitions /** @ingroup cds_nonintrusive_helper */ namespace fcstack { /// FCStack internal statistics template struct stat: public cds::algo::flat_combining::stat { typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type counter_type m_nPush ; ///< Count of push operations counter_type m_nPushMove ; ///< Count of push operations with move semantics counter_type m_nPop ; ///< Count of success pop operations counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty stack) counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled //@cond void onPush() { ++m_nPush; } void onPushMove() { ++m_nPushMove; } void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } void onCollide() { ++m_nCollided; } //@endcond }; /// FCStack dummy statistics, no overhead struct empty_stat: public cds::algo::flat_combining::empty_stat { //@cond void onPush() {} void onPushMove() {} void onPop(bool) {} void onCollide() {} //@endcond }; /// FCStack type traits struct traits: public cds::algo::flat_combining::traits { typedef empty_stat stat; ///< Internal statistics static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" }; /// Metafunction converting option list to traits /** \p Options are: - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default) - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace fcstack /// Flat-combining stack /** @ingroup cds_nonintrusive_stack @ingroup cds_flat_combining_container \ref cds_flat_combining_description "Flat combining" sequential stack. 
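A minimal usage sketch with the default \p std::stack backend (the template parameters are described below):
\code
#include <cds/container/fcstack.h>

cds::container::FCStack<int> st;
st.push( 10 );
int v;
bool ok = st.pop( v );   // ok == true, v == 10
\endcode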
Template parameters: - \p T - a value type stored in the stack - \p Stack - sequential stack implementation, default is \p std::stack<T> - \p Traits - type traits of flat combining, default is \p fcstack::traits \p fcstack::make_traits metafunction can be used to construct specialized \p %fcstack::traits */ template <typename T, class Stack = std::stack<T>, typename Traits = fcstack::traits > class FCStack #ifndef CDS_DOXYGEN_INVOKED : public cds::algo::flat_combining::container #endif { public: typedef T value_type; ///< Value type typedef Stack stack_type; ///< Sequential stack class typedef Traits traits; ///< Stack traits typedef typename traits::stat stat; ///< Internal statistics type static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled protected: //@cond /// Stack operation IDs enum fc_operation { op_push = cds::algo::flat_combining::req_Operation, ///< Push op_push_move, ///< Push (move semantics) op_pop, ///< Pop op_clear, ///< Clear op_empty ///< Empty }; /// Flat combining publication list record struct fc_record: public cds::algo::flat_combining::publication_record { union { value_type const * pValPush; ///< Value to push value_type * pValPop; ///< Pop destination }; bool bEmpty; ///< \p true if the stack is empty }; //@endcond /// Flat combining kernel typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; protected: //@cond mutable fc_kernel m_FlatCombining; stack_type m_Stack; //@endcond public: /// Initializes empty stack object FCStack() {} /// Initializes empty stack object and gives flat combining parameters FCStack( unsigned int nCompactFactor ///< Flat combining: publication list compacting factor ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread ) : m_FlatCombining( nCompactFactor, nCombinePassCount ) {} /// Inserts a new element at the top of stack /** The content of the new element is initialized to a copy of \p val. */ bool push( value_type const& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push, pRec, *this ); else m_FlatCombining.combine( op_push, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPush(); return true; } /// Inserts a new element at the top of stack (move semantics) /** The content of the new element is moved from \p val.
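For example (a small sketch; \p str_stack is an illustrative typedef):
\code
typedef cds::container::FCStack< std::string > str_stack;
str_stack s;
std::string data( "payload" );
s.push( std::move( data )); // data is left in a valid but unspecified state
\endcode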
*/ bool push( value_type&& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push_move, pRec, *this ); else m_FlatCombining.combine( op_push_move, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPushMove(); return true; } /// Removes the element on top of the stack /** \p val takes a copy of the top element */ bool pop( value_type& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_pop, pRec, *this ); else m_FlatCombining.combine( op_pop, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); return !pRec->bEmpty; } /// Clears the stack void clear() { auto pRec = m_FlatCombining.acquire_record(); constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_clear, pRec, *this ); else m_FlatCombining.combine( op_clear, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); } /// Exclusive access to underlying stack object /** The functor \p f can do any operation with underlying \p stack_type in exclusive mode. For example, you can iterate over the stack. \p Func signature is: \code void f( stack_type& stack ); \endcode */ template <typename Func> void apply( Func f ) { auto& stack = m_Stack; m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); } /// Exclusive access to underlying stack object /** The functor \p f can do any operation with underlying \p stack_type in exclusive mode. For example, you can iterate over the stack. \p Func signature is: \code void f( stack_type const& stack ); \endcode */ template <typename Func> void apply( Func f ) const { auto const& stack = m_Stack; m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); } /// Returns the number of elements in the stack. /** Note that size() == 0 does not mean that the stack is empty, because a combining record can be in process. To check emptiness use the \ref empty() function. */ size_t size() const { return m_Stack.size(); } /// Checks if the stack is empty /** If combining is in process the function waits until it is done. */ bool empty() { auto pRec = m_FlatCombining.acquire_record(); constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_empty, pRec, *this ); else m_FlatCombining.combine( op_empty, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); return pRec->bEmpty; } /// Internal statistics stat const& statistics() const { return m_FlatCombining.statistics(); } public: // flat combining cooperation, not for direct use! //@cond /// Flat combining supporting function. Do not call it directly! /** The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" object if the current thread becomes a combiner. Invocation of the function means that the stack should perform an action recorded in \p pRec.
*/ void fc_apply( fc_record * pRec ) { assert( pRec ); switch ( pRec->op()) { case op_push: assert( pRec->pValPush ); m_Stack.push( *(pRec->pValPush )); break; case op_push_move: assert( pRec->pValPush ); m_Stack.push( std::move( *(pRec->pValPush ))); break; case op_pop: assert( pRec->pValPop ); pRec->bEmpty = m_Stack.empty(); if ( !pRec->bEmpty ) { *(pRec->pValPop) = std::move( m_Stack.top()); m_Stack.pop(); } break; case op_clear: while ( !m_Stack.empty()) m_Stack.pop(); break; case op_empty: pRec->bEmpty = m_Stack.empty(); break; default: assert(false); break; } } /// Batch-processing flat combining void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) { typedef typename fc_kernel::iterator fc_iterator; for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { switch ( it->op( atomics::memory_order_acquire )) { case op_push: case op_push_move: case op_pop: if ( itPrev != itEnd && collide( *itPrev, *it )) itPrev = itEnd; else itPrev = it; break; } } } //@endcond private: //@cond bool collide( fc_record& rec1, fc_record& rec2 ) { switch ( rec1.op()) { case op_push: if ( rec2.op() == op_pop ) { assert(rec1.pValPush); assert(rec2.pValPop); *rec2.pValPop = *rec1.pValPush; rec2.bEmpty = false; goto collided; } break; case op_push_move: if ( rec2.op() == op_pop ) { assert(rec1.pValPush); assert(rec2.pValPop); *rec2.pValPop = std::move( *rec1.pValPush ); rec2.bEmpty = false; goto collided; } break; case op_pop: switch ( rec2.op()) { case op_push: case op_push_move: return collide( rec2, rec1 ); } } return false; collided: m_FlatCombining.operation_done( rec1 ); m_FlatCombining.operation_done( rec2 ); m_FlatCombining.internal_statistics().onCollide(); return true; } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_FCSTACK_H libcds-2.3.3/cds/container/feldman_hashmap_dhp.h000066400000000000000000000006421341244201700216250ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_DHP_H #define CDSLIB_CONTAINER_FELDMAN_HASHMAP_DHP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_DHP_H libcds-2.3.3/cds/container/feldman_hashmap_hp.h000066400000000000000000000006361341244201700214640ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_HP_H #define CDSLIB_CONTAINER_FELDMAN_HASHMAP_HP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_HP_H libcds-2.3.3/cds/container/feldman_hashmap_rcu.h000066400000000000000000000765351341244201700216610ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_RCU_H #define CDSLIB_CONTAINER_FELDMAN_HASHMAP_RCU_H #include <cds/intrusive/feldman_hashset_rcu.h> #include <cds/container/details/feldman_hashmap_base.h> namespace cds { namespace container { /// Hash map based on multi-level array /** @ingroup cds_nonintrusive_map @anchor cds_container_FeldmanHashMap_rcu Source: - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: Wait-free Extensible Hash Maps" See algorithm short description @ref cds_container_FeldmanHashMap_hp "here" @note Two important things you should keep in mind when you're using \p %FeldmanHashMap: - all keys are converted to a fixed-size bit-string by the hash functor provided. You can use variable-length keys, for example, \p std::string as a key for \p %FeldmanHashMap, but the real keys in the map will be the fixed-size hash values of your keys. For strings you may use well-known hashing algorithms like SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash and so on, which convert variable-length strings to fixed-length bit-strings, and such hash values will be the keys in \p %FeldmanHashMap. If your key is fixed-size the hash functor is optional, see \p feldman_hashmap::traits::hash for explanation and examples. - \p %FeldmanHashMap uses perfect hashing. It means that if two different keys, for example, of type \p std::string, have an identical hash then you cannot insert both keys into the map. \p %FeldmanHashMap does not maintain the key, it maintains its fixed-size hash value. The map supports @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional thread-safe iterators". Template parameters: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - a key type to be stored in the map - \p T - a value type to be stored in the map - \p Traits - type traits, the structure based on \p feldman_hashmap::traits or result of \p feldman_hashmap::make_traits metafunction. @note Before including this header you should include an appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
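A brief declaration sketch (typedef names are illustrative; it assumes the default traits, where a fixed-size integral key serves as its own hash):
\code
#include <cds/urcu/general_buffered.h> // RCU header must come first
#include <cds/container/feldman_hashmap_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
typedef cds::container::FeldmanHashMap< rcu, int, std::string > map_type;

map_type m;
m.insert( 1, "one" ); // key 1 is hashed to itself by the default traits
\endcode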
*/ template < class RCU ,typename Key ,typename T #ifdef CDS_DOXYGEN_INVOKED ,class Traits = feldman_hashmap::traits #else ,class Traits #endif > class FeldmanHashMap< cds::urcu::gc< RCU >, Key, T, Traits > #ifdef CDS_DOXYGEN_INVOKED : protected cds::intrusive::FeldmanHashSet< cds::urcu::gc< RCU >, std::pair, Traits > #else : protected cds::container::details::make_feldman_hashmap< cds::urcu::gc< RCU >, Key, T, Traits >::type #endif { //@cond typedef cds::container::details::make_feldman_hashmap< cds::urcu::gc< RCU >, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector typedef Key key_type; ///< Key type typedef T mapped_type; ///< Mapped type typedef std::pair< key_type const, mapped_type> value_type; ///< Key-value pair to be stored in the map typedef Traits traits; ///< Map traits #ifdef CDS_DOXYGEN_INVOKED typedef typename traits::hash hasher; ///< Hash functor, see \p feldman_hashmap::traits::hash #else typedef typename maker::hasher hasher; #endif typedef typename maker::hash_type hash_type; ///< Hash type deduced from \p hasher return type typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p Traits::compare and \p Traits::less typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Element allocator typedef typename traits::node_allocator node_allocator; ///< Array node allocator typedef typename traits::memory_model memory_model; ///< Memory model typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::stat stat; ///< Internal statistics type typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking /// Level statistics typedef feldman_hashmap::level_statistics level_statistics; protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef std::unique_ptr< node_type, typename maker::node_disposer > scoped_node_ptr; typedef typename base_class::check_deadlock_policy check_deadlock_policy; struct node_cast { value_type * operator()(node_type * p) const { return p ? 
&p->m_Value : nullptr; } }; public: /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename base_class::disposer, node_cast >; protected: template class bidirectional_iterator: public base_class::iterator_base { friend class FeldmanHashMap; typedef typename base_class::iterator_base iterator_base; protected: static constexpr bool const c_bConstantIterator = IsConst; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: bidirectional_iterator() noexcept {} bidirectional_iterator( bidirectional_iterator const& rhs ) noexcept : iterator_base( rhs ) {} bidirectional_iterator& operator=(bidirectional_iterator const& rhs) noexcept { iterator_base::operator=( rhs ); return *this; } bidirectional_iterator& operator++() { iterator_base::operator++(); return *this; } bidirectional_iterator& operator--() { iterator_base::operator--(); return *this; } value_ptr operator ->() const noexcept { node_type * p = iterator_base::pointer(); return p ? &p->m_Value : nullptr; } value_ref operator *() const noexcept { node_type * p = iterator_base::pointer(); assert( p ); return p->m_Value; } void release() { iterator_base::release(); } template bool operator ==(bidirectional_iterator const& rhs) const noexcept { return iterator_base::operator==( rhs ); } template bool operator !=(bidirectional_iterator const& rhs) const noexcept { return !( *this == rhs ); } public: // for internal use only! bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) : iterator_base( set, pNode, idx, false ) {} bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) : iterator_base( set, pNode, idx ) {} }; /// Reverse bidirectional iterator template class reverse_bidirectional_iterator : public base_class::iterator_base { friend class FeldmanHashMap; typedef typename base_class::iterator_base iterator_base; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: reverse_bidirectional_iterator() noexcept : iterator_base() {} reverse_bidirectional_iterator( reverse_bidirectional_iterator const& rhs ) noexcept : iterator_base( rhs ) {} reverse_bidirectional_iterator& operator=( reverse_bidirectional_iterator const& rhs) noexcept { iterator_base::operator=( rhs ); return *this; } reverse_bidirectional_iterator& operator++() { iterator_base::operator--(); return *this; } reverse_bidirectional_iterator& operator--() { iterator_base::operator++(); return *this; } value_ptr operator ->() const noexcept { node_type * p = iterator_base::pointer(); return p ? &p->m_Value : nullptr; } value_ref operator *() const noexcept { node_type * p = iterator_base::pointer(); assert( p ); return p->m_Value; } void release() { iterator_base::release(); } template bool operator ==(reverse_bidirectional_iterator const& rhs) const { return iterator_base::operator==( rhs ); } template bool operator !=(reverse_bidirectional_iterator const& rhs) { return !( *this == rhs ); } public: // for internal use only! 
reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) : iterator_base( set, pNode, idx, false ) {} reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) : iterator_base( set, pNode, idx, false ) { iterator_base::backward(); } }; //@endcond public: #ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional iterator" type typedef implementation_defined const_iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional const iterator" type typedef implementation_defined reverse_iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional reverse iterator" type typedef implementation_defined const_reverse_iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional reverse const iterator" type #else typedef bidirectional_iterator iterator; typedef bidirectional_iterator const_iterator; typedef reverse_bidirectional_iterator reverse_iterator; typedef reverse_bidirectional_iterator const_reverse_iterator; #endif protected: //@cond hasher m_Hasher; //@endcond public: /// Creates empty map /** @param head_bits - 2head_bits specifies the size of head array, minimum is 4. @param array_bits - 2array_bits specifies the size of array node, minimum is 2. Equation for \p head_bits and \p array_bits: \code sizeof(hash_type) * 8 == head_bits + N * array_bits \endcode where \p N is multi-level array depth. */ FeldmanHashMap( size_t head_bits = 8, size_t array_bits = 4 ) : base_class( head_bits, array_bits ) {} /// Destructs the map and frees all data ~FeldmanHashMap() {} /// Inserts new element with key and default value /** The function creates an element with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. In trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. The function locks RCU internally. */ template bool insert( K&& key ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key))); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Inserts new element /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p value_type should be constructible from \p val of type \p V. Returns \p true if \p val is inserted into the map, \p false otherwise. The function locks RCU internally. */ template bool insert( K&& key, V&& val ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key), std::forward(val))); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Inserts new element and initialize it by a functor /** This function inserts new element with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. \p key_type should be constructible from value of type \p K. 
The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. The function locks RCU internally. */ template bool insert_with( K&& key, Func func ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key))); if ( base_class::insert( *sp, [&func]( node_type& item ) { func( item.m_Value ); } )) { sp.release(); return true; } return false; } /// For key \p key inserts data of type \p value_type created in-place from std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. The function locks RCU internally. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key), std::forward(args)... )); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the map, then the new item created from \p key will be inserted into the map iff \p bInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, it is replaced with a new item created from \p key. The functor \p Func signature: \code struct my_functor { void operator()( value_type& item, value_type * old ); }; \endcode where: - \p item - item of the map - \p old - old item of the map, if \p nullptr - the new item was inserted The functor may change any fields of the \p item.second. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if \p key already exists. The function locks RCU internally. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K&& key, Func func, bool bInsert = true ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key))); std::pair result = base_class::do_update( *sp, [&func]( node_type& node, node_type * old ) { func( node.m_Value, old ? &old->m_Value : nullptr );}, bInsert ); if ( result.first ) sp.release(); return result; } /// Delete \p key from the map /** \p key_type must be constructible from value of type \p K. The function deletes the element with hash value equal to hash( key_type( key )) Return \p true if \p key is found and deleted, \p false otherwise. RCU should not be locked. The function locks RCU internally. */ template bool erase( K const& key ) { return base_class::erase(m_Hasher(key_type(key))); } /// Delete \p key from the map /** The function searches an item with hash value equal to hash( key_type( key )), calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode where \p item is the element found. \p key_type must be constructible from value of type \p K. Return \p true if key is found and deleted, \p false otherwise RCU should not be locked. The function locks RCU internally. 
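For example (a hedged sketch reusing the illustrative \p map_type from the declaration example above):
\code
map_type m;
// ...
m.erase( 1, []( map_type::value_type& item ) {
    // last chance to read item.first / item.second before disposal
});
\endcode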
*/ template bool erase( K const& key, Func f ) { return base_class::erase(m_Hasher(key_type(key)), [&f]( node_type& node) { f( node.m_Value ); }); } /// Extracts the item from the map with specified \p key /** The function searches an item with key equal to hash( key_type( key )) in the map, unlinks it from the map, and returns a guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. Example: \code typedef cds::container::FeldmanHashMap< cds::urcu::gc< cds::urcu::general_buffered<>>, int, foo, my_traits > map_type; map_type theMap; // ... typename map_type::exempt_ptr ep( theMap.extract( 5 )); if ( ep ) { // Deal with ep //... // Dispose returned item. ep.release(); } \endcode */ template exempt_ptr extract( K const& key ) { check_deadlock_policy::check(); node_type * p; { rcu_lock rcuLock; p = base_class::do_erase( m_Hasher( key_type(key)), [](node_type const&) -> bool {return true;}); } return exempt_ptr(p); } /// Checks whether the map contains \p key /** The function searches the item by its hash that is equal to hash( key_type( key )) and returns \p true if it is found, or \p false otherwise. */ template bool contains( K const& key ) { return base_class::contains( m_Hasher( key_type( key ))); } /// Find the key \p key /** The function searches the item by its hash that is equal to hash( key_type( key )) and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( K const& key, Func f ) { return base_class::find( m_Hasher( key_type( key )), [&f](node_type& node) { f( node.m_Value );}); } /// Finds the key \p key and return the item found /** The function searches the item by its \p hash and returns the pointer to the item found. If \p hash is not found the function returns \p nullptr. RCU should be locked before the function invocation. Returned pointer is valid only while RCU is locked. Usage: \code typedef cds::container::FeldmanHashMap< your_template_params > my_map; my_map theMap; // ... { // lock RCU my_map::rcu_lock; foo * p = theMap.get( 5 ); if ( p ) { // Deal with p //... } } \endcode */ template value_type * get( K const& key ) { node_type * p = base_class::get( m_Hasher( key_type( key ))); return p ? &p->m_Value : nullptr; } /// Clears the map (non-atomic) /** The function unlink all data node from the map. The function is not atomic but is thread-safe. After \p %clear() the map may not be empty because another threads may insert items. */ void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if item count is zero then the map is empty. Thus, the correct item counting feature is an important part of the map implementation. 
*/ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns the size of head node size_t head_size() const { return base_class::head_size(); } /// Returns the size of the array node size_t array_node_size() const { return base_class::array_node_size(); } /// Collects tree level statistics into \p stat /** The function traverses the map and collects statistics for each level of the tree into \p feldman_hashmap::level_statistics struct. The element of \p stat[i] represents statistics for level \p i, level 0 is the head array. The function is thread-safe and may be called in a multi-threaded environment. The result can be useful for estimating the efficiency of the hash functor you use. */ void get_level_statistics(std::vector< feldman_hashmap::level_statistics>& stat) const { base_class::get_level_statistics(stat); } public: ///@name Thread-safe iterators /** @anchor cds_container_FeldmanHashMap_rcu_iterators The map supports thread-safe iterators: you may iterate over the map in a multi-threaded environment under explicit RCU lock. The RCU lock requirement means that inserting or searching is allowed but you must not erase the items from the map since erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely while your thread is iterating. A typical example is: \code struct foo { // ... other fields uint32_t payload; // only for example }; typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu; typedef cds::container::FeldmanHashMap< rcu, std::string, foo> map_type; map_type m; // ... // iterate over the map { // lock the RCU. typename map_type::rcu_lock l; // scoped RCU lock // traverse the map for ( auto i = m.begin(); i != m.end(); ++i ) { // deal with i. Remember, erasing is prohibited here! i->second.payload++; } } // at this point RCU lock is released \endcode Each iterator object supports the common interface: - dereference operators: @code value_type [const] * operator ->() noexcept value_type [const] & operator *() noexcept @endcode - pre-increment and pre-decrement. Post-operators are not supported - equality operators == and !=. Iterators are equal iff they point to the same cell of the same array node. Note that for two iterators \p it1 and \p it2 the condition it1 == it2 does not entail &(*it1) == &(*it2) : welcome to concurrent containers @note It is possible the item can be iterated more than once, for example, if an iterator points to an item in an array node that is being split. */ ///@{ /// Returns an iterator to the beginning of the map iterator begin() { return base_class::template init_begin<iterator>(); } /// Returns a const iterator to the beginning of the map const_iterator begin() const { return base_class::template init_begin<const_iterator>(); } /// Returns a const iterator to the beginning of the map const_iterator cbegin() { return base_class::template init_begin<const_iterator>(); } /// Returns an iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. iterator end() { return base_class::template init_end<iterator>(); } /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior.
const_iterator end() const { return base_class::template init_end(); } /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator cend() { return base_class::template init_end(); } /// Returns a reverse iterator to the first element of the reversed map reverse_iterator rbegin() { return base_class::template init_rbegin(); } /// Returns a const reverse iterator to the first element of the reversed map const_reverse_iterator rbegin() const { return base_class::template init_rbegin(); } /// Returns a const reverse iterator to the first element of the reversed map const_reverse_iterator crbegin() { return base_class::template init_rbegin(); } /// Returns a reverse iterator to the element following the last element of the reversed map /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ reverse_iterator rend() { return base_class::template init_rend(); } /// Returns a const reverse iterator to the element following the last element of the reversed map /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ const_reverse_iterator rend() const { return base_class::template init_rend(); } /// Returns a const reverse iterator to the element following the last element of the reversed map /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ const_reverse_iterator crend() { return base_class::template init_rend(); } ///@} }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_RCU_H libcds-2.3.3/cds/container/feldman_hashset_dhp.h000066400000000000000000000006421341244201700216430ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_DHP_H #define CDSLIB_CONTAINER_FELDMAN_HASHSET_DHP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_DHP_H libcds-2.3.3/cds/container/feldman_hashset_hp.h000066400000000000000000000006361341244201700215020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_HP_H #define CDSLIB_CONTAINER_FELDMAN_HASHSET_HP_H #include #include #endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_HP_H libcds-2.3.3/cds/container/feldman_hashset_rcu.h000066400000000000000000000562151341244201700216700ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_RCU_H #define CDSLIB_CONTAINER_FELDMAN_HASHSET_RCU_H #include <cds/intrusive/feldman_hashset_rcu.h> #include <cds/container/details/feldman_hashset_base.h> namespace cds { namespace container { /// Hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization /** @ingroup cds_nonintrusive_set @anchor cds_container_FeldmanHashSet_rcu Source: - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: Wait-free Extensible Hash Maps" See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here" @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. Instead, for strings you should use well-known hashing algorithms like SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash and so on, which convert variable-length strings to fixed-length bit-strings, and use that hash as the key in \p %FeldmanHashSet. - \p %FeldmanHashSet uses perfect hashing. It means that if two different keys, for example, of type \p std::string, have an identical hash then you cannot insert both keys into the set. \p %FeldmanHashSet does not maintain the key, it maintains its fixed-size hash value. The set supports @ref cds_container_FeldmanHashSet_iterators "bidirectional thread-safe iterators". Template parameters: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - a value type to be stored in the set - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" to the hash value of \p T. The set algorithm does not calculate that hash value. @note Before including this header you should include an appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files. The set supports @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators" with some restrictions.
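A brief declaration sketch (names are illustrative); the mandatory \p hash_accessor trait extracts the fixed-size hash from the value:
\code
#include <cds/urcu/general_buffered.h> // RCU header must come first
#include <cds/container/feldman_hashset_rcu.h>

struct foo {
    uint32_t hash; // fixed-size key, already hashed
    // ... payload fields
};
struct foo_traits: public cds::container::feldman_hashset::traits {
    struct hash_accessor {
        uint32_t operator()( foo const& v ) const { return v.hash; }
    };
};
typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
typedef cds::container::FeldmanHashSet< rcu, foo, foo_traits > set_type;
\endcode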
*/ template < class RCU , typename T #ifdef CDS_DOXYGEN_INVOKED , class Traits = feldman_hashset::traits #else , class Traits #endif > class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits > #ifdef CDS_DOXYGEN_INVOKED : protected cds::intrusive::FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits > #else : protected cds::container::details::make_feldman_hashset< cds::urcu::gc< RCU >, T, Traits >::type #endif { //@cond typedef cds::container::details::make_feldman_hashset< cds::urcu::gc< RCU >, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector typedef T value_type; ///< type of value stored in the set typedef Traits traits; ///< Traits template parameter, see \p feldman_hashset::traits typedef typename base_class::hash_accessor hash_accessor; ///< Hash accessor functor typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p opt::compare and \p opt::less option setter typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Element allocator typedef typename traits::node_allocator node_allocator; ///< Array node allocator typedef typename traits::memory_model memory_model; ///< Memory model typedef typename traits::back_off back_off; ///< Backoff strategy typedef typename traits::stat stat; ///< Internal statistics type typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking typedef typename base_class::exempt_ptr exempt_ptr; ///< pointer to extracted node /// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation static constexpr size_t const c_hash_size = base_class::c_hash_size; /// Level statistics typedef feldman_hashset::level_statistics level_statistics; protected: //@cond typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; //@endcond public: /// Creates empty set /** @param head_bits - 2head_bits specifies the size of head array, minimum is 4. @param array_bits - 2array_bits specifies the size of array node, minimum is 2. Equation for \p head_bits and \p array_bits: \code sizeof(hash_type) * 8 == head_bits + N * array_bits \endcode where \p N is multi-level array depth. */ FeldmanHashSet( size_t head_bits = 8, size_t array_bits = 4 ) : base_class( head_bits, array_bits ) {} /// Destructs the set and frees all data ~FeldmanHashSet() {} /// Inserts new element /** The function creates an element with copy of \p val value and then inserts it into the set. The type \p Q should contain as minimum the complete hash for the element. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. The function locks RCU internally. 
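For example (sketch, reusing the illustrative \p set_type declared above):
\code
set_type s;
foo v;
v.hash = 42;
s.insert( v ); // the set stores a copy of v
\endcode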
*/ template <typename Q> bool insert( Q const& val ) { scoped_node_ptr sp( cxx_node_allocator().New( val )); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Inserts new element /** The function allows splitting the creation of a new item into two parts: - create an item with the key only - insert the new item into the set - if the insert is successful, call the \p f functor to initialize the value-fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor \p f should guarantee that while \p val is being changed, no other changes can be made on this set's item by concurrent threads. The user-defined functor is called only if the insert is successful. The function locks RCU internally. */ template <typename Q, typename Func> bool insert( Q const& val, Func f ) { scoped_node_ptr sp( cxx_node_allocator().New( val )); if ( base_class::insert( *sp, f )) { sp.release(); return true; } return false; } /// Updates the element /** The operation performs inserting or replacing in a lock-free manner. If the \p val key is not found in the set, then the new item created from \p val will be inserted into the set iff \p bInsert is \p true. Otherwise, if \p val is found, it is replaced with a new item created from \p val and the previous item is disposed. In both cases \p func functor is called. The functor \p Func signature: \code struct my_functor { void operator()( value_type& cur, value_type * prev ); }; \endcode where: - \p cur - current element - \p prev - pointer to previous element with such hash. \p prev is \p nullptr if \p cur was just inserted. The functor may change non-key fields of the element; however, \p func must guarantee that during changing no other modifications can be made on this item by concurrent threads. Returns std::pair<bool, bool> where \p first is \p true if operation is successful, i.e. the item has been inserted or updated, \p second is \p true if the new item has been added or \p false if the item with key equal to \p val already exists. */ template <typename Q, typename Func> std::pair<bool, bool> update( Q const& val, Func func, bool bInsert = true ) { scoped_node_ptr sp( cxx_node_allocator().New( val )); std::pair<bool, bool> bRes = base_class::do_update( *sp, func, bInsert ); if ( bRes.first ) sp.release(); return bRes; } /// Inserts data of type \p value_type created in-place from std::forward<Args>(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template <typename... Args> bool emplace( Args&&... args ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( std::forward<Args>(args)... )); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Deletes the item from the set /** The function searches \p hash in the set, deletes the item found, and returns \p true. If that item is not found the function returns \p false. RCU should not be locked. The function locks RCU internally. */ bool erase( hash_type const& hash ) { return base_class::erase( hash ); } /// Deletes the item from the set /** The function searches \p hash in the set, calls the \p f functor with the item found, and deletes the element from the set. The \p Func interface is \code struct functor { void operator()( value_type& item ); }; \endcode If \p hash is not found the function returns \p false. RCU should not be locked. The function locks RCU internally.
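For example (sketch, reusing the illustrative \p set_type above):
\code
set_type s;
// ...
s.erase( 42, []( foo& item ) {
    // called with the found element just before it is removed
});
\endcode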
*/ template <typename Func> bool erase( hash_type const& hash, Func f ) { return base_class::erase( hash, f ); } /// Extracts the item with specified \p hash /** The function searches \p hash in the set, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If an item with such \p hash is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. Example: \code typedef cds::container::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type; set_type theSet; // ... typename set_type::exempt_ptr ep( theSet.extract( 5 )); if ( ep ) { // Deal with ep //... // Dispose returned item. ep.release(); } \endcode */ exempt_ptr extract( hash_type const& hash ) { return base_class::extract( hash ); } /// Finds an item by its \p hash /** The function searches the item by \p hash and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change non-key fields of \p item. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on the item level to prevent unsafe item modifications. The function returns \p true if \p hash is found, \p false otherwise. */ template <typename Func> bool find( hash_type const& hash, Func f ) { return base_class::find( hash, f ); } /// Checks whether the set contains \p hash /** The function searches the item by its \p hash and returns \p true if it is found, or \p false otherwise. */ bool contains( hash_type const& hash ) { return base_class::contains( hash ); } /// Finds an item by its \p hash and returns the item found /** The function searches the item by its \p hash and returns the pointer to the item found. If \p hash is not found the function returns \p nullptr. RCU should be locked before the function invocation. The returned pointer is valid only while RCU is locked. Usage: \code typedef cds::container::FeldmanHashSet< your_template_params > my_set; my_set theSet; // ... { // lock RCU my_set::rcu_lock lock; foo * p = theSet.get( 5 ); if ( p ) { // Deal with p //... } } \endcode */ value_type * get( hash_type const& hash ) { return base_class::get( hash ); } /// Clears the set (non-atomic) /** The function unlinks all data nodes from the set. The function is not atomic but is thread-safe. After \p %clear() the set may not be empty because other threads may insert items. */ void clear() { base_class::clear(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of the set implementation.
*/ bool empty() const { return base_class::empty(); } /// Returns item count in the set size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns the size of head node size_t head_size() const { return base_class::head_size(); } /// Returns the size of the array node size_t array_node_size() const { return base_class::array_node_size(); } /// Collects tree level statistics into \p stat /** The function traverses the set and collects statistics for each level of the tree into \p feldman_hashset::level_statistics struct. The element of \p stat[i] represents statistics for level \p i, level 0 is the head array. The function is thread-safe and may be called in a multi-threaded environment. The result can be useful for estimating the efficiency of the hash functor you use. */ void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const { base_class::get_level_statistics(stat); } public: ///@name Thread-safe iterators ///@{ /// Bidirectional iterator /** @anchor cds_container_FeldmanHashSet_rcu_iterators The set supports thread-safe iterators: you may iterate over the set in a multi-threaded environment under explicit RCU lock. The RCU lock requirement means that inserting or searching is allowed but you must not erase the items from the set since erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely while your thread is iterating. A typical example is: \code struct foo { uint32_t hash; // ... other fields uint32_t payload; // only for example }; struct set_traits: cds::container::feldman_hashset::traits { struct hash_accessor { uint32_t operator()( foo const& src ) const { return src.hash; } }; }; typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu; typedef cds::container::FeldmanHashSet< rcu, foo, set_traits > set_type; set_type s; // ... // iterate over the set { // lock the RCU. typename set_type::rcu_lock l; // scoped RCU lock // traverse the set for ( auto i = s.begin(); i != s.end(); ++i ) { // deal with i. Remember, erasing is prohibited here! i->payload++; } } // at this point RCU lock is released \endcode Each iterator object supports the common interface: - dereference operators: @code value_type [const] * operator ->() noexcept value_type [const] & operator *() noexcept @endcode - pre-increment and pre-decrement. Post-operators are not supported - equality operators == and !=. Iterators are equal iff they point to the same cell of the same array node. Note that for two iterators \p it1 and \p it2 the condition it1 == it2 does not entail &(*it1) == &(*it2) : welcome to concurrent containers @note It is possible the item can be iterated more than once, for example, if an iterator points to an item in an array node that is being split.
*/ typedef typename base_class::iterator iterator; typedef typename base_class::const_iterator const_iterator; ///< @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type typedef typename base_class::reverse_iterator reverse_iterator; ///< @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type typedef typename base_class::const_reverse_iterator const_reverse_iterator; ///< @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type /// Returns an iterator to the beginning of the set iterator begin() { return base_class::begin(); } /// Returns an const iterator to the beginning of the set const_iterator begin() const { return base_class::begin(); } /// Returns an const iterator to the beginning of the set const_iterator cbegin() { return base_class::cbegin(); } /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. iterator end() { return base_class::end(); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator end() const { return base_class::end(); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator cend() { return base_class::cend(); } /// Returns a reverse iterator to the first element of the reversed set reverse_iterator rbegin() { return base_class::rbegin(); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator rbegin() const { return base_class::rbegin(); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator crbegin() { return base_class::crbegin(); } /// Returns a reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ reverse_iterator rend() { return base_class::rend(); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ const_reverse_iterator rend() const { return base_class::rend(); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ const_reverse_iterator crend() { return base_class::crend(); } ///@} }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_RCU_H libcds-2.3.3/cds/container/impl/000077500000000000000000000000001341244201700164515ustar00rootroot00000000000000libcds-2.3.3/cds/container/impl/bronson_avltree_map_rcu.h000066400000000000000000002726501341244201700235460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H #include // is_base_of #include #include #include namespace cds { namespace container { /// Bronson et al AVL-tree (RCU specialization for pointers) /** @ingroup cds_nonintrusive_map @ingroup cds_nonintrusive_tree @headerfile cds/container/bronson_avltree_map_rcu.h @anchor cds_container_BronsonAVLTreeMap_rcu_ptr This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree" for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call the disposer functor provided by \p Traits template parameter. Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated value, not the copy. - \p Traits - tree traits, default is \p bronson_avltree::traits It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction instead of \p Traits template argument. @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. */ template < typename RCU, typename Key, typename T, # ifdef CDS_DOXYGEN_INVOKED typename Traits = bronson_avltree::traits #else typename Traits #endif > class BronsonAVLTreeMap< cds::urcu::gc, Key, T*, Traits > { public: typedef cds::urcu::gc gc; ///< RCU Garbage collector typedef Key key_type; ///< type of a key stored in the map typedef T * mapped_type; ///< type of value stored in the map typedef Traits traits; ///< Traits template parameter # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less # else typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator; #endif typedef typename traits::item_counter item_counter; ///< Item counting policy typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes typedef typename traits::stat stat; ///< internal statistics typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::disposer disposer; ///< Value disposer typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion" static constexpr bool const c_bRelaxedInsert = traits::relaxed_insert; /// Group of \p extract_xxx functions does not require external locking static constexpr const bool c_bExtractLockExternal = false; # ifdef CDS_DOXYGEN_INVOKED /// Returned pointer to \p mapped_type of extracted node typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr; # else typedef cds::urcu::exempt_ptr< gc, typename std::remove_pointer::type, typename std::remove_pointer::type, disposer, void > exempt_ptr; # endif typedef typename gc::scoped_lock rcu_lock; ///< RCU 
scoped lock protected: //@cond typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type; typedef typename node_type::version_type version_type; typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator; typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy; enum class find_result { not_found, found, retry }; struct update_flags { enum { allow_insert = 1, allow_update = 2, //allow_remove = 4, retry = 1024, failed = 0, result_inserted = allow_insert, result_updated = allow_update, result_removed = 4 }; }; enum node_condition { nothing_required = -3, rebalance_required = -2, unlink_required = -1 }; enum direction { left_child = -1, right_child = 1 }; typedef typename sync_monitor::template scoped_lock node_scoped_lock; //@endcond protected: //@cond template static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight ) { return cxx_allocator().New( std::forward( key ), nHeight, version, pParent, pLeft, pRight ); } static void free_node( node_type * pNode ) { // Free node without disposer assert( !pNode->is_valued( memory_model::memory_order_relaxed )); assert( pNode->m_SyncMonitorInjection.check_free()); cxx_allocator().Delete( pNode ); } static void free_value( mapped_type pVal ) { disposer()(pVal); } static node_type * child( node_type * pNode, int nDir, atomics::memory_order order ) { return pNode->child( nDir, order ); } static node_type * parent( node_type * pNode, atomics::memory_order order ) { return pNode->parent( order ); } // RCU safe disposer class rcu_disposer { node_type * m_pRetiredList; ///< head of retired node list mapped_type m_pRetiredValue; ///< value retired public: rcu_disposer() : m_pRetiredList( nullptr ) , m_pRetiredValue( nullptr ) {} ~rcu_disposer() { clean(); } void dispose( node_type * pNode ) { assert( !pNode->is_valued( memory_model::memory_order_relaxed )); pNode->m_pNextRemoved = m_pRetiredList; m_pRetiredList = pNode; } void dispose_value( mapped_type pVal ) { assert( m_pRetiredValue == nullptr ); m_pRetiredValue = pVal; } private: struct internal_disposer { void operator()( node_type * p ) const { free_node( p ); } }; void clean() { assert( !gc::is_locked()); // TODO: use RCU::batch_retire // Dispose nodes for ( node_type * p = m_pRetiredList; p; ) { node_type * pNext = static_cast( p->m_pNextRemoved ); // Value already disposed gc::template retire_ptr( p ); p = pNext; } // Dispose value if ( m_pRetiredValue ) gc::template retire_ptr( m_pRetiredValue ); } }; //@endcond protected: //@cond typename node_type::base_class m_Root; node_type * m_pRoot; item_counter m_ItemCounter; mutable sync_monitor m_Monitor; mutable stat m_stat; //@endcond public: /// Creates empty map BronsonAVLTreeMap() : m_pRoot( static_cast( &m_Root )) {} /// Destroys the map ~BronsonAVLTreeMap() { unsafe_clear(); } /// Inserts new node /** The \p key_type should be constructible from a value of type \p K. RCU \p synchronize() can be called. RCU should not be locked. Returns \p true if inserting successful, \p false otherwise. 
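A usage sketch (all names here are illustrative; it assumes a traits struct that plugs in a disposer deleting the owned value):
\code
struct value_disposer {
    void operator()( std::string* p ) const { delete p; }
};
struct map_traits: public cds::container::bronson_avltree::traits {
    typedef value_disposer disposer;
};
typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
typedef cds::container::BronsonAVLTreeMap< rcu, int, std::string*, map_traits > tree_map;

tree_map m;
m.insert( 1, new std::string( "one" )); // the tree owns the pointer; the disposer frees it
\endcode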
        */
        template <typename K>
        bool insert( K const& key, mapped_type pVal )
        {
            return do_update(key, key_comparator(),
                [pVal]( node_type * pNode ) -> mapped_type
                {
                    assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
                    CDS_UNUSED( pNode );
                    return pVal;
                },
                update_flags::allow_insert
            ) == update_flags::result_inserted;
        }

        /// Updates the value for \p key
        /**
            The operation performs inserting or updating the value for \p key in a lock-free manner.
            If \p bInsert is \p false, only updating of an existing node is possible.

            If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
            then the new node created from \p key will be inserted into the map; note that in this case
            the \ref key_type should be constructible from type \p K.
            Otherwise, the value for \p key will be changed to \p pVal.

            RCU \p synchronize() method can be called. RCU should not be locked.

            Returns <tt>std::pair<bool, bool></tt> where \p first is \p true if the operation is successful,
            \p second is \p true if a new node has been added or \p false if the node with \p key
            already exists.
        */
        template <typename K>
        std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
        {
            int result = do_update( key, key_comparator(),
                [pVal]( node_type * ) -> mapped_type
                {
                    return pVal;
                },
                update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
            );
            return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
        }
        //@endcond

        /// Delete \p key from the map
        /**
            RCU \p synchronize() method can be called. RCU should not be locked.

            Returns \p true if \p key is found and deleted, \p false otherwise.
        */
        template <typename K>
        bool erase( K const& key )
        {
            return do_remove(
                key,
                key_comparator(),
                []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
            );
        }

        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \p erase(K const&) but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less>
        bool erase_with( K const& key, Less pred )
        {
            CDS_UNUSED( pred );
            return do_remove(
                key,
                cds::opt::details::make_comparator_from_less<Less>(),
                []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
            );
        }

        /// Delete \p key from the map
        /**
            The function searches an item with key \p key, calls \p f functor
            and deletes the item. If \p key is not found, the functor is not called.

            The functor \p Func interface:
            \code
            struct functor {
                void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val ) { ... }
            };
            \endcode

            RCU \p synchronize method can be called. RCU should not be locked.

            Returns \p true if key is found and deleted, \p false otherwise.
        */
        template <typename K, typename Func>
        bool erase( K const& key, Func f )
        {
            return do_remove(
                key,
                key_comparator(),
                [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool {
                    assert( pVal );
                    f( k, *pVal );
                    disp.dispose_value(pVal);
                    return true;
                }
            );
        }

        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \p erase(K const&, Func) but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
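            A hedged sketch, reusing the illustrative \p map_type from the \p insert()
            example above: a \p Less functor over \p int keys, and a functor that
            receives the key and the dereferenced value:
            \code
            struct int_less {
                bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
            };

            theMap.erase_with( 10, int_less(),
                []( int const& key, int& val ) {
                    // last chance to observe key/val before the value is retired
                });
            \endcode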
*/ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return do_remove( key, cds::opt::details::make_comparator_from_less(), [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool { assert( pVal ); f( k, *pVal ); disp.dispose_value(pVal); return true; } ); } /// Extracts a value with minimal key from the map /** Returns \p exempt_ptr to the leftmost item. If the tree is empty, returns empty \p exempt_ptr. Note that the function returns only the value for minimal key. To retrieve its key use \p extract_min( Func ) member function. @note Due the concurrent nature of the map, the function extracts nearly minimum key. It means that the function gets leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. So, the function returns the item with minimum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return exempt_ptr(do_extract_min( []( key_type const& ) {})); } /// Extracts minimal key and corresponding value /** Returns \p exempt_ptr to the leftmost item. If the tree is empty, returns empty \p exempt_ptr. \p Func functor is used to store minimal key. \p Func has the following signature: \code struct functor { void operator()( key_type const& key ); }; \endcode If the tree is empty, \p f is not called. Otherwise, it is called with minimal key, the pointer to corresponding value is returned as \p exempt_ptr. @note Due the concurrent nature of the map, the function extracts nearly minimum key. It means that the function gets leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. So, the function returns the item with minimum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template exempt_ptr extract_min( Func f ) { return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); })); } /// Extracts minimal key and corresponding value /** This function is a shortcut for the following call: \code key_type key; exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } ); \endcode \p key_type should be copy-assignable. The copy of minimal key is returned in \p min_key argument. */ typename std::enable_if< std::is_copy_assignable::value, exempt_ptr >::type extract_min_key( key_type& min_key ) { return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; })); } /// Extracts a value with maximal key from the tree /** Returns \p exempt_ptr pointer to the rightmost item. If the set is empty, returns empty \p exempt_ptr. Note that the function returns only the value for maximal key. To retrieve its key use \p extract_max( Func ) member function. @note Due the concurrent nature of the map, the function extracts nearly maximal key. It means that the function gets rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. 
So, the function returns the item with maximum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() is called. */ exempt_ptr extract_max() { return exempt_ptr(do_extract_max( []( key_type const& ) {})); } /// Extracts the maximal key and corresponding value /** Returns \p exempt_ptr pointer to the rightmost item. If the set is empty, returns empty \p exempt_ptr. \p Func functor is used to store maximal key. \p Func has the following signature: \code struct functor { void operator()( key_type const& key ); }; \endcode If the tree is empty, \p f is not called. Otherwise, it is called with maximal key, the pointer to corresponding value is returned as \p exempt_ptr. @note Due the concurrent nature of the map, the function extracts nearly maximal key. It means that the function gets rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. So, the function returns the item with maximum key at the moment of tree traversing. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item. The deallocator will be implicitly invoked when the returned object is destroyed or when its \p release() is called. */ template exempt_ptr extract_max( Func f ) { return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); })); } /// Extracts the maximal key and corresponding value /** This function is a shortcut for the following call: \code key_type key; exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } ); \endcode \p key_type should be copy-assignable. The copy of maximal key is returned in \p max_key argument. */ typename std::enable_if< std::is_copy_assignable::value, exempt_ptr >::type extract_max_key( key_type& max_key ) { return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; })); } /// Extracts an item from the map /** The function searches an item with key equal to \p key in the tree, unlinks it, and returns \p exempt_ptr pointer to a value found. If \p key is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not destroy the value found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template exempt_ptr extract( Q const& key ) { return exempt_ptr(do_extract( key )); } /// Extracts an item from the map using \p pred for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key compare. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the tree. */ template exempt_ptr extract_with( Q const& key, Less pred ) { return exempt_ptr(do_extract_with( key, pred )); } /// Find the key \p key /** The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( key_type const& key, std::remove_pointer< mapped_type )::type& item ); }; \endcode where \p item is the item found. The functor is called under node-level lock. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. 
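            For illustration (reusing the hypothetical \p map_type from the \p insert()
            example above, where the value type is \p int*):
            \code
            int result = 0;
            bool bFound = theMap.find( 10, [&result]( int const& /*key*/, int& val ) {
                // called under the node-level lock - keep the functor short
                result = val;
            });
            \endcode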
*/ template bool find( K const& key, Func f ) { return do_find( key, key_comparator(), [&f]( node_type * pNode ) -> bool { assert( pNode != nullptr ); mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed ); if ( pVal ) { f( pNode->m_key, *pVal ); return true; } return false; } ); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \p find(K const&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return do_find( key, cds::opt::details::make_comparator_from_less(), [&f]( node_type * pNode ) -> bool { assert( pNode != nullptr ); mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed ); if ( pVal ) { f( pNode->m_key, *pVal ); return true; } return false; } ); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( K const& key ) { return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; }); } /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return do_find( key, cds::opt::details::make_comparator_from_less(), []( node_type * ) -> bool { return true; } ); } /// Clears the tree (thread safe, not atomic) /** The function unlink all items from the tree. The function is thread safe but not atomic: in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each node the \ref disposer will be called after unlinking. RCU \p synchronize method can be called. RCU should not be locked. */ void clear() { while ( extract_min()); } /// Clears the tree (not thread safe) /** This function is not thread safe and may be called only when no other thread deals with the tree. The function is used in the tree destructor. */ void unsafe_clear() { clear(); // temp solution //TODO } /// Checks if the map is empty bool empty() const { return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr; } /// Returns item count in the map /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use \p empty() member function for this purpose. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_stat; } /// Returns reference to \p sync_monitor object sync_monitor& monitor() { return m_Monitor; } //@cond sync_monitor const& monitor() const { return m_Monitor; } //@endcond /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. 
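            For example, a debug build might simply assert on the result
            (illustrative only, assuming \p theMap is an instance of this class):
            \code
            assert( theMap.check_consistency());
            \endcode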
*/ bool check_consistency() const { return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} ); } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. The functor \p Func is called if a violation of internal tree structure is found: \code struct functor { void operator()( size_t nLevel, size_t hLeft, size_t hRight ); }; \endcode where - \p nLevel - the level where the violation is found - \p hLeft - the height of left subtree - \p hRight - the height of right subtree The functor is called for each violation found. */ template bool check_consistency( Func f ) const { node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); if ( pChild ) { size_t nErrors = 0; do_check_consistency( pChild, 1, f, nErrors ); return nErrors == 0; } return true; } protected: //@cond template size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const { if ( pNode ) { key_comparator cmp; node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire ); node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire ); if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 ) ++nErrors; if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 ) ++nErrors; size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors ); size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors ); if ( hLeft >= hRight ) { if ( hLeft - hRight > 1 ) { f( nLevel, hLeft, hRight ); ++nErrors; } return hLeft; } else { if ( hRight - hLeft > 1 ) { f( nLevel, hLeft, hRight ); ++nErrors; } return hRight; } } return 0; } template bool do_find( Q& key, Compare cmp, Func f ) const { find_result result; { rcu_lock l; result = try_find( key, cmp, f, m_pRoot, right_child, 0 ); } assert( result != find_result::retry ); return result == find_result::found; } template int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags ) { check_deadlock_policy::check(); rcu_disposer removed_list; { rcu_lock l; return try_update_root( key, cmp, nFlags, funcUpdate, removed_list ); } } template bool do_remove( K const& key, Compare cmp, Func func ) { // Func must return true if the value was disposed // or false if the value was extracted check_deadlock_policy::check(); rcu_disposer removed_list; { rcu_lock l; return try_remove_root( key, cmp, func, removed_list ); } } template mapped_type do_extract_min( Func f ) { mapped_type pExtracted = nullptr; do_extract_minmax( left_child, [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; } ); return pExtracted; } template mapped_type do_extract_max( Func f ) { mapped_type pExtracted = nullptr; do_extract_minmax( right_child, [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; } ); return pExtracted; } template void do_extract_minmax( int nDir, Func func ) { check_deadlock_policy::check(); rcu_disposer removed_list; { rcu_lock l; while ( true ) { int result; // get right child of root node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); if ( pChild ) { version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onRemoveRootWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); result = 
update_flags::retry; } else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) { result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list ); } else result = update_flags::retry; } else return; if ( result == update_flags::retry ) m_stat.onRemoveRetry(); else { m_stat.onExtract( result == update_flags::result_removed ); return; } } } } template mapped_type do_extract( Q const& key ) { mapped_type pExtracted = nullptr; do_remove( key, key_comparator(), [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; } ); m_stat.onExtract( pExtracted != nullptr ); return pExtracted; } template mapped_type do_extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); mapped_type pExtracted = nullptr; do_remove( key, cds::opt::details::make_comparator_from_less(), [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; } ); m_stat.onExtract( pExtracted != nullptr ); return pExtracted; } //@endcond private: //@cond static int height( node_type * pNode, atomics::memory_order order ) { assert( pNode ); return pNode->m_nHeight.load( order ); } static void set_height( node_type * pNode, int h, atomics::memory_order order ) { assert( pNode ); pNode->m_nHeight.store( h, order ); } static int height_null( node_type * pNode, atomics::memory_order order ) { return pNode ? height( pNode, order ) : 0; } static constexpr int const c_stackSize = 64; template find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const { assert( gc::is_locked()); assert( pNode ); struct stack_record { node_type * pNode; version_type nVersion; int nDir; }; stack_record stack[c_stackSize]; int pos = 0; stack[0].pNode = pNode; stack[0].nVersion = nVersion; stack[0].nDir = nDir; while ( pos >= 0 ) { pNode = stack[pos].pNode; nVersion = stack[pos].nVersion; nDir = stack[pos].nDir; while ( true ) { node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire ); if ( !pChild ) { if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onFindRetry(); break; // retry } m_stat.onFindFailed(); return find_result::not_found; } int nCmp = cmp( key, pChild->m_key ); if ( nCmp == 0 ) { if ( pChild->is_valued( memory_model::memory_order_acquire )) { // key found node_scoped_lock l( m_Monitor, *pChild ); if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) { if ( pChild->is_valued( memory_model::memory_order_relaxed )) { if ( f( pChild )) { m_stat.onFindSuccess(); return find_result::found; } } } else { m_stat.onFindRetry(); continue; } } m_stat.onFindFailed(); return find_result::not_found; } else { version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onFindWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onFindRetry(); break; // retry } } else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild ) { if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onFindRetry(); break; // retry } ++pos; assert(pos < c_stackSize); stack[pos].pNode = pChild; stack[pos].nVersion = nChildVersion; stack[pos].nDir = nCmp; break; // child iteration } if ( 
pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onFindRetry(); break; // retry } } m_stat.onFindRetry(); } } return find_result::retry; } template int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp ) { assert( gc::is_locked()); while ( true ) { int result; // get right child of root node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); if ( pChild ) { version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onUpdateRootWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); result = update_flags::retry; } else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp ); else result = update_flags::retry; } else { // the tree is empty if ( nFlags & update_flags::allow_insert ) { // insert into tree as right child of the root { node_scoped_lock l( m_Monitor, *m_pRoot ); if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) { result = update_flags::retry; continue; } node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr ); mapped_type pVal = funcUpdate( pNew ); assert( pVal != nullptr ); pNew->m_pValue.store( pVal, memory_model::memory_order_release ); m_pRoot->child( pNew, right_child, memory_model::memory_order_release); set_height( m_pRoot, 2, memory_model::memory_order_release ); } ++m_ItemCounter; m_stat.onInsertSuccess(); return update_flags::result_inserted; } return update_flags::failed; } if ( result != update_flags::retry ) return result; } } template bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp ) { assert( gc::is_locked()); while ( true ) { int result; // get right child of root node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); if ( pChild ) { version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onRemoveRootWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); result = update_flags::retry; } else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) { result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp ); } else result = update_flags::retry; } else return false; if ( result == update_flags::retry ) m_stat.onRemoveRetry(); else { m_stat.onRemove( result == update_flags::result_removed ); return result == update_flags::result_removed; } } } template int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp ) { assert( gc::is_locked()); assert( nVersion != node_type::unlinked ); struct stack_record { node_type * pNode; version_type nVersion; }; stack_record stack[c_stackSize]; int pos = 0; stack[0].pNode = pNode; stack[0].nVersion = nVersion; while ( pos >= 0 ) { pNode = stack[pos].pNode; nVersion = stack[pos].nVersion; int nCmp = cmp( key, pNode->m_key ); if ( nCmp == 0 ) { int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp ); if ( result != update_flags::retry ) return result; --pos; m_stat.onUpdateRetry(); continue; } while ( true ) { node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire ); if ( 
pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onUpdateRetry(); break; } if ( pChild == nullptr ) { // insert new node if ( nFlags & update_flags::allow_insert ) { int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp ); if ( result != update_flags::retry ) return result; --pos; m_stat.onUpdateRetry(); break; } else return update_flags::failed; } else { // update child version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onUpdateWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); // retry } else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) { // this second read is important, because it is protected by nChildVersion // validate the read that our caller took to get to node if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) { --pos; m_stat.onUpdateRetry(); break; // retry } // At this point we know that the traversal our parent took to get to node is still valid. // The recursive implementation will validate the traversal from node to // child, so just prior to the node nVersion validation both traversals were definitely okay. // This means that we are no longer vulnerable to node shrinks, and we don't need // to validate node version any more. ++pos; assert( pos < c_stackSize ); stack[pos].pNode = pChild; stack[pos].nVersion = nChildVersion; assert( nChildVersion != node_type::unlinked ); break; // child iteration } m_stat.onUpdateRetry(); } } } return update_flags::retry; } template int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp ) { assert( gc::is_locked()); assert( nVersion != node_type::unlinked ); struct stack_record { node_type * pParent; node_type * pNode; version_type nVersion; }; stack_record stack[c_stackSize]; int pos = 0; stack[0].pParent = pParent; stack[0].pNode = pNode; stack[0].nVersion = nVersion; while ( pos >= 0 ) { pParent = stack[pos].pParent; pNode = stack[pos].pNode; nVersion = stack[pos].nVersion; int nCmp = cmp( key, pNode->m_key ); if ( nCmp == 0 ) { int result = try_remove_node( pParent, pNode, nVersion, func, disp ); if ( result != update_flags::retry ) return result; --pos; m_stat.onRemoveRetry(); continue; } while ( true ) { node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire ); if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onRemoveRetry(); break; } if ( pChild == nullptr ) return update_flags::failed; // update child version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onRemoveWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); // retry } else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) { // this second read is important, because it is protected by nChildVersion // validate the read that our caller took to get to node if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onRemoveRetry(); break; } // At this point we know that the traversal our parent took to get to node is still valid. // The recursive implementation will validate the traversal from node to // child, so just prior to the node nVersion validation both traversals were definitely okay. 
// This means that we are no longer vulnerable to node shrinks, and we don't need // to validate node version any more. ++pos; assert( pos < c_stackSize ); stack[pos].pParent = pNode; stack[pos].pNode = pChild; stack[pos].nVersion = nChildVersion; break; // child iteration } m_stat.onRemoveRetry(); } } return update_flags::retry; } template int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp ) { assert( gc::is_locked()); assert( nVersion != node_type::unlinked ); struct stack_record { node_type * pParent; node_type * pNode; version_type nVersion; }; stack_record stack[c_stackSize]; int pos = 0; stack[0].pParent = pParent; stack[0].pNode = pNode; stack[0].nVersion = nVersion; while ( pos >= 0 ) { pParent = stack[pos].pParent; pNode = stack[pos].pNode; nVersion = stack[pos].nVersion; while ( true ) { int iterDir = nDir; node_type * pChild = child( pNode, iterDir, memory_model::memory_order_acquire ); if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { --pos; m_stat.onRemoveRetry(); break; } if ( !pChild ) { // Found min/max if ( pNode->is_valued( memory_model::memory_order_acquire )) { int result = try_remove_node( pParent, pNode, nVersion, func, disp ); if ( result == update_flags::result_removed ) return result; --pos; m_stat.onRemoveRetry(); break; } else { // check right (for min) or left (for max) child node iterDir = -iterDir; pChild = child( pNode, iterDir, memory_model::memory_order_acquire ); if ( !pChild ) { --pos; m_stat.onRemoveRetry(); break; } } } version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); if ( nChildVersion & node_type::shrinking ) { m_stat.onRemoveWaitShrinking(); pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); // retry } else if ( pChild == child( pNode, iterDir, memory_model::memory_order_acquire )) { // this second read is important, because it is protected by nChildVersion // validate the read that our caller took to get to node if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) { --pos; m_stat.onRemoveRetry(); break; } // At this point we know that the traversal our parent took to get to node is still valid. // The recursive implementation will validate the traversal from node to // child, so just prior to the node nVersion validation both traversals were definitely okay. // This means that we are no longer vulnerable to node shrinks, and we don't need // to validate node version any more. 
++pos; assert( pos < c_stackSize ); stack[pos].pParent = pNode; stack[pos].pNode = pChild; stack[pos].nVersion = nChildVersion; break; // child iteration } m_stat.onRemoveRetry(); } } return update_flags::retry; } template int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp ) { node_type * pNew; auto fnCreateNode = [&funcUpdate]( node_type * node ) { mapped_type pVal = funcUpdate( node ); assert( pVal != nullptr ); node->m_pValue.store( pVal, memory_model::memory_order_release ); }; constexpr_if ( c_bRelaxedInsert ) { if ( pNode->version( memory_model::memory_order_acquire ) != nVersion || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr ) { m_stat.onInsertRetry(); return update_flags::retry; } fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr )); } node_type * pDamaged; { assert( pNode != nullptr ); node_scoped_lock l( m_Monitor, *pNode ); if ( pNode->version( memory_model::memory_order_acquire ) != nVersion || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr ) { constexpr_if ( c_bRelaxedInsert ) { mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed ); pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed ); free_value( pVal ); free_node( pNew ); m_stat.onRelaxedInsertFailed(); } m_stat.onInsertRetry(); return update_flags::retry; } constexpr_if ( !c_bRelaxedInsert ) fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr )); pNode->child( pNew, nDir, memory_model::memory_order_release ); pDamaged = fix_height_locked( pNode ); } ++m_ItemCounter; m_stat.onInsertSuccess(); if ( pDamaged ) { fix_height_and_rebalance( pDamaged, disp ); m_stat.onInsertRebalanceRequired(); } return update_flags::result_inserted; } template int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp ) { mapped_type pOld; bool bInserted; assert( pNode != nullptr ); { node_scoped_lock l( m_Monitor, *pNode ); if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) return update_flags::retry; if ( pNode->is_unlinked( memory_model::memory_order_acquire )) { m_stat.onUpdateUnlinked(); return update_flags::retry; } if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update)) { m_stat.onInsertFailed(); return update_flags::failed; } pOld = pNode->value( memory_model::memory_order_relaxed ); bInserted = pOld == nullptr; mapped_type pVal = funcUpdate( pNode ); if ( pVal == pOld ) pOld = nullptr; else { assert( pVal != nullptr ); pNode->m_pValue.store( pVal, memory_model::memory_order_release ); } } if ( pOld ) { disp.dispose_value(pOld); m_stat.onDisposeValue(); } if ( bInserted ) { ++m_ItemCounter; m_stat.onInsertSuccess(); return update_flags::result_inserted; } m_stat.onUpdateSuccess(); return update_flags::result_updated; } template int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp ) { assert( pParent != nullptr ); assert( pNode != nullptr ); if ( !pNode->is_valued( memory_model::memory_order_acquire )) return update_flags::failed; if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr ) { // pNode can be replaced with its child node_type * pDamaged; mapped_type pOld; { node_scoped_lock lp( m_Monitor, *pParent ); if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || 
parent( pNode, memory_model::memory_order_acquire ) != pParent ) return update_flags::retry; { node_scoped_lock ln( m_Monitor, *pNode ); if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) return update_flags::retry; pOld = pNode->value( memory_model::memory_order_relaxed ); if ( !pOld ) return update_flags::failed; if ( !try_unlink_locked( pParent, pNode, disp )) return update_flags::retry; } pDamaged = fix_height_locked( pParent ); } --m_ItemCounter; if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside m_stat.onDisposeValue(); else m_stat.onExtractValue(); if ( pDamaged ) { fix_height_and_rebalance( pDamaged, disp ); m_stat.onRemoveRebalanceRequired(); } } else { // pNode is an internal with two children mapped_type pOld; { node_scoped_lock ln( m_Monitor, *pNode ); pOld = pNode->value( memory_model::memory_order_relaxed ); if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) return update_flags::retry; if ( !pOld ) return update_flags::failed; pNode->m_pValue.store( nullptr, memory_model::memory_order_release ); m_stat.onMakeRoutingNode(); } --m_ItemCounter; if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside m_stat.onDisposeValue(); else m_stat.onExtractValue(); } return update_flags::result_removed; } bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp ) { // pParent and pNode must be locked assert( !pParent->is_unlinked(memory_model::memory_order_relaxed)); node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed ); node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed ); if ( pNode != pParentLeft && pNode != pParentRight ) { // node is no longer a child of parent return false; } assert( !pNode->is_unlinked( memory_model::memory_order_relaxed )); assert( pParent == parent( pNode, memory_model::memory_order_relaxed )); node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed ); node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed ); if ( pLeft != nullptr && pRight != nullptr ) { // splicing is no longer possible return false; } node_type * pSplice = pLeft ? 
pLeft : pRight; if ( pParentLeft == pNode ) pParent->m_pLeft.store( pSplice, memory_model::memory_order_release ); else pParent->m_pRight.store( pSplice, memory_model::memory_order_release ); if ( pSplice ) pSplice->parent( pParent, memory_model::memory_order_release ); // Mark the node as unlinked pNode->version( node_type::unlinked, memory_model::memory_order_release ); // The value will be disposed by calling function pNode->m_pValue.store( nullptr, memory_model::memory_order_release ); disp.dispose( pNode ); m_stat.onDisposeNode(); return true; } //@endcond private: // rotations //@cond int check_node_ordering( node_type* pParent, node_type* pChild ) { return key_comparator()( pParent->m_key, pChild->m_key ); } int estimate_node_condition( node_type * pNode ) { node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire ); node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire ); if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire )) return unlink_required; int h = height( pNode, memory_model::memory_order_acquire ); int hL = height_null( pLeft, memory_model::memory_order_acquire ); int hR = height_null( pRight, memory_model::memory_order_acquire ); int hNew = 1 + std::max( hL, hR ); int nBalance = hL - hR; if ( nBalance < -1 || nBalance > 1 ) return rebalance_required; return h != hNew ? hNew : nothing_required; } node_type * fix_height( node_type * pNode ) { assert( pNode != nullptr ); node_scoped_lock l( m_Monitor, *pNode ); return fix_height_locked( pNode ); } node_type * fix_height_locked( node_type * pNode ) { // pNode must be locked!!! int h = estimate_node_condition( pNode ); switch ( h ) { case rebalance_required: case unlink_required: return pNode; case nothing_required: return nullptr; default: set_height( pNode, h, memory_model::memory_order_release ); return parent( pNode, memory_model::memory_order_relaxed ); } } void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp ) { while ( pNode && parent( pNode, memory_model::memory_order_acquire )) { int nCond = estimate_node_condition( pNode ); if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire )) return; if ( nCond != unlink_required && nCond != rebalance_required ) pNode = fix_height( pNode ); else { node_type * pParent = parent( pNode, memory_model::memory_order_acquire ); assert( pParent != nullptr ); { node_scoped_lock lp( m_Monitor, *pParent ); if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) { node_scoped_lock ln( m_Monitor, *pNode ); pNode = rebalance_locked( pParent, pNode, disp ); } } } } } node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp ) { // pParent and pNode should be locked. 
// Returns a damaged node, or nullptr if no more rebalancing is necessary assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent ); node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed ); node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed ); if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) { if ( try_unlink_locked( pParent, pNode, disp )) return fix_height_locked( pParent ); else { // retry needed for pNode return pNode; } } assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); int h = height( pNode, memory_model::memory_order_acquire ); int hL = height_null( pLeft, memory_model::memory_order_acquire ); int hR = height_null( pRight, memory_model::memory_order_acquire ); int hNew = 1 + std::max( hL, hR ); int balance = hL - hR; if ( balance > 1 ) return rebalance_to_right_locked( pParent, pNode, pLeft, hR ); else if ( balance < -1 ) return rebalance_to_left_locked( pParent, pNode, pRight, hL ); else if ( hNew != h ) { set_height( pNode, hNew, memory_model::memory_order_release ); // pParent is already locked return fix_height_locked( pParent ); } else return nullptr; } node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR ) { assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent ); assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); // pParent and pNode is locked yet // pNode->pLeft is too large, we will rotate-right. // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft. 
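        // Illustration (a hedged sketch of the shapes involved, using the names of this function):
        // a single rotate-right of pNode around pLeft looks like
        //
        //          pNode                   pLeft
        //          /   \                   /    \
        //       pLeft  (hR)    ==>     pLLeft   pNode
        //       /   \                           /   \
        //   pLLeft  pLRight                pLRight  (hR)
        //
        // The double rotation (first rotate-left at pLeft) is chosen below when pLRight
        // is the taller child of pLeft, since a single right rotation would then leave
        // the resulting subtree unbalanced again.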
assert( pLeft != nullptr ); node_scoped_lock l( m_Monitor, *pLeft ); if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft ) return pNode; // retry for pNode assert( check_node_ordering( pNode, pLeft ) > 0 ); int hL = height( pLeft, memory_model::memory_order_acquire ); if ( hL - hR <= 1 ) return pNode; // retry node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed ); int hLR = height_null( pLRight, memory_model::memory_order_acquire ); node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed ); int hLL = height_null( pLLeft, memory_model::memory_order_acquire ); if ( pLRight ) { { node_scoped_lock lr( m_Monitor, *pLRight ); if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight ) return pNode; // retry assert( check_node_ordering( pLeft, pLRight ) < 0 ); hLR = height( pLRight, memory_model::memory_order_acquire ); if ( hLL > hLR ) return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR ); int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire ); int balance = hLL - hLRL; if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed ))) { // nParent.child.left won't be damaged after a double rotation return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL ); } } // focus on pLeft, if necessary pNode will be balanced later return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL ); } else if ( hLL > hLR ) { // rotate right return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR ); } return pNode; // retry } node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL ) { assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent ); assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); // pParent and pNode is locked yet assert( pRight != nullptr ); node_scoped_lock l( m_Monitor, *pRight ); if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight ) return pNode; // retry for pNode assert( check_node_ordering( pNode, pRight ) < 0 ); int hR = height( pRight, memory_model::memory_order_acquire ); if ( hL - hR >= -1 ) return pNode; // retry node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed ); int hRL = height_null( pRLeft, memory_model::memory_order_acquire ); node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed ); int hRR = height_null( pRRight, memory_model::memory_order_acquire ); if ( pRLeft ) { { node_scoped_lock lrl( m_Monitor, *pRLeft ); if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft ) return pNode; // retry assert( check_node_ordering( pRight, pRLeft ) > 0 ); hRL = height( pRLeft, memory_model::memory_order_acquire ); if ( hRR >= hRL ) return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR ); node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed ); int hRLR = height_null( pRLRight, memory_model::memory_order_acquire ); int balance = hRR - hRLR; if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed ))) return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR ); } return 
rebalance_to_right_locked( pNode, pRight, pRLeft, hRR ); } else if ( hRR > hRL ) return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR ); return pNode; // retry } static void begin_change( node_type * pNode, version_type version ) { assert(pNode->version(memory_model::memory_order_acquire) == version ); assert( (version & node_type::shrinking) == 0 ); pNode->exchange_version( version | node_type::shrinking, memory_model::memory_order_acquire ); } static void end_change( node_type * pNode, version_type version ) { // Clear shrinking and unlinked flags and increment version pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release ); } node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR ) { version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed ); begin_change( pNode, nodeVersion ); pNode->m_pLeft.store( pLRight, memory_model::memory_order_release ); if ( pLRight != nullptr ) { atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent ); pLRight->parent( pNode, memory_model::memory_order_relaxed ); assert( check_node_ordering( pNode, pLRight ) > 0 ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight ); pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); pNode->parent( pLeft, memory_model::memory_order_relaxed ); assert( check_node_ordering( pLeft, pNode ) < 0 ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); if ( pParentLeft == pNode ) { CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed ); } else { assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent ); pLeft->parent( pParent, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); // fix up heights links int hNode = 1 + std::max( hLR, hR ); set_height( pNode, hNode, memory_model::memory_order_release ); set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release); end_change( pNode, nodeVersion ); m_stat.onRotateRight(); // We have damaged pParent, pNode (now parent.child.right), and pLeft (now // parent.child). pNode is the deepest. Perform as many fixes as we can // with the locks we've got. // We've already fixed the height for pNode, but it might still be outside // our allowable balance range. In that case a simple fix_height_locked() // won't help. 
int nodeBalance = hLR - hR; if ( nodeBalance < -1 || nodeBalance > 1 ) { // we need another rotation at pNode m_stat.onRotateAfterRightRotation(); return pNode; } // we've fixed balance and height damage for pNode, now handle // extra-routing node damage if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) { // we need to remove pNode and then repair m_stat.onRemoveAfterRightRotation(); return pNode; } // we've already fixed the height at pLeft, do we need a rotation here? int leftBalance = hLL - hNode; if ( leftBalance < -1 || leftBalance > 1 ) { m_stat.onRotateAfterRightRotation(); return pLeft; } // pLeft might also have routing node damage (if pLeft.left was null) if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire)) { m_stat.onDamageAfterRightRotation(); return pLeft; } // try to fix the parent height while we've still got the lock return fix_height_locked( pParent ); } node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR ) { version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed ); begin_change( pNode, nodeVersion ); // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode pNode->m_pRight.store( pRLeft, memory_model::memory_order_release ); if ( pRLeft != nullptr ) { atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent ); pRLeft->parent( pNode, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft ); pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); pNode->parent( pRight, memory_model::memory_order_relaxed ); assert( check_node_ordering( pRight, pNode ) > 0 ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); if ( pParentLeft == pNode ) { CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed ); } else { assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent ); pRight->parent( pParent, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); // fix up heights int hNode = 1 + std::max( hL, hRL ); set_height( pNode, hNode, memory_model::memory_order_release ); set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release); end_change( pNode, nodeVersion ); m_stat.onRotateLeft(); int nodeBalance = hRL - hL; if ( nodeBalance < -1 || nodeBalance > 1 ) { m_stat.onRotateAfterLeftRotation(); return pNode; } if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) { m_stat.onRemoveAfterLeftRotation(); return pNode; } int rightBalance = hRR - hNode; if ( rightBalance < -1 || rightBalance > 1 ) { m_stat.onRotateAfterLeftRotation(); return pRight; } if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire)) { 
m_stat.onDamageAfterLeftRotation(); return pRight; } return fix_height_locked( pParent ); } node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL ) { version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); version_type leftVersion = pLeft->version( memory_model::memory_order_acquire ); node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed ); node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire ); node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire ); int hLRR = height_null( pLRR, memory_model::memory_order_acquire ); begin_change( pNode, nodeVersion ); begin_change( pLeft, leftVersion ); // fix up pNode links, careful about the order! pNode->m_pLeft.store( pLRR, memory_model::memory_order_release ); if ( pLRR != nullptr ) { atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRR->m_pParent ); pLRR->parent( pNode, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight ); pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed ); if ( pLRL != nullptr ) { atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRL->m_pParent ); pLRL->parent( pLeft, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pLeft ); pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent ); pLeft->parent( pLRight, memory_model::memory_order_relaxed ); assert( check_node_ordering( pLRight, pLeft ) > 0 ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pRight ); pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); pNode->parent( pLRight, memory_model::memory_order_relaxed ); assert( check_node_ordering( pLRight, pNode ) < 0 ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); if ( pPL == pNode ) { CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed ); } else { assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent ); pLRight->parent( pParent, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); // fix up heights int hNode = 1 + std::max( hLRR, hR ); set_height( pNode, hNode, memory_model::memory_order_release ); int hLeft = 1 + std::max( hLL, hLRL ); set_height( pLeft, hLeft, memory_model::memory_order_release ); set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release); end_change( pNode, nodeVersion ); end_change( pLeft, leftVersion ); m_stat.onRotateRightOverLeft(); // caller should have performed only a single rotation if 
pLeft was going // to end up damaged assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 ); assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire ))); // We have damaged pParent, pLR (now parent.child), and pNode (now // parent.child.right). pNode is the deepest. Perform as many fixes as we // can with the locks we've got. // We've already fixed the height for pNode, but it might still be outside // our allowable balance range. In that case a simple fix_height_locked() // won't help. int nodeBalance = hLRR - hR; if ( nodeBalance < -1 || nodeBalance > 1 ) { // we need another rotation at pNode m_stat.onRotateAfterRLRotation(); return pNode; } // pNode might also be damaged by being an unnecessary routing node if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) { // repair involves splicing out pNode and maybe more rotations m_stat.onRemoveAfterRLRotation(); return pNode; } // we've already fixed the height at pLRight, do we need a rotation here? int balanceLR = hLeft - hNode; if ( balanceLR < -1 || balanceLR > 1 ) { m_stat.onRotateAfterRLRotation(); return pLRight; } // try to fix the parent height while we've still got the lock return fix_height_locked( pParent ); } node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR ) { version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); version_type rightVersion = pRight->version( memory_model::memory_order_acquire ); node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed ); node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire ); node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire ); int hRLL = height_null( pRLL, memory_model::memory_order_acquire ); begin_change( pNode, nodeVersion ); begin_change( pRight, rightVersion ); // fix up pNode links, careful about the order! 
pNode->m_pRight.store( pRLL, memory_model::memory_order_release ); if ( pRLL != nullptr ) { atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLL->m_pParent ); pRLL->parent( pNode, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft ); pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed ); if ( pRLR != nullptr ) { atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLR->m_pParent ); pRLR->parent( pRight, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pRight ); pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent ); pRight->parent( pRLeft, memory_model::memory_order_relaxed ); assert( check_node_ordering( pRLeft, pRight ) < 0 ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pLeft ); pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); pNode->parent( pRLeft, memory_model::memory_order_relaxed ); assert( check_node_ordering( pRLeft, pNode ) > 0 ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); if ( pPL == pNode ) { CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed ); } else { assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed ); } atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent ); pRLeft->parent( pParent, memory_model::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); // fix up heights int hNode = 1 + std::max( hL, hRLL ); set_height( pNode, hNode, memory_model::memory_order_release ); int hRight = 1 + std::max( hRLR, hRR ); set_height( pRight, hRight, memory_model::memory_order_release ); set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release); end_change( pNode, nodeVersion ); end_change( pRight, rightVersion ); m_stat.onRotateLeftOverRight(); assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 ); int nodeBalance = hRLL - hL; if ( nodeBalance < -1 || nodeBalance > 1 ) { m_stat.onRotateAfterLRRotation(); return pNode; } if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) { m_stat.onRemoveAfterLRRotation(); return pNode; } int balRL = hRight - hNode; if ( balRL < -1 || balRL > 1 ) { m_stat.onRotateAfterLRRotation(); return pRLeft; } return fix_height_locked( pParent ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H libcds-2.3.3/cds/container/impl/ellen_bintree_map.h000066400000000000000000000575041341244201700223010ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H #define CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H #include #include #include #include namespace cds { namespace container { /// Map based on Ellen's et al binary search tree /** @ingroup cds_nonintrusive_map @ingroup cds_nonintrusive_tree @anchor cds_container_EllenBinTreeMap Source: - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" %EllenBinTreeMap is an unbalanced leaf-oriented binary search tree that implements the map abstract data type. Nodes maintains child pointers but not parent pointers. Every internal node has exactly two children, and all data of type std::pair currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes may or may not be in the map. Unlike \ref cds_container_EllenBinTreeSet "EllenBinTreeSet" keys are not a part of \p T type. The map can be represented as a set containing std::pair< Key const, T> values. Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeMap can act as a priority queue. In this case you should provide unique compound key, for example, the priority value plus some uniformly distributed random value. @warning Recall the tree is unbalanced. The complexity of operations is O(log N) for uniformly distributed random keys, but in the worst case the complexity is O(N). @note In the current implementation we do not use helping technique described in the original paper. In Hazard Pointer schema helping is too complicated and does not give any observable benefits. Instead of helping, when a thread encounters a concurrent operation it just spins waiting for the operation done. Such solution allows greatly simplify implementation of the tree. Template arguments : - \p GC - safe memory reclamation (i.e. light-weight garbage collector) type, like \p cds::gc::HP, \p cds::gc::DHP - \p Key - key type. Should be default-constructible - \p T - value type to be stored in tree's leaf nodes. - \p Traits - map traits, default is \p ellen_bintree::traits It is possible to declare option-based tree with \p ellen_bintree::make_map_traits metafunction instead of \p Traits template argument. @note Do not include header file directly. 
There are header file for each GC type: - - for Hazard Pointer GC cds::gc::HP - - for Dynamic Hazard Pointer GC cds::gc::DHP - - for RCU GC (see \ref cds_container_EllenBinTreeMap_rcu "RCU-based EllenBinTreeMap") */ template < class GC, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = ellen_bintree::traits #else class Traits #endif > class EllenBinTreeMap #ifdef CDS_DOXYGEN_INVOKED : public cds::intrusive::EllenBinTree< GC, Key, T, Traits > #else : public ellen_bintree::details::make_ellen_bintree_map< GC, Key, T, Traits >::type #endif { //@cond typedef ellen_bintree::details::make_ellen_bintree_map< GC, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef Key key_type; ///< type of a key stored in the map typedef T mapped_type; ///< type of value stored in the map typedef std::pair< key_type const, mapped_type > value_type ; ///< Key-value pair stored in leaf node of the mp typedef Traits traits; ///< Map traits static_assert( std::is_default_constructible::value, "Key should be default constructible type"); # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less # else typedef typename maker::intrusive_traits::compare key_comparator; # endif typedef typename base_class::item_counter item_counter; ///< Item counting policy typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model typedef typename base_class::node_allocator node_allocator_type; ///< allocator for maintaining internal node typedef typename base_class::stat stat; ///< internal statistics type typedef typename traits::copy_policy copy_policy; ///< key copy policy typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::allocator allocator_type; ///< Allocator for leaf nodes typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator protected: //@cond typedef typename base_class::value_type leaf_node; typedef typename base_class::internal_node internal_node; typedef typename base_class::update_desc update_desc; typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< leaf_node, value_type, details::guarded_ptr_cast_set > guarded_ptr; public: /// Default constructor EllenBinTreeMap() : base_class() {} /// Clears the map ~EllenBinTreeMap() {} /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from a value of type \p K. In trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K const& key ) { return insert_with( key, [](value_type&){} ); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p value_type should be constructible from \p val of type \p V. 
Returns \p true if \p val is inserted into the map, \p false otherwise. */ template bool insert( K const& key, V const& val ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key, val )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. The key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. */ template bool insert_with( const K& key, Func func ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); if ( base_class::insert( *pNode, [&func]( leaf_node& item ) { func( item.m_Value ); } )) { pNode.release(); return true; } return false; } /// For key \p key inserts data of type \p value_type created in-place from \p args /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().MoveNew( key_type( std::forward(key)), mapped_type( std::forward(args)... ))); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the map, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor \p func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the map The functor may change any fields of the \p item.second that is \ref mapped_type; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. 
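For example (a minimal usage sketch; \p map_type is an assumed typedef for \p %EllenBinTreeMap< cds::gc::HP, int, int, Traits >):
\code
map_type theMap;
// Insert key 10 with a default value, or increment the value already stored
std::pair<bool, bool> res = theMap.update( 10,
    []( bool bNew, map_type::value_type& item ) {
        if ( bNew )
            item.second = 0;
        ++item.second;
    });
// res.first  - the operation succeeded
// res.second - a new item has been added
\endcode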
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K const& key, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); std::pair res = base_class::update( *pNode, [&func](bool bNew, leaf_node& item, leaf_node const& ){ func( bNew, item.m_Value ); }, bAllowInsert ); if ( res.first && res.second ) pNode.release(); return res; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Delete \p key from the map /**\anchor cds_nonintrusive_EllenBinTreeMap_erase_val Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( K const& key ) { return base_class::erase(key); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); } /// Delete \p key from the map /** \anchor cds_nonintrusive_EllenBinTreeMap_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return base_class::erase( key, [&f]( leaf_node& node) { f( node.m_Value ); } ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), [&f]( leaf_node& node) { f( node.m_Value ); } ); } /// Extracts an item with minimal key from the map /** If the map is not empty, the function returns an guarded pointer to minimum value. If the map is empty, the function returns an empty \p guarded_ptr. @note Due the concurrent nature of the map, the function extracts nearly minimum key. It means that the function gets leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. So, the function returns the item with minimum key at the moment of tree traversing. The guarded pointer prevents deallocation of returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. */ guarded_ptr extract_min() { return guarded_ptr( base_class::extract_min_()); } /// Extracts an item with maximal key from the map /** If the map is not empty, the function returns a guarded pointer to maximal value. If the map is empty, the function returns an empty \p guarded_ptr. 
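For example, using the map as a priority queue (a sketch; \p theMap is an assumed \p %EllenBinTreeMap instance of type \p map_type):
\code
// Pop the item with the (nearly) maximal key
map_type::guarded_ptr gp = theMap.extract_max();
if ( gp ) {
    // gp->first is the key, gp->second is the value
    process_item( gp->first, gp->second ); // process_item() is an illustrative user function
}
// the internal guard is released when gp is destroyed
\endcode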
@note Due to the concurrent nature of the map, the function extracts a nearly maximal key. It means that the function gets the rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key. So, the function returns the item with the maximum key at the moment of tree traversing. The guarded pointer prevents deallocation of the returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. */ guarded_ptr extract_max() { return guarded_ptr( base_class::extract_max_()); } /// Extracts an item from the tree /** \anchor cds_nonintrusive_EllenBinTreeMap_extract The function searches an item with key equal to \p key in the tree, unlinks it, and returns a guarded pointer to the item found. If the item is not found the function returns an empty \p guarded_ptr. The guarded pointer prevents deallocation of the returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. */ template <typename Q> guarded_ptr extract( Q const& key ) { return guarded_ptr( base_class::extract_( key )); } /// Extracts an item from the map using \p pred for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_extract "extract(Q const&)" but \p pred is used for key compare. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. */ template <typename Q, typename Less> guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return guarded_ptr( base_class::extract_with_( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >())); } /// Find the key \p key /** \anchor cds_nonintrusive_EllenBinTreeMap_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function returns \p true if \p key is found, \p false otherwise. */ template <typename K, typename Func> bool find( K const& key, Func f ) { return base_class::find( key, [&f](leaf_node& item, K const& ) { f( item.m_Value );}); } /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less, typename Func> bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), [&f](leaf_node& item, K const& ) { f( item.m_Value );}); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template <typename K> bool contains( K const& key ) { return base_class::contains( key ); } //@cond template <typename K> CDS_DEPRECATED("deprecated, use contains()") bool find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); } //@cond template <typename K, typename Less> CDS_DEPRECATED("deprecated, use contains()") bool find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and returns the item found /** @anchor cds_nonintrusive_EllenBinTreeMap_get The function searches the item with key equal to \p key and returns the item found as a guarded pointer. If \p key is not found the function returns an empty \p guarded_ptr. The guarded pointer prevents deallocation of the returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. */ template <typename Q> guarded_ptr get( Q const& key ) { return guarded_ptr( base_class::get_( key )); } /// Finds \p key with predicate \p pred and returns the item found /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_get "get(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. */ template <typename Q, typename Less> guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return guarded_ptr( base_class::get_with_( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >())); } /// Clears the map (not atomic) void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if the item count is zero then the map is empty. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map /** Only leaf nodes containing user data are counted. The value returned depends on the item counter type provided by the \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use the \p empty() member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. */ bool check_consistency() const { return base_class::check_consistency(); } }; }} // namespace cds::container #endif //#ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H libcds-2.3.3/cds/container/impl/ellen_bintree_set.h000066400000000000000000000631601341244201700223120ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H #define CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H #include #include #include #include namespace cds { namespace container { /// Set based on Ellen's et al binary search tree /** @ingroup cds_nonintrusive_set @ingroup cds_nonintrusive_tree @anchor cds_container_EllenBinTreeSet Source: - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" %EllenBinTreeSet is an unbalanced leaf-oriented binary search tree that implements the set abstract data type. Nodes maintain child pointers but not parent pointers.
Every internal node has exactly two children, and all data of type \p T currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct the \p find operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes may or may not be in the set. \p Key type is a subset of \p T type. A key-extracting functor converting an object of type \p T to an object of type \p Key must be defined. Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeSet can act as a priority queue. In this case you should provide a unique compound key, for example, the priority value plus some uniformly distributed random value. @warning Recall the tree is unbalanced. The complexity of operations is O(log N) for uniformly distributed random keys, but in the worst case the complexity is O(N). @note In the current implementation we do not use the helping technique described in the original paper. In the Hazard Pointer schema helping is too complicated and does not give any observable benefits. Instead of helping, when a thread encounters a concurrent operation it just spins waiting until that operation is done. This solution greatly simplifies the implementation of the tree. Template arguments: - \p GC - safe memory reclamation (i.e. light-weight garbage collector) type, like \p cds::gc::HP, \p cds::gc::DHP - \p Key - key type, a subset of \p T - \p T - type to be stored in tree's leaf nodes. - \p Traits - set traits, default is \p ellen_bintree::traits It is possible to declare an option-based tree with the \p ellen_bintree::make_set_traits metafunction instead of the \p Traits template argument. @note Do not include the header file directly. There is a header file for each GC type: - - for \p cds::gc::HP - - for \p cds::gc::DHP - - for RCU GC (see \ref cds_container_EllenBinTreeSet_rcu "RCU-based EllenBinTreeSet") @anchor cds_container_EllenBinTreeSet_less Predicate requirements \p Traits::less, \p Traits::compare and other predicates used with member functions should accept at least parameters of type \p T and \p Key in any combination. For example, for a \p Foo struct with a \p std::string key field the appropriate \p less functor is: \code struct Foo { std::string m_strKey; ...
}; struct less { bool operator()( Foo const& v1, Foo const& v2 ) const { return v1.m_strKey < v2.m_strKey ; } bool operator()( Foo const& v, std::string const& s ) const { return v.m_strKey < s ; } bool operator()( std::string const& s, Foo const& v ) const { return s < v.m_strKey ; } // Support comparing std::string and char const * bool operator()( std::string const& s, char const * p ) const { return s.compare(p) < 0 ; } bool operator()( Foo const& v, char const * p ) const { return v.m_strKey.compare(p) < 0 ; } bool operator()( char const * p, std::string const& s ) const { return s.compare(p) > 0; } bool operator()( char const * p, Foo const& v ) const { return v.m_strKey.compare(p) > 0; } }; \endcode */ template < class GC, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = ellen_bintree::traits #else class Traits #endif > class EllenBinTreeSet #ifdef CDS_DOXYGEN_INVOKED : public cds::intrusive::EllenBinTree< GC, Key, T, Traits > #else : public ellen_bintree::details::make_ellen_bintree_set< GC, Key, T, Traits >::type #endif { //@cond typedef ellen_bintree::details::make_ellen_bintree_set< GC, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef Key key_type; ///< type of a key to be stored in internal nodes; key is a part of \p value_type typedef T value_type; ///< type of value to be stored in the binary tree typedef Traits traits; ///< Traits template parameter # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. # else typedef typename maker::intrusive_traits::compare key_comparator; # endif typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename base_class::stat stat; ///< internal statistics type typedef typename traits::key_extractor key_extractor; ///< key extracting functor typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::allocator allocator_type; ///< Allocator for leaf nodes typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator protected: //@cond typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; typedef typename base_class::value_type leaf_node; typedef typename base_class::internal_node internal_node; typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< leaf_node, value_type, details::guarded_ptr_cast_set > guarded_ptr; public: /// Default constructor EllenBinTreeSet() : base_class() {} /// Clears the set ~EllenBinTreeSet() {} /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain at least the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. 
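For example (a sketch reusing the illustrative \p Foo type above; \p set_type is an assumed typedef for \p %EllenBinTreeSet< cds::gc::HP, std::string, Foo, Traits >):
\code
set_type theSet;
Foo v;
v.m_strKey = "apple";
bool bOk = theSet.insert( v ); // bOk == false if an item with the same key is already in the set
\endcode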
*/ template <typename Q> bool insert( Q const& val ) { scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Inserts new node /** The function allows splitting the creation of a new item into two parts: - create an item with the key only - insert the new item into the set - if inserting succeeded, call the \p f functor to initialize the value fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor \p f must guarantee that while \p val is being changed no other changes could be made on this set's item by concurrent threads. The user-defined functor is called only if the inserting succeeds. */ template <typename Q, typename Func> bool insert( Q const& val, Func f ) { scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); if ( base_class::insert( *sp.get(), [&f]( leaf_node& v ) { f( v.m_Value ); } )) { sp.release(); return true; } return false; } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor \p func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function The functor can change non-key fields of the \p item; however, \p func must guarantee that during changing no other modifications could be made on this item by concurrent threads. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, i.e. the node has been inserted or updated, \p second is \p true if a new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template <typename Q, typename Func> std::pair<bool, bool> update( const Q& val, Func func, bool bAllowInsert = true ) { scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); std::pair<bool, bool> bRes = base_class::update( *sp, [&func, &val](bool bNew, leaf_node& node, leaf_node&){ func( bNew, node.m_Value, val ); }, bAllowInsert ); if ( bRes.first && bRes.second ) sp.release(); return bRes; } //@cond template <typename Q, typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( const Q& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts data of type \p value_type created in-place from \p args /** Returns \p true if inserting is successful, \p false otherwise. */ template <typename... Args> bool emplace( Args&&... args ) { scoped_node_ptr sp( cxx_leaf_node_allocator().MoveNew( std::forward<Args>(args)... )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Delete \p key from the set /** \anchor cds_nonintrusive_EllenBinTreeSet_erase_val The item comparator should be able to compare the type \p value_type and the type \p Q. Return \p true if the key is found and deleted, \p false otherwise */ template <typename Q> bool erase( Q const& key ) { return base_class::erase( key ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); } /// Delete \p key from the set /** \anchor cds_nonintrusive_EllenBinTreeSet_erase_func The function searches an item with key \p key, calls the \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Since the key of %EllenBinTreeSet's \p value_type is not explicitly specified, template parameter \p Q defines the key type to search for. The item comparator should be able to compare the type \p T of a tree item and the type \p Q. Return \p true if the key is found and deleted, \p false otherwise */ template <typename Q, typename Func> bool erase( Q const& key, Func f ) { return base_class::erase( key, [&f]( leaf_node const& node) { f( node.m_Value ); } ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), [&f]( leaf_node const& node) { f( node.m_Value ); } ); } /// Extracts an item with minimal key from the set /** If the set is not empty, the function returns a guarded pointer to the minimum value. If the set is empty, the function returns an empty \p guarded_ptr. @note Due to the concurrent nature of the set, the function extracts a nearly minimal key. It means that the function gets the leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key. So, the function returns the item with the minimum key at the moment of tree traversing. The guarded pointer prevents deallocation of the returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. */ guarded_ptr extract_min() { return guarded_ptr( base_class::extract_min_()); } /// Extracts an item with maximal key from the set /** If the set is not empty, the function returns a guarded pointer to the maximal value. If the set is empty, the function returns an empty \p guarded_ptr. @note Due to the concurrent nature of the set, the function extracts a nearly maximal key. It means that the function gets the rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key. So, the function returns the item with the maximum key at the moment of tree traversing. The guarded pointer prevents deallocation of the returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. */ guarded_ptr extract_max() { return guarded_ptr( base_class::extract_max_()); } /// Extracts an item from the tree /** \anchor cds_nonintrusive_EllenBinTreeSet_extract The function searches an item with key equal to \p key in the tree, unlinks it, and returns a guarded pointer to it.
If the item is not found the function returns an empty \p guarded_ptr. The guarded pointer prevents deallocation of returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. */ template guarded_ptr extract( Q const& key ) { return base_class::extract_( key ); } /// Extracts an item from the set using \p pred for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_extract "extract(Q const&)" but \p pred is used for key compare. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::extract_with_( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); } /// Find the key \p key /** @anchor cds_nonintrusive_EllenBinTreeSet_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return base_class::find( key, [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); }); } //@cond template bool find( Q const& key, Func f ) { return base_class::find( key, [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); } ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. 
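For example (a sketch; with the illustrative \p less predicate from the class description the set can be searched by keys of several types):
\code
bool b1 = theSet.contains( std::string( "apple" ));
bool b2 = theSet.contains( "apple" ); // char const* is handled by the same predicate
\endcode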
*/ template bool contains( Q const & key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const & key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and returns the item found /** @anchor cds_nonintrusive_EllenBinTreeSet_get The function searches the item with key equal to \p key and returns the item found as an guarded pointer. The function returns \p true if \p key is found, \p false otherwise. The guarded pointer prevents deallocation of returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. */ template guarded_ptr get( Q const& key ) { return base_class::get_( key ); } /// Finds \p key with predicate \p pred and returns the item found /** The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_get "get(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED(pred); return base_class::get_with_( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); } /// Clears the set (not atomic) /** The function unlink all items from the tree. The function is not atomic, thus, in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each leaf the \ref disposer will be called after unlinking. */ void clear() { base_class::clear(); } /// Checks if the set is empty bool empty() const { return base_class::empty(); } /// Returns item count in the set /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use \p empty() member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. */ bool check_consistency() const { return base_class::check_consistency(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H libcds-2.3.3/cds/container/impl/feldman_hashmap.h000066400000000000000000001057001341244201700217340ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H #define CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H #include #include #include namespace cds { namespace container { /// Hash map based on multi-level array /** @ingroup cds_nonintrusive_map @anchor cds_container_FeldmanHashMap_hp Source: - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: Wait-free Extensible Hash Maps" [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform a global resize, the process of redistributing the elements in a hash map that occurs when adding new buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all threads will be forced to wait on the thread that is performing the involved process of resizing the hash map and redistributing the elements. \p %FeldmanHashSet implementation avoids global resizes through new array allocation. By allowing concurrent expansion this structure is free from the overhead of an explicit resize, which facilitates concurrent operations. The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure; which, in combination with perfect hashing, means that each element has a unique final, as well as current, position. It is important to note that the perfect hash function required by our hash map is trivial to realize as any hash function that permutes the bits of the key is suitable. This is possible because of our approach to the hash function; we require that it produces hash values that are equal in size to that of the key. We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys are not provided for in the standard semantics of a hash map. \p %FeldmanHashMap is a multi-level array which has an internal structure similar to a tree: @image html feldman_hashset.png The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node. A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked. A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB) of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node; any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark on the second-least significant bit. \p %FeldmanHashMap multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length; a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength. The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node. We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which we need to operate; this is initially one, because of \p head. 
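For example (an illustrative configuration): with a 32-bit hash (\p c_hash_size == 4), \p head_bits = 8 and \p array_bits = 4, the \p head array has 2^8 = 256 slots, every other \p arrayNode has 2^4 = 16 slots, and \p maxDepth is 1 + (32 - 8) / 4 = 7, each level consuming its own slice of the hash bits.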
That approach to the structure of the hash map uses an extensible hashing scheme; the hash value is treated as a bit string and rehash incrementally. @note Two important things you should keep in mind when you're using \p %FeldmanHashMap: - all keys is converted to fixed-size bit-string by hash functor provided. You can use variable-length keys, for example, \p std::string as a key for \p %FeldmanHashMap, but real key in the map will be fixed-size hash values of your keys. For the strings you may use well-known hashing algorithms like SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash and so on, which converts variable-length strings to fixed-length bit-strings, and such hash values will be the keys in \p %FeldmanHashMap. If your key is fixed-sized the hash functor is optional, see \p feldman_hashmap::traits::hash for explanation and examples. - \p %FeldmanHashMap uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, have identical hash then you cannot insert both that keys in the map. \p %FeldmanHashMap does not maintain the key, it maintains its fixed-size hash value. The map supports @ref cds_container_FeldmanHashMap_iterators "bidirectional thread-safe iterators". Template parameters: - \p GC - safe memory reclamation schema. Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type" - \p Key - a key type to be stored in the map - \p T - a value type to be stored in the map - \p Traits - type traits, the structure based on \p feldman_hashmap::traits or result of \p feldman_hashmap::make_traits metafunction. There are several specializations of \p %FeldmanHashMap for each \p GC. You should include: - for \p gc::HP garbage collector - for \p gc::DHP garbage collector - for \ref cds_container_FeldmanHashMap_rcu "RCU type". RCU specialization has a slightly different interface. 
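A minimal declaration sketch (assuming the default traits and the header-per-GC convention described above; since \p int is a fixed-size key, no hash functor needs to be provided):
\code
#include <cds/container/feldman_hashmap_hp.h>

typedef cds::container::FeldmanHashMap< cds::gc::HP, int, std::string > map_type;

map_type theMap( 8, 4 ); // 2^8-slot head array, 2^4-slot array nodes
theMap.insert( 42, "forty two" );
\endcode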
*/ template < class GC ,typename Key ,typename T #ifdef CDS_DOXYGEN_INVOKED ,class Traits = feldman_hashmap::traits #else ,class Traits #endif > class FeldmanHashMap #ifdef CDS_DOXYGEN_INVOKED : protected cds::intrusive::FeldmanHashSet< GC, std::pair, Traits > #else : protected cds::container::details::make_feldman_hashmap< GC, Key, T, Traits >::type #endif { //@cond typedef cds::container::details::make_feldman_hashmap< GC, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef Key key_type; ///< Key type typedef T mapped_type; ///< Mapped type typedef std::pair< key_type const, mapped_type> value_type; ///< Key-value pair to be stored in the map typedef Traits traits; ///< Map traits #ifdef CDS_DOXYGEN_INVOKED typedef typename traits::hash hasher; ///< Hash functor, see \p feldman_hashmap::traits::hash #else typedef typename maker::hasher hasher; #endif typedef typename maker::hash_type hash_type; ///< Hash type deduced from \p hasher return type typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p Traits::compare and \p Traits::less typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Element allocator typedef typename traits::node_allocator node_allocator; ///< Array node allocator typedef typename traits::memory_model memory_model; ///< Memory model typedef typename traits::back_off back_off; ///< Backoff strategy typedef typename traits::stat stat; ///< Internal statistics type /// Count of hazard pointers required static constexpr size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; /// The size of \p hash_type in bytes, see \p feldman_hashmap::traits::hash_size for explanation static constexpr size_t const c_hash_size = base_class::c_hash_size; /// Level statistics typedef feldman_hashmap::level_statistics level_statistics; protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef std::unique_ptr< node_type, typename maker::node_disposer > scoped_node_ptr; template class bidirectional_iterator: public base_class::iterator_base { friend class FeldmanHashMap; typedef typename base_class::iterator_base iterator_base; protected: static constexpr bool const c_bConstantIterator = IsConst; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: bidirectional_iterator() noexcept {} bidirectional_iterator( bidirectional_iterator const& rhs ) noexcept : iterator_base( rhs ) {} bidirectional_iterator& operator=( bidirectional_iterator const& rhs ) noexcept { iterator_base::operator=( rhs ); return *this; } bidirectional_iterator& operator++() { iterator_base::operator++(); return *this; } bidirectional_iterator& operator--() { iterator_base::operator--(); return *this; } value_ptr operator ->() const noexcept { node_type * p = iterator_base::pointer(); return p ? 
&p->m_Value : nullptr; } value_ref operator *() const noexcept { node_type * p = iterator_base::pointer(); assert( p ); return p->m_Value; } void release() { iterator_base::release(); } template bool operator ==( bidirectional_iterator const& rhs ) const noexcept { return iterator_base::operator==( rhs ); } template bool operator !=( bidirectional_iterator const& rhs ) const noexcept { return !( *this == rhs ); } public: // for internal use only! bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) : iterator_base( set, pNode, idx, false ) {} bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) : iterator_base( set, pNode, idx ) {} }; /// Reverse bidirectional iterator template class reverse_bidirectional_iterator : public base_class::iterator_base { friend class FeldmanHashMap; typedef typename base_class::iterator_base iterator_base; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: reverse_bidirectional_iterator() noexcept : iterator_base() {} reverse_bidirectional_iterator( reverse_bidirectional_iterator const& rhs ) noexcept : iterator_base( rhs ) {} reverse_bidirectional_iterator& operator=( reverse_bidirectional_iterator const& rhs) noexcept { iterator_base::operator=( rhs ); return *this; } reverse_bidirectional_iterator& operator++() { iterator_base::operator--(); return *this; } reverse_bidirectional_iterator& operator--() { iterator_base::operator++(); return *this; } value_ptr operator ->() const noexcept { node_type * p = iterator_base::pointer(); return p ? &p->m_Value : nullptr; } value_ref operator *() const noexcept { node_type * p = iterator_base::pointer(); assert( p ); return p->m_Value; } void release() { iterator_base::release(); } template bool operator ==( reverse_bidirectional_iterator const& rhs ) const { return iterator_base::operator==( rhs ); } template bool operator !=( reverse_bidirectional_iterator const& rhs ) { return !( *this == rhs ); } public: // for internal use only! 
reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) : iterator_base( set, pNode, idx, false ) {} reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) : iterator_base( set, pNode, idx, false ) { iterator_base::backward(); } }; //@endcond public: #ifdef CDS_DOXYGEN_INVOKED /// Guarded pointer typedef typename gc::template guarded_ptr< value_type > guarded_ptr; #else typedef typename gc::template guarded_ptr< node_type, value_type, cds::container::details::guarded_ptr_cast_set > guarded_ptr; #endif #ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional iterator" type typedef implementation_defined const_iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional const iterator" type typedef implementation_defined reverse_iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional reverse iterator" type typedef implementation_defined const_reverse_iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional reverse const iterator" type #else typedef bidirectional_iterator iterator; typedef bidirectional_iterator const_iterator; typedef reverse_bidirectional_iterator reverse_iterator; typedef reverse_bidirectional_iterator const_reverse_iterator; #endif protected: //@cond hasher m_Hasher; //@endcond public: /// Creates empty map /** @param head_bits - 2head_bits specifies the size of head array, minimum is 4. @param array_bits - 2array_bits specifies the size of array node, minimum is 2. Equation for \p head_bits and \p array_bits: \code c_hash_size * 8 == head_bits + N * array_bits \endcode where \p N is multi-level array depth. */ FeldmanHashMap( size_t head_bits = 8, size_t array_bits = 4 ) : base_class( head_bits, array_bits ) {} /// Destructs the map and frees all data ~FeldmanHashMap() {} /// Inserts new element with key and default value /** The function creates an element with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. In trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K&& key ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ))); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Inserts new element /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p value_type should be constructible from \p val of type \p V. Returns \p true if \p val is inserted into the map, \p false otherwise. 
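For example (a sketch, assuming the illustrative \p map_type typedef above):
\code
map_type theMap( 8, 4 );
bool bOk = theMap.insert( 5, std::string( "five" ));
// bOk == false if an element with the same hash( key_type( 5 )) already exists
\endcode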
*/ template bool insert( K&& key, V&& val ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ), std::forward( val ))); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Inserts new element and initialize it by a functor /** This function inserts new element with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. \p key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. */ template bool insert_with( K&& key, Func func ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ))); if ( base_class::insert( *sp, [&func]( node_type& item ) { func( item.m_Value ); } )) { sp.release(); return true; } return false; } /// For key \p key inserts data of type \p value_type created in-place from std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ), std::forward( args )... )); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the map, then the new item created from \p key will be inserted into the map iff \p bInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, it is replaced with a new item created from \p key. The functor \p Func signature: \code struct my_functor { void operator()( value_type& item, value_type * old ); }; \endcode where: - \p item - item of the map - \p old - old item of the map, if \p nullptr - the new item was inserted The functor may change any fields of the \p item.second. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K&& key, Func func, bool bInsert = true ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ))); std::pair result = base_class::do_update( *sp, [&func]( node_type& node, node_type * old ) { func( node.m_Value, old ? &old->m_Value : nullptr );}, bInsert ); if ( result.first ) sp.release(); return result; } /// Delete \p key from the map /** \p key_type must be constructible from value of type \p K. The function deletes the element with hash value equal to hash( key_type( key )) Return \p true if \p key is found and deleted, \p false otherwise. 
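For example (a sketch):
\code
if ( theMap.erase( 5 )) {
    // the element whose hash equals hash( key_type( 5 )) has been removed
}
\endcode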
*/ template bool erase( K const& key ) { return base_class::erase( m_Hasher( key_type( key ))); } /// Delete \p key from the map /** The function searches an item with hash value equal to hash( key_type( key )), calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()( value_type& item ) { ... } }; \endcode where \p item is the element found. \p key_type must be constructible from value of type \p K. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return base_class::erase( m_Hasher( key_type( key )), [&f]( node_type& node) { f( node.m_Value ); } ); } /// Deletes the element pointed by iterator \p iter /** Returns \p true if the operation is successful, \p false otherwise. The function does not invalidate the iterator, it remains valid and can be used for further traversing. */ bool erase_at( iterator const& iter ) { return base_class::do_erase_at( iter ); } //@cond bool erase_at( reverse_iterator const& iter ) { return base_class::do_erase_at( iter ); } bool erase_at( const_iterator const& iter ) { return base_class::do_erase_at( iter ); } bool erase_at( const_reverse_iterator const& iter ) { return base_class::do_erase_at( iter ); } //@endcond /// Extracts the item from the map with specified \p key /** The function searches an item with key equal to hash( key_type( key )) in the map, unlinks it from the map, and returns a guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer. The item extracted is freed automatically by garbage collector \p GC when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::FeldmanHashMap< cds::gc::HP, int, foo, my_traits > map_type; map_type theMap; // ... { map_type::guarded_ptr gp( theMap.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard and frees the pointer } \endcode */ template guarded_ptr extract( K const& key ) { return base_class::extract( m_Hasher( key_type( key ))); } /// Checks whether the map contains \p key /** The function searches the item by its hash that is equal to hash( key_type( key )) and returns \p true if it is found, or \p false otherwise. */ template bool contains( K const& key ) { return base_class::contains( m_Hasher( key_type( key ))); } /// Find the key \p key /** The function searches the item by its hash that is equal to hash( key_type( key )) and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( K const& key, Func f ) { return base_class::find( m_Hasher( key_type( key )), [&f]( node_type& node ) { f( node.m_Value );}); } /// Finds the key \p key and return the item found /** The function searches the item with a hash equal to hash( key_type( key )) and returns a guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer. It is safe when a concurrent thread erases the item returned as \p guarded_ptr. 
In this case the item will be freed later by garbage collector \p GC automatically when the \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses one of the GC's guards, which can be a limited resource. Usage: \code typedef cds::container::FeldmanHashMap< cds::gc::HP, int, foo, my_traits > map_type; map_type theMap; // ... { map_type::guarded_ptr gp( theMap.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode */ template <typename K> guarded_ptr get( K const& key ) { return base_class::get( m_Hasher( key_type( key ))); } /// Clears the map (non-atomic) /** The function unlinks all data nodes from the map. The function is not atomic but is thread-safe. After \p %clear() the map may not be empty because other threads may insert items. */ void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if item count is zero then the map is empty. Thus, the correct item counting feature is an important part of the map implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns the size of head node size_t head_size() const { return base_class::head_size(); } /// Returns the size of the array node size_t array_node_size() const { return base_class::array_node_size(); } /// Collects tree level statistics into \p stat /** The function traverses the map and collects statistics for each level of the tree into \p feldman_hashset::level_statistics struct. The element of \p stat[i] represents statistics for level \p i, level 0 is the head array. The function is thread-safe and may be called in a multi-threaded environment. The result can be useful for estimating the efficiency of the hash functor you use. */ void get_level_statistics( std::vector< feldman_hashmap::level_statistics>& stat) const { base_class::get_level_statistics( stat ); } public: ///@name Thread-safe iterators /** @anchor cds_container_FeldmanHashMap_iterators The map supports thread-safe iterators: you may iterate over the map in a multi-threaded environment. It is guaranteed that the iterators will remain valid even if another thread deletes the node the iterator points to: the Hazard Pointer embedded into the iterator object protects the node from physical reclamation. @note Since the iterator object contains a hazard pointer that is a thread-local resource, the iterator should not be passed to another thread. Each iterator object supports the common interface: - dereference operators: @code value_type [const] * operator ->() noexcept value_type [const] & operator *() noexcept @endcode - pre-increment and pre-decrement. Post-operators are not supported - equality operators == and !=. Iterators are equal iff they point to the same cell of the same array node. Note that for two iterators \p it1 and \p it2, the condition it1 == it2 does not entail &(*it1) == &(*it2) - helper member function \p release() that clears the internal hazard pointer. After \p release() the iterator points to \p nullptr but it still remains valid: further iterating is possible. During iteration you may safely erase any item from the map; @ref erase_at() function call doesn't invalidate any iterator. If some iterator points to the item to be erased, that item is not deleted immediately but only after the iterator is advanced forward or backward.
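For example (a sketch; \p map_type is the hypothetical typedef from the examples above, and \p is_expired() is an assumed member of \p foo):
\code
map_type theMap;
// ...
for ( auto it = theMap.begin(); it != theMap.end(); ++it ) {
    if ( it->second.is_expired())
        theMap.erase_at( it ); // the iterator stays valid and can be advanced further
}
\endcode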
@note It is possible the item can be iterated more than once, for example, if an iterator points to an item in an array node that is being split. */ ///@{ /// Returns an iterator to the beginning of the map iterator begin() { return base_class::template init_begin<iterator>(); } /// Returns a const iterator to the beginning of the map const_iterator begin() const { return base_class::template init_begin<const_iterator>(); } /// Returns a const iterator to the beginning of the map const_iterator cbegin() { return base_class::template init_begin<const_iterator>(); } /// Returns an iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. iterator end() { return base_class::template init_end<iterator>(); } /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator end() const { return base_class::template init_end<const_iterator>(); } /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator cend() { return base_class::template init_end<const_iterator>(); } /// Returns a reverse iterator to the first element of the reversed map reverse_iterator rbegin() { return base_class::template init_rbegin<reverse_iterator>(); } /// Returns a const reverse iterator to the first element of the reversed map const_reverse_iterator rbegin() const { return base_class::template init_rbegin<const_reverse_iterator>(); } /// Returns a const reverse iterator to the first element of the reversed map const_reverse_iterator crbegin() { return base_class::template init_rbegin<const_reverse_iterator>(); } /// Returns a reverse iterator to the element following the last element of the reversed map /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder; attempting to access it results in undefined behavior. */ reverse_iterator rend() { return base_class::template init_rend<reverse_iterator>(); } /// Returns a const reverse iterator to the element following the last element of the reversed map /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder; attempting to access it results in undefined behavior. */ const_reverse_iterator rend() const { return base_class::template init_rend<const_reverse_iterator>(); } /// Returns a const reverse iterator to the element following the last element of the reversed map /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder; attempting to access it results in undefined behavior. */ const_reverse_iterator crend() { return base_class::template init_rend<const_reverse_iterator>(); } ///@} }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H libcds-2.3.3/cds/container/impl/feldman_hashset.h000066400000000000000000000635571341244201700217660ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H #define CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H #include #include namespace cds { namespace container { /// Hash set based on multi-level array /** @ingroup cds_nonintrusive_set @anchor cds_container_FeldmanHashSet_hp Source: - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: Wait-free Extensible Hash Maps" [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform a global resize, the process of redistributing the elements in a hash map that occurs when adding new buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all threads will be forced to wait on the thread that is performing the involved process of resizing the hash map and redistributing the elements. \p %FeldmanHashSet implementation avoids global resizes through new array allocation. By allowing concurrent expansion this structure is free from the overhead of an explicit resize, which facilitates concurrent operations. The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure; which, in combination with perfect hashing, means that each element has a unique final, as well as current, position. It is important to note that the perfect hash function required by our hash map is trivial to realize as any hash function that permutes the bits of the key is suitable. This is possible because of our approach to the hash function; we require that it produces hash values that are equal in size to that of the key. We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys are not provided for in the standard semantics of a hash map. \p %FeldmanHashSet is a multi-level array which has an internal structure similar to a tree: @image html feldman_hashset.png The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node. A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked. A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB) of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node; any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark on the second-least significant bit. \p %FeldmanHashSet multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length; a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength. The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node. We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which we need to operate; this is initially one, because of \p head. 
That approach to the structure of the hash set uses an extensible hashing scheme; the hash value is treated as a bit string and rehashed incrementally. @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. Instead, for strings you should use well-known hashing algorithms like SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash and so on, which convert variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. - \p %FeldmanHashSet uses perfect hashing. It means that if two different keys, for example, of type \p std::string, have an identical hash then you cannot insert both of those keys into the set. \p %FeldmanHashSet does not maintain the key, it maintains its fixed-size hash value. The set supports @ref cds_container_FeldmanHashSet_iterators "bidirectional thread-safe iterators". Template parameters: - \p GC - safe memory reclamation schema. Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type" - \p T - a value type to be stored in the set - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" to the hash value of \p T. The set algorithm does not calculate that hash value. There are several specializations of \p %FeldmanHashSet for each \p GC. You should include: - <cds/container/feldman_hashset_hp.h> for \p gc::HP garbage collector - <cds/container/feldman_hashset_dhp.h> for \p gc::DHP garbage collector - <cds/container/feldman_hashset_rcu.h> for \ref cds_intrusive_FeldmanHashSet_rcu "RCU type". RCU specialization has a slightly different interface. */ template < class GC , typename T #ifdef CDS_DOXYGEN_INVOKED , class Traits = feldman_hashset::traits #else , class Traits #endif > class FeldmanHashSet #ifdef CDS_DOXYGEN_INVOKED : protected cds::intrusive::FeldmanHashSet< GC, T, Traits > #else : protected cds::container::details::make_feldman_hashset< GC, T, Traits >::type #endif { //@cond typedef cds::container::details::make_feldman_hashset< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the set typedef Traits traits; ///< Traits template parameter, see \p feldman_hashset::traits typedef typename base_class::hash_accessor hash_accessor; ///< Hash accessor functor typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p opt::compare and \p opt::less option setter typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Element allocator typedef typename traits::node_allocator node_allocator; ///< Array node allocator typedef typename traits::memory_model memory_model; ///< Memory model typedef typename traits::back_off back_off; ///< Backoff strategy typedef typename traits::stat stat; ///< Internal statistics type typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer /// Count of hazard pointers required static constexpr size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; /// The size of \p hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation static constexpr size_t const
c_hash_size = base_class::c_hash_size; /// Level statistics typedef feldman_hashset::level_statistics level_statistics; protected: //@cond typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; //@endcond public: ///@name Thread-safe iterators ///@{ /// Bidirectional iterator /** @anchor cds_container_FeldmanHashSet_iterators The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment. It is guaranteed that the iterators will remain valid even if another thread deletes the node the iterator points to: Hazard Pointer embedded into the iterator object protects the node from physical reclamation. @note Since the iterator object contains hazard pointer that is a thread-local resource, the iterator should not be passed to another thread. Each iterator object supports the following interface: - dereference operators: @code value_type [const] * operator ->() noexcept value_type [const] & operator *() noexcept @endcode - pre-increment and pre-decrement. Post-operators is not supported - equality operators == and !=. Iterators are equal iff they point to the same cell of the same array node. Note that for two iterators \p it1 and \p it2, the conditon it1 == it2 does not entail &(*it1) == &(*it2) - helper member function \p release() that clears internal hazard pointer. After \p release() the iterator points to \p nullptr but it still remain valid: further iterating is possible. During iteration you may safely erase any item from the set; @ref erase_at() function call doesn't invalidate any iterator. If some iterator points to the item to be erased, that item is not deleted immediately but only after that iterator will be advanced forward or backward. @note It is possible the item can be iterated more that once, for example, if an iterator points to the item in array node that is being splitted. */ typedef typename base_class::iterator iterator; typedef typename base_class::const_iterator const_iterator; ///< @ref cds_container_FeldmanHashSet_iterators "bidirectional const iterator" type typedef typename base_class::reverse_iterator reverse_iterator; ///< @ref cds_container_FeldmanHashSet_iterators "bidirectional reverse iterator" type typedef typename base_class::const_reverse_iterator const_reverse_iterator; ///< @ref cds_container_FeldmanHashSet_iterators "bidirectional reverse const iterator" type /// Returns an iterator to the beginning of the set iterator begin() { return base_class::begin(); } /// Returns an const iterator to the beginning of the set const_iterator begin() const { return base_class::begin(); } /// Returns an const iterator to the beginning of the set const_iterator cbegin() { return base_class::cbegin(); } /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. iterator end() { return base_class::end(); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator end() const { return base_class::end(); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. 
const_iterator cend() { return base_class::cend(); } /// Returns a reverse iterator to the first element of the reversed set reverse_iterator rbegin() { return base_class::rbegin(); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator rbegin() const { return base_class::rbegin(); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator crbegin() { return base_class::crbegin(); } /// Returns a reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder; attempting to access it results in undefined behavior. */ reverse_iterator rend() { return base_class::rend(); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder; attempting to access it results in undefined behavior. */ const_reverse_iterator rend() const { return base_class::rend(); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder; attempting to access it results in undefined behavior. */ const_reverse_iterator crend() { return base_class::crend(); } ///@} public: /// Creates empty set /** @param head_bits - 2<sup>head_bits</sup> specifies the size of the head array, minimum is 4. @param array_bits - 2<sup>array_bits</sup> specifies the size of the array node, minimum is 2. Equation for \p head_bits and \p array_bits: \code sizeof(hash_type) * 8 == head_bits + N * array_bits \endcode where \p N is multi-level array depth. */ FeldmanHashSet( size_t head_bits = 8, size_t array_bits = 4 ) : base_class( head_bits, array_bits ) {} /// Destructs the set and frees all data ~FeldmanHashSet() {} /// Inserts new element /** The function creates an element with copy of \p val value and then inserts it into the set. The type \p Q should contain at a minimum the complete hash for the element. The object of \ref value_type should be constructible from a value of type \p Q. In the trivial case, \p Q is equal to \ref value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template <typename Q> bool insert( Q const& val ) { scoped_node_ptr sp( cxx_node_allocator().New( val )); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Inserts new element /** The function allows splitting the creation of a new item into several steps: - create an item with key only; - insert the new item into the set; - if inserting is successful, call \p f functor to initialize value-fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no other changes could be made on this set's item by concurrent threads. The user-defined functor is called only if the inserting is successful. */ template <typename Q, typename Func> bool insert( Q const& val, Func f ) { scoped_node_ptr sp( cxx_node_allocator().New( val )); if ( base_class::insert( *sp, f )) { sp.release(); return true; } return false; } /// Updates the element /** The operation performs inserting or replacing in a lock-free manner.
If the key \p val is not found in the set, then the new item created from \p val will be inserted into the set iff \p bInsert is \p true. Otherwise, if \p val is found, it is replaced with a new item created from \p val and the previous item is disposed. In both cases \p func functor is called. The functor \p Func signature: \code struct my_functor { void operator()( value_type& cur, value_type * prev ); }; \endcode where: - \p cur - current element - \p prev - pointer to previous element with such hash. \p prev is \p nullptr if \p cur was just inserted. The functor may change non-key fields of the \p item; however, \p func must guarantee that during changing no other modifications could be made on this item by concurrent threads. Returns std::pair<bool, bool> where \p first is \p true if operation is successful, i.e. the item has been inserted or updated, \p second is \p true if the new item has been added or \p false if the item with key equal to \p val already exists. */ template <typename Q, typename Func> std::pair<bool, bool> update( Q const& val, Func func, bool bInsert = true ) { scoped_node_ptr sp( cxx_node_allocator().New( val )); std::pair<bool, bool> bRes = base_class::do_update( *sp, func, bInsert ); if ( bRes.first ) sp.release(); return bRes; } /// Inserts data of type \p value_type created in-place from std::forward<Args>(args)... /** Returns \p true if inserting is successful, \p false otherwise. */ template <typename... Args> bool emplace( Args&&... args ) { scoped_node_ptr sp( cxx_node_allocator().MoveNew( std::forward<Args>(args)... )); if ( base_class::insert( *sp )) { sp.release(); return true; } return false; } /// Deletes the item from the set /** The function searches \p hash in the set, deletes the item found, and returns \p true. If that item is not found the function returns \p false. */ bool erase( hash_type const& hash ) { return base_class::erase( hash ); } /// Deletes the item from the set /** The function searches \p hash in the set, calls \p f functor with the item found, and deletes the element from the set. The \p Func interface is \code struct functor { void operator()( value_type& item ); }; \endcode If \p hash is not found the function returns \p false. */ template <typename Func> bool erase( hash_type const& hash, Func f ) { return base_class::erase( hash, f ); } /// Deletes the item pointed by iterator \p iter /** Returns \p true if the operation is successful, \p false otherwise. The function does not invalidate the iterator, it remains valid and can be used for further traversing. */ bool erase_at( iterator const& iter ) { return base_class::erase_at( iter ); } //@cond bool erase_at( reverse_iterator const& iter ) { return base_class::erase_at( iter ); } //@endcond /// Extracts the item with specified \p hash /** The function searches \p hash in the set, unlinks it from the set, and returns a guarded pointer to the item extracted. If \p hash is not found the function returns an empty guarded pointer. The item returned is reclaimed by garbage collector \p GC when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::container::FeldmanHashSet< your_template_args > my_set; my_set theSet; // ... { my_set::guarded_ptr gp( theSet.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ guarded_ptr extract( hash_type const& hash ) { return base_class::extract( hash ); } /// Finds an item by its \p hash /** The function searches the item by \p hash and calls the functor \p f for the item found.
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change non-key fields of \p item. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on the item level to prevent unsafe item modifications. The function returns \p true if \p hash is found, \p false otherwise. */ template <typename Func> bool find( hash_type const& hash, Func f ) { return base_class::find( hash, f ); } /// Checks whether the set contains \p hash /** The function searches the item by its \p hash and returns \p true if it is found, or \p false otherwise. */ bool contains( hash_type const& hash ) { return base_class::contains( hash ); } /// Finds an item by its \p hash and returns the item found /** The function searches the item by its \p hash and returns the guarded pointer to the item found. If \p hash is not found the function returns an empty \p guarded_ptr. @note Each \p guarded_ptr object uses one of the GC's guards, which can be a limited resource. Usage: \code typedef cds::container::FeldmanHashSet< your_template_params > my_set; my_set theSet; // ... { my_set::guarded_ptr gp( theSet.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode */ guarded_ptr get( hash_type const& hash ) { return base_class::get( hash ); } /// Clears the set (non-atomic) /** The function unlinks all data nodes from the set. The function is not atomic but is thread-safe. After \p %clear() the set may not be empty because other threads may insert items. */ void clear() { base_class::clear(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of the set implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the set size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns the size of head node size_t head_size() const { return base_class::head_size(); } /// Returns the size of the array node size_t array_node_size() const { return base_class::array_node_size(); } /// Collects tree level statistics into \p stat /** The function traverses the set and collects statistics for each level of the tree into \p feldman_hashset::level_statistics struct. The element of \p stat[i] represents statistics for level \p i, level 0 is the head array. The function is thread-safe and may be called in a multi-threaded environment. The result can be useful for estimating the efficiency of the hash functor you use. */ void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const { base_class::get_level_statistics(stat); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H libcds-2.3.3/cds/container/impl/iterable_kvlist.h000066400000000000000000000717001341244201700220120ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_KVLIST_H #define CDSLIB_CONTAINER_IMPL_ITERABLE_KVLIST_H #include #include namespace cds { namespace container { /// Iterable ordered list for key-value pair /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_IterableKVList_gc This is a key-value variation of the non-intrusive \p IterableList. Like a standard container, this implementation splits the stored value into two parts: a constant key and an alterable value. Usually, an ordered single-linked list is used as a building block for a hash table implementation. Iterable list is suitable for an almost append-only hash table because the list doesn't delete its internal nodes when erasing a key but marks them as empty to be reused in the future. However, plenty of empty nodes degrades performance. The complexity of searching is O(N). Template arguments: - \p GC - garbage collector used - \p Key - key type of an item stored in the list. It should be copy-constructible - \p Value - value type stored in a list - \p Traits - type traits, default is \p iterable_list::traits It is possible to declare an option-based list with the \p cds::container::iterable_list::make_traits metafunction instead of the \p Traits template argument. For example, the following traits-based declaration of \p gc::HP iterable list \code #include <cds/container/iterable_kvlist_hp.h> // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::iterable_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::IterableKVList< cds::gc::HP, int, int, my_traits > traits_based_list; \endcode is equivalent to the following option-based list \code #include <cds/container/iterable_kvlist_hp.h> // my_compare is the same // Declare option-based list typedef cds::container::IterableKVList< cds::gc::HP, int, int, typename cds::container::iterable_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema used.
You should include appropriate .h-file depending on GC you are using: - for gc::HP: \code #include \endcode - for gc::DHP: \code #include \endcode - for \ref cds_urcu_desc "RCU": \code #include \endcode */ template < typename GC, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = iterable_list::traits #else typename Traits #endif > class IterableKVList: #ifdef CDS_DOXYGEN_INVOKED protected container::IterableList< GC, std::pair, Traits > #else protected details::make_iterable_kvlist< GC, Key, Value, Traits >::type #endif { //@cond typedef details::make_iterable_kvlist< GC, Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type; ///< Key type typedef Value mapped_type; ///< Type of value stored in the list typedef std::pair value_type; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::mapped_type mapped_type; typedef typename maker::value_type value_type; #endif typedef Traits traits; ///< List traits typedef typename base_class::gc gc; ///< Garbage collector used typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::data_allocator_type allocator_type; ///< Allocator type used for allocate/deallocate data typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm /// Guarded pointer typedef typename base_class::guarded_ptr guarded_ptr; //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef IterableKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::head_type head_type; typedef typename maker::cxx_data_allocator cxx_data_allocator; template using less_wrapper = typename maker::template less_wrapper< Less >; template using iterator_type = typename base_class::template iterator_type; //@endcond public: /// Forward iterator /** The forward iterator for iterable list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard. For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. - Iterator is thread-safe: even if an element the iterator points to is removed, the iterator stays valid because it contains the guard keeping the value from to be recycled. 
The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode @note For two iterators pointed to the same element the value can be different; this code \code if ( it1 == it2 ) assert( &(*it1) == &(*it2)); \endcode can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread. The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing. Other iterator can observe modified value of the element. */ using typename base_class::iterator; using typename base_class::const_iterator; using base_class::begin; using base_class::end; using base_class::cbegin; using base_class::cend; public: /// Default constructor /** Initializes empty list */ IterableKVList() {} //@cond template >::value >> explicit IterableKVList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~IterableKVList() {} /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the list. Preconditions: - The \p key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. @note The function is supported only if \ref mapped_type is default constructible */ template bool insert( K&& key ) { return base_class::emplace( key_type( std::forward( key )), mapped_type()); } /// Inserts new node with a key and a value /** The function creates a node with \p key and value \p val, and then inserts the node created into the list. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K&& key, V&& val ) { return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward( val ))); } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the item inserted. item.second is a reference to item's value that may be changed. User-defined functor \p func should guarantee that during changing item's value no any other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if inserting is successful. The \p key_type should be constructible from value of type \p K. 
The function allows splitting the creation of a new item into several steps: - create a new item from \p key; - insert the new item into the list; - if inserting is successful, initialize the value of the item by calling \p func functor. This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" @note The function is supported only if \ref mapped_type is default constructible */ template <typename K, typename Func> bool insert_with( K&& key, Func func ) { return base_class::insert( value_type( key_type( std::forward<K>( key )), mapped_type()), func ); } /// Updates data by \p key /** The operation performs inserting or replacing the element in a lock-free manner. If \p key is not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with the item found. The functor \p func is called after inserting or replacing, its signature is: \code void func( value_type& val, value_type* old ); \endcode where - \p val - new data constructed from \p key - \p old - old value that will be retired. If a new item has been inserted then \p old is \p nullptr. The functor may change non-key fields of \p val; however, \p func must guarantee that during changing no other modifications could be made on this item by concurrent threads. @return std::pair<bool, bool> where \p first is true if operation is successful, \p second is true if a new item has been added or \p false if the item with such \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" @note The function is supported only if \ref mapped_type is default constructible */ template <typename K, typename Func> std::pair<bool, bool> update( K&& key, Func f, bool bAllowInsert = true ) { return base_class::update( value_type( key_type( std::forward<K>( key )), mapped_type()), f, bAllowInsert ); } /// Insert or update /** The operation performs inserting or updating data in a lock-free manner. If the item \p key is not found in the list, then \p key is inserted iff \p bInsert is \p true. Otherwise, the current element is changed to value_type( key, val ) and the old element will be retired later. Returns std::pair<bool, bool> where \p first is \p true if operation is successful, \p second is \p true if \p key has been added or \p false if the item with that key is already in the list. */ template <typename Q, typename V> std::pair<bool, bool> upsert( Q&& key, V&& val, bool bInsert = true ) { return base_class::upsert( value_type( key_type( std::forward<Q>( key )), mapped_type( std::forward<V>( val ))), bInsert ); } /// Inserts a new node using move semantics /** \p key_type field of the new item is constructed from the \p key argument, \p mapped_type field is constructed from \p args. Returns \p true if inserting is successful, \p false otherwise. */ template <typename K, typename... Args> bool emplace( K&& key, Args&&... args ) { return base_class::emplace( key_type( std::forward<K>( key )), mapped_type( std::forward<Args>( args )... )); } /// Deletes \p key from the list /** Returns \p true if \p key is found and has been deleted, \p false otherwise */ template <typename K> bool erase( K const& key ) { return base_class::erase( key ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(K const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, less_wrapper()); } /// Deletes \p key from the list /** The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& val) { ... } }; \endcode Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return base_class::erase( key, f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(K const&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, less_wrapper(), f ); } /// Deletes the item pointed by iterator \p iter /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. */ bool erase_at( iterator const& iter ) { return base_class::erase_at( iter ); } /// Extracts the item from the list with specified \p key /** The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. The \p disposer specified in \p Traits class template parameter is called automatically by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::IterableKVList< cds::gc::HP, int, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( K const& key ) { return base_class::extract( key ); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \p extract(K const&) but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::extract_with( key, less_wrapper()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) const { return base_class::contains( key ); } /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. 
\p Less must imply the same element order as the comparator used for building the list. */ template <typename Q, typename Less> bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return base_class::contains( key, less_wrapper<Less>()); } /// Finds the key \p key and performs an action with it /** The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change item.second, which is a reference to the node's value. Note that the function only guarantees that \p item cannot be deleted while the functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q const& key, Func f ) const { return base_class::find( key, [&f]( value_type& v, Q const& ) { f( v ); } ); } /// Finds \p key in the list and returns an iterator pointing to the item found /** If \p key is not found the function returns \p end(). */ template <typename Q> iterator find( Q const& key ) const { return base_class::find( key ); } /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return base_class::find_with( key, less_wrapper<Less>(), [&f]( value_type& v, Q const& ) { f( v ); } ); } /// Finds \p key in the list using \p pred predicate for searching and returns an iterator pointing to the item found /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. If \p key is not found the function returns \p end(). */ template <typename Q, typename Less> iterator find_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return base_class::find_with( key, less_wrapper<Less>()); } /// Finds the \p key and returns the item found /** The function searches the item with key equal to \p key and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one of the GC's guards, which can be a limited resource. Usage: \code typedef cds::container::IterableKVList< cds::gc::HP, int, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p K that may differ from \p key_type. */ template <typename K> guarded_ptr get( K const& key ) const { return base_class::get( key ); } /// Finds the \p key and returns the item found /** The function is an analog of \p get( guarded_ptr& ptr, K const&) but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list.
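For example (a minimal sketch; \p ord_list is the typedef from the \p get() example above, and \p my_less is a hypothetical predicate that must be compatible with the list's ordering):
\code
struct my_less {
    bool operator()( int k1, int k2 ) const { return k1 < k2; }
};
ord_list theList;
// ...
{
    ord_list::guarded_ptr gp( theList.get_with( 5, my_less()));
    if ( gp ) {
        // Deal with gp
    }
} // Destructor of gp releases internal HP guard
\endcode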
*/ template guarded_ptr get_with( K const& key, Less pred ) const { CDS_UNUSED( pred ); return base_class::get_with( key, less_wrapper()); } /// Checks if the list is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter feature. */ bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. */ size_t size() const { return base_class::size(); } /// Clears the list void clear() { base_class::clear(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } protected: //@cond // Split-list support template bool insert_at( head_type& refHead, K&& key ) { return base_class::insert_at( refHead, value_type( key_type( std::forward( key )), mapped_type())); } template bool insert_at( head_type& refHead, K&& key, V&& val ) { return base_class::insert_at( refHead, value_type( key_type( std::forward( key )), std::forward( val ))); } template bool insert_with_at( head_type& refHead, K&& key, Func f ) { return base_class::insert_at( refHead, value_type( key_type( std::forward( key )), mapped_type()), f ); } template bool emplace_at( head_type& refHead, K&& key, Args&&... args ) { return base_class::emplace_at( refHead, std::forward(key), std::forward(args)... ); } template std::pair update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert ) { return base_class::update_at( refHead, value_type( key_type( std::forward( key )), mapped_type()), f, bAllowInsert ); } template bool erase_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::erase_at( refHead, key, cmp ); } template bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) { return base_class::erase_at( refHead, key, cmp, f ); } template guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::extract_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::find_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) { return base_class::find_at( refHead, key, cmp, f ); } template guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::get_at( refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_KVLIST_H libcds-2.3.3/cds/container/impl/iterable_list.h000066400000000000000000001003651341244201700214510ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_LIST_H #define CDSLIB_CONTAINER_IMPL_ITERABLE_LIST_H #include #include namespace cds { namespace container { /// Iterable ordered list /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_IterableList_gc This lock-free list implementation supports thread-safe iterators. Usually, ordered single-linked list is used as a building block for the hash table implementation. 
Iterable list is suitable for an almost append-only hash table because the list doesn't delete its internal nodes when erasing a key but marks them as empty to be reused in the future. However, plenty of empty nodes degrades performance. The complexity of searching is O(N). Template arguments: - \p GC - Garbage collector used. - \p T - type to be stored in the list. - \p Traits - type traits, default is \p iterable_list::traits. Unlike a standard container, this implementation does not divide type \p T into key and value parts and may be used as a main building block for hash set algorithms. The key is a function (or a part) of type \p T, and this function is specified by the Traits::compare functor or Traits::less predicate. \p IterableKVList is a key-value version of the iterable non-intrusive list that is closer to the C++ std library approach. It is possible to declare an option-based list with the cds::container::iterable_list::make_traits metafunction instead of the \p Traits template argument. For example, the following traits-based declaration of a gc::HP iterable list \code #include <cds/container/iterable_list_hp.h> // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::iterable_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::IterableList< cds::gc::HP, int, my_traits > traits_based_list; \endcode is equivalent to the following option-based list \code #include <cds/container/iterable_list_hp.h> // my_compare is the same // Declare option-based list typedef cds::container::IterableList< cds::gc::HP, int, typename cds::container::iterable_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema used. You should include the appropriate .h-file depending on the GC you are using: - for gc::HP: \code #include <cds/container/iterable_list_hp.h> \endcode - for gc::DHP: \code #include <cds/container/iterable_list_dhp.h> \endcode - for \ref cds_urcu_desc "RCU": \code #include \endcode */ template < typename GC, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = iterable_list::traits #else typename Traits #endif > class IterableList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::IterableList< GC, T, Traits > #else protected details::make_iterable_list< GC, T, Traits >::type #endif { //@cond typedef details::make_iterable_list< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef T value_type; ///< Type of value stored in the list typedef Traits traits; ///< List traits typedef typename base_class::gc gc; ///< Garbage collector used typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::data_allocator_type allocator_type; ///< Allocator type used for allocate/deallocate data typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering.
See \p cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef IterableList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename maker::cxx_data_allocator cxx_data_allocator; typedef typename maker::data_disposer data_disposer; typedef typename base_class::node_type head_type; //@endcond public: /// Guarded pointer typedef typename base_class::guarded_ptr guarded_ptr; protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; friend class IterableList; iterator_type( iterator_base it ) : iterator_base( it ) {} public: typedef typename iterator_base::value_ptr value_ptr; typedef typename iterator_base::value_ref value_ref; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} value_ptr operator ->() const { return iterator_base::operator ->(); } value_ref operator *() const { return iterator_base::operator *(); } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Thread-safe forward iterators //@{ /// Forward iterator /** The forward iterator for iterable list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard. For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. - Iterator is thread-safe: even if an element the iterator points to is removed, the iterator stays valid because it contains the guard keeping the value from to be recycled. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode @note For two iterators pointed to the same element the value can be different; this code \code if ( it1 == it2 ) assert( &(*it1) == &(*it2)); \endcode can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread. The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing. Other iterator can observe modified value of the element. 
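For example (a sketch; \p list_type and \p my_traits are hypothetical, declared as in the examples above):
\code
typedef cds::container::IterableList< cds::gc::HP, int, my_traits > list_type;
list_type theList;
// ...
for ( list_type::iterator it = theList.begin(); it != theList.end(); ++it ) {
    // *it is protected by the iterator's internal hazard pointer,
    // so the value cannot be reclaimed while the iterator points to it
    int v = *it;
    (void) v;
}
\endcode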
*/ typedef iterator_type<false> iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type<true> const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( base_class::begin()); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by the end function to access any item. Internally, the value returned by end equals \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( base_class::cbegin()); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( base_class::cend()); } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Default constructor /** Initializes an empty list */ IterableList() {} //@cond template >::value >> explicit IterableList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~IterableList() {} /// Inserts new node /** The function creates a node with a copy of \p val value and then inserts the created node into the list. The type \p Q should contain at least the complete key of the node. The object of \ref value_type should be constructible from \p val of type \p Q. In the trivial case, \p Q is equal to \ref value_type. Returns \p true if inserting is successful, \p false otherwise. */ template <typename Q> bool insert( Q&& val ) { return insert_at( head(), std::forward<Q>( val )); } /// Inserts new node /** This function inserts a new node with a default-constructed value and then calls \p func functor with signature \code void func( value_type& data ); \endcode The argument \p data of user-defined functor \p func is the reference to the list's item inserted. User-defined functor \p func should guarantee that during changing the item's value no other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if inserting is successful. The type \p Q should contain the complete key of the node. The object of \p value_type should be constructible from \p key of type \p Q. The function allows splitting the creation of a new item into several steps: - create an item from \p key with initializing key-fields only; - insert the new item into the list; - if inserting is successful, initialize non-key fields of the item by calling \p func functor. The method can be useful if complete initialization of an object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template <typename Q, typename Func> bool insert( Q&& key, Func func ) { return insert_at( head(), std::forward<Q>( key ), func ); } /// Updates data by \p key /** The operation performs inserting or replacing the element in a lock-free manner.
If \p key is not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. Otherwise, if \p key is found, the functor \p func is called with the item found. The functor \p func is called after inserting or replacing, its signature is: \code void func( value_type& val, value_type * old ); \endcode where - \p val - new data constructed from \p key - \p old - old value that will be retired. If a new item has been inserted then \p old is \p nullptr. The functor may change non-key fields of \p val; however, \p func must guarantee that while \p val is being changed no other modifications can be made to this item by concurrent threads. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with such \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( Q&& key, Func func, bool bAllowInsert = true ) { return update_at( head(), std::forward( key ), func, bAllowInsert ); } /// Insert or update /** The operation performs inserting or updating data in a lock-free manner. If the item \p key is not found in the list, then \p key is inserted iff \p bInsert is \p true. Otherwise, the current element is changed to \p key, and the old element will be retired later. \p value_type should be constructible from \p key. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if \p key has been added or \p false if the item with that key is already in the list. */ template std::pair upsert( Q&& key, bool bInsert = true ) { return update_at( head(), std::forward( key ), []( value_type&, value_type* ) {}, bInsert ); } /// Inserts data of type \p value_type constructed with std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { return emplace_at( head(), std::forward(args)... ); } /// Deletes \p key from the list /** Since the key of IterableList's item type \p value_type is not explicitly specified, template parameter \p Q should contain the complete key to search in the list. The list item comparator should be able to compare the type \p value_type and the type \p Q. Returns \p true if the key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return erase_at( head(), key, key_comparator(), [](value_type const&){} ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(Q const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), [](value_type const&){} ); } /// Deletes \p key from the list /** The function searches an item with key \p key, calls \p f functor with the item found and deletes it. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(const value_type& val) { ... } }; \endcode Since the key of IterableList's item type \p value_type is not explicitly specified, template parameter \p Q should contain the complete key to search in the list. The list item comparator should be able to compare the type \p value_type of list item and the type \p Q.
Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { return erase_at( head(), key, key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(Q const&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Deletes the item pointed by iterator \p iter /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. */ bool erase_at( iterator const& iter ) { return base_class::erase_at( iter ); } /// Extracts the item from the list with specified \p key /** The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::IterableList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard and frees the item } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_at( head(), key, key_comparator()); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but it should accept arguments of type \p value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( head(), key, typename maker::template less_wrapper()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) const { return find_at( head(), key, key_comparator()); } /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } /// Finds \p key and perform an action with it /** The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. 
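For example, a sketch (the \p access_count field and the \p theList object are hypothetical): \code
struct foo {
    int key;
    int access_count; // non-key field
};
// bump a counter on the item with key 5, if present
theList.find( 5, []( foo& item, int const& /*key*/ ) {
    ++item.access_count;
});
\endcode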
Note that the function only guarantees that \p item cannot be deleted while the functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) const { return find_at( head(), key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) const { return find_at( head(), key, key_comparator(), f ); } //@endcond /// Finds \p key in the list and returns an iterator pointing to the item found /** If \p key is not found the function returns \p end(). */ template iterator find( Q const& key ) const { return find_iterator_at( head(), key, key_comparator()); } /// Finds \p key using \p pred predicate for searching /** The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@endcond /// Finds \p key in the list using \p pred predicate for searching and returns an iterator pointing to the item found /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. If \p key is not found the function returns \p end(). */ template iterator find_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_iterator_at( head(), key, cds::opt::details::make_comparator_from_less()); } /// Finds \p key and returns the item found /** \anchor cds_nonintrusive_MichaelList_hp_get The function searches the item with key equal to \p key and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one GC guard, which can be a limited resource. Usage: \code typedef cds::container::IterableList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard and frees the item } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may differ from \p value_type. */ template guarded_ptr get( Q const& key ) const { return get_at( head(), key, key_comparator()); } /// Finds \p key and returns the item found /** The function is an analog of \ref cds_nonintrusive_MichaelList_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should accept arguments of type \p value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list.
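A suitable predicate might look like this sketch (the \p foo layout is hypothetical): \code
struct less_by_key {
    // must accept value_type and the key type in any order
    bool operator()( foo const& v, int k ) const { return v.key < k; }
    bool operator()( int k, foo const& v ) const { return k < v.key; }
};
ord_list::guarded_ptr gp = theList.get_with( 5, less_by_key());
\endcode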
*/ template guarded_ptr get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks if the list is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter feature. */ bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. */ size_t size() const { return base_class::size(); } /// Clears the list (thread safe, not atomic) void clear() { base_class::clear(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } protected: //@cond template static value_type* alloc_data( Args&&... args ) { return cxx_data_allocator().MoveNew( std::forward(args)... ); } static void free_data( value_type* pData ) { cxx_data_allocator().Delete( pData ); } typedef std::unique_ptr< value_type, data_disposer > scoped_data_ptr; using base_class::head; //@endcond protected: //@cond bool insert_node( value_type* pData ) { return insert_node_at( head(), pData ); } bool insert_node_at( head_type* pHead, value_type* pData ) { assert( pData ); scoped_data_ptr p( pData ); if ( base_class::insert_at( pHead, *pData )) { p.release(); return true; } return false; } template bool insert_at( head_type* pHead, Q&& val ) { return insert_node_at( pHead, alloc_data( std::forward( val ))); } template bool insert_at( head_type* pHead, Q&& key, Func f ) { scoped_data_ptr pNode( alloc_data( std::forward( key ))); if ( base_class::insert_at( pHead, *pNode, f )) { pNode.release(); return true; } return false; } template bool emplace_at( head_type* pHead, Args&&... args ) { return insert_node_at( pHead, alloc_data( std::forward(args)... )); } template std::pair update_at( head_type* pHead, Q&& key, Func f, bool bAllowInsert ) { scoped_data_ptr pData( alloc_data( std::forward( key ))); std::pair ret = base_class::update_at( pHead, *pData, f, bAllowInsert ); if ( ret.first ) pData.release(); return ret; } template bool erase_at( head_type* pHead, Q const& key, Compare cmp, Func f ) { return base_class::erase_at( pHead, key, cmp, f ); } template guarded_ptr extract_at( head_type* pHead, Q const& key, Compare cmp ) { return base_class::extract_at( pHead, key, cmp ); } template bool find_at( head_type const* pHead, Q const& key, Compare cmp ) const { return base_class::find_at( pHead, key, cmp ); } template bool find_at( head_type const* pHead, Q& val, Compare cmp, Func f ) const { return base_class::find_at( pHead, val, cmp, f ); } template iterator find_iterator_at( head_type const* pHead, Q const& key, Compare cmp ) const { return iterator( base_class::find_iterator_at( pHead, key, cmp )); } template guarded_ptr get_at( head_type const* pHead, Q const& key, Compare cmp ) const { return base_class::get_at( pHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_LIST_H libcds-2.3.3/cds/container/impl/lazy_kvlist.h000066400000000000000000001003071341244201700211760ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H #define CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H #include #include namespace cds { namespace container { /// Lazy ordered list (key-value pair) /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_LazyKVList_gc This is a key-value variation of the non-intrusive LazyList. Like a standard container, this implementation splits a stored value into two parts - a constant key and an alterable value. Usually, an ordered singly-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Template arguments: - \p GC - garbage collector - \p Key - key type of an item to be stored in the list. It should be copy-constructible - \p Value - value type to be stored in the list - \p Traits - type traits, default is \p lazy_list::traits It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction instead of \p Traits template argument. For example, the following traits-based declaration of \p gc::HP lazy list \code #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::lazy_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::LazyKVList< cds::gc::HP, int, int, my_traits > traits_based_list; \endcode is equal to the following option-based list \code #include // my_compare is the same // Declare option-based list typedef cds::container::LazyKVList< cds::gc::HP, int, int, typename cds::container::lazy_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema used. You should include appropriate .h-file depending on GC you are using: - for \p gc::HP: - for \p gc::DHP: - for \ref cds_urcu_desc "RCU": - for \p gc::nogc: */ template < typename GC, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = lazy_list::traits #else typename Traits #endif > class LazyKVList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::LazyList< GC, implementation_defined, Traits > #else protected details::make_lazy_kvlist< GC, Key, Value, Traits >::type #endif { //@cond typedef details::make_lazy_kvlist< GC, Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef Traits traits; ///< Traits #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type ; ///< Key type typedef Value mapped_type ; ///< Type of value stored in the list typedef std::pair value_type ; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::mapped_type mapped_type; typedef typename maker::value_type value_type; #endif typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename maker::key_comparator key_comparator; ///< key comparing functor typedef typename base_class::memory_model memory_model; ///< Memory ordering.
See \p cds::opt::memory_model typedef typename base_class::stat stat; ///< Internal statistics static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::node_type head_type; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_map > guarded_ptr; protected: //@cond template static node_type * alloc_node(const K& key) { return cxx_allocator().New( key ); } template static node_type * alloc_node( const K& key, const V& val ) { return cxx_allocator().New( key, val ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward(args)... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; head_type& head() { return base_class::m_Head; } head_type const& head() const { return base_class::m_Head; } head_type& tail() { return base_class::m_Tail; } head_type const& tail() const { return base_class::m_Tail; } //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( const_cast(&pNode)) {} iterator_type( head_type const * pNode ) : iterator_base( const_cast(pNode)) {} friend class LazyKVList; public: typedef typename cds::details::make_const_type::reference value_ref; typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference pair_ref; typedef typename cds::details::make_const_type::pointer pair_ptr; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const { typename iterator_base::value_ref p = iterator_base::operator *(); return p.m_Data; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: /// Forward iterator /** The forward iterator for lazy list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (\p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if a limit of guard count per thread is exceeded. - The iterator cannot be moved across a thread boundary since it contains a GC guard that is thread-private GC data. - The iterator ensures thread safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate over all items in the list. @warning Use this iterator on the concurrent container for debugging purposes only. The iterator interface to access item data: - operator -> - returns a pointer to \ref value_type for iterator - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator - const key_type& key() - returns a key reference for iterator - mapped_type& val() - returns a value reference for iterator (const reference for \p const_iterator) For key() and val() the iterator should not be equal to end() */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; ///@name Forward iterators (only for debugging purpose) //@{ /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( head()); ++it ; // skip dummy head return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, the value returned by \p end() equals \p nullptr. The returned value can be used only to control reaching the end of the list.
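A typical debug-only traversal, as a sketch (an HP-based map from int to std::string is assumed): \code
typedef cds::container::LazyKVList< cds::gc::HP, int, std::string > map_type;
map_type theMap;
// ...
for ( map_type::iterator it = theMap.begin(); it != theMap.end(); ++it ) {
    // it.key() is the constant key, it.val() is the alterable value
    std::cout << it.key() << " => " << it.val() << '\n';
}
\endcode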
For empty list \code begin() == end() \endcode */ iterator end() { return iterator( tail()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { const_iterator it( head()); ++it; // skip dummy head return it; } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( head()); ++it; // skip dummy head return it; } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( tail()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( tail()); } //@} public: /// Default constructor LazyKVList() {} //@cond template >::value >> explicit LazyKVList( Stat& st ) : base_class( st ) {} //@endcond /// Destructor clears the list ~LazyKVList() { clear(); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K&& key ) { return insert_at( head(), std::forward( key )); } /// Inserts new node with a key and a value /** The function creates a node with \p key and value \p val, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref mapped_type should be constructible from \p val of type \p V. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K&& key, V&& val ) { // We cannot use insert with functor here // because we cannot lock inserted node for updating // Therefore, we use separate function return insert_at( head(), std::forward( key ), std::forward( val )); } /// Inserts new node and initializes it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the list's item inserted. item.second is a reference to item's value that may be changed. The user-defined functor is called only if inserting is successful. The \p key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the list; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. */ template bool insert_with( K&& key, Func func ) { return insert_with_at( head(), std::forward( key ), func ); } /// Inserts data of type \ref mapped_type constructed with std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { return emplace_at( head(), std::forward(args)... ); } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. 
If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of the \p item.second of \p mapped_type; during \p func call \p item is locked so it is safe to modify the item in multi-threaded environment. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( K&& key, Func f, bool bAllowInsert = true ) { return update_at( head(), std::forward( key ), f, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const K& key, Func f ) { return update( key, f, true ); } //@endcond /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyKVList_hp_erase_val Returns \p true if \p key is found and has been deleted, \p false otherwise */ template bool erase( K const& key ) { return erase_at( head(), key, intrusive_key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper()); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyKVList_hp_erase_func The function searches an item with key \p key, calls \p f functor with item found and deletes it. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& val) { ... } }; \endcode Returns \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts the item from the list with specified \p key /** \anchor cds_nonintrusive_LazyKVList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. 
Usage: \code typedef cds::container::LazyKVList< cds::gc::HP, int, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp( theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard and frees the item } \endcode */ template guarded_ptr extract( K const& key ) { return extract_at( head(), key, intrusive_key_comparator()); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_extract "extract(K const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( head(), key, typename maker::template less_wrapper()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and performs an action with it /** \anchor cds_nonintrusive_LazyKVList_hp_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change item.second that is reference to value of node. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q const& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. 
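For example, a sketch (reusing the hypothetical int-to-std::string \p map_type from the traversal sketch above; the predicate is also hypothetical): \code
struct less_int {
    bool operator()( int k1, int k2 ) const { return k1 < k2; }
};
// update the mapped value of the item with key 5, if it exists
theMap.find_with( 5, less_int(), []( map_type::value_type& item ) {
    item.second = "five";
});
\endcode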
*/ template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } /// Finds \p key and returns the item found /** \anchor cds_nonintrusive_LazyKVList_hp_get The function searches the item with key equal to \p key and returns the item found as a guarded pointer. If \p key is not found the function returns an empty \p guarded_ptr. @note Each \p guarded_ptr object uses one GC guard, which can be a limited resource. Usage: \code typedef cds::container::LazyKVList< cds::gc::HP, int, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp( theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard and frees the item } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p K that may differ from \p key_type. */ template guarded_ptr get( K const& key ) { return get_at( head(), key, intrusive_key_comparator()); } /// Finds the key \p key and returns the item found /** The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_get "get(K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr get_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, this function always returns 0. @note Even if you use a real item counter and it returns 0, this does not mean that the list is empty. To check list emptiness use the \ref empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, K&& key ) { return insert_node_at( refHead, alloc_node( std::forward( key ))); } template bool insert_at( head_type& refHead, K&& key, V&& val ) { return insert_node_at( refHead, alloc_node( std::forward( key ), std::forward( val ))); } template bool insert_with_at( head_type& refHead, K&& key, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); } )) { pNode.release(); return true; } return false; } template bool emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args)...
)); } template bool erase_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::erase_at( &refHead, key, cmp ); } template bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) { return base_class::erase_at( &refHead, key, cmp, [&f](node_type const & node){f( const_cast(node.m_Data)); }); } template guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::extract_at( &refHead, key, cmp ); } template std::pair update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); std::pair ret = base_class::update_at( &refHead, *pNode, [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template bool find_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::find_at( &refHead, key, cmp ); } template bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) { return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K& ){ f( node.m_Data ); }); } template guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::get_at( &refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H libcds-2.3.3/cds/container/impl/lazy_list.h000066400000000000000000000764431341244201700206520ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_LAZY_LIST_H #define CDSLIB_CONTAINER_IMPL_LAZY_LIST_H #include #include namespace cds { namespace container { /// Lazy ordered list /** @ingroup cds_nonintrusive_list @anchor cds_nonintrusive_LazyList_gc Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Source: - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit "A Lazy Concurrent List-Based Set Algorithm" The lazy list is based on an optimistic locking scheme for inserts and removes, eliminating the need to use the equivalent of an atomically markable reference. It also has a novel wait-free membership \p find() operation that does not need to perform cleanup operations and is more efficient. It is non-intrusive version of \p cds::intrusive::LazyList class. Template arguments: - \p GC - garbage collector: \p gc::HP, \p gp::DHP - \p T - type to be stored in the list. - \p Traits - type traits, default is \p lazy_list::traits. It is possible to declare option-based list with \p lazy_list::make_traits metafunction istead of \p Traits template argument. 
For example, the following traits-based declaration of \p gc::HP lazy list \code #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::lazy_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::LazyList< cds::gc::HP, int, my_traits > traits_based_list; \endcode is equal to the following option-based list: \code #include // my_compare is the same // Declare option-based list typedef cds::container::LazyList< cds::gc::HP, int, typename cds::container::lazy_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode Unlike standard container, this implementation does not divide type \p T into key and value part and may be used as main building block for hash set algorithms. The key is a function (or a part) of type \p T, and the comparing function is specified by \p Traits::compare functor or \p Traits::less predicate. \p LazyKVList is a key-value version of lazy non-intrusive list that is closer to the C++ std library approach. \par Usage There are different specializations of this template for each garbage collecting schema used. You should include appropriate .h-file depending on GC you are using: - for gc::HP: - for gc::DHP: - for \ref cds_urcu_desc "RCU": - for gc::nogc: */ template < typename GC, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = lazy_list::traits #else typename Traits #endif > class LazyList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::LazyList< GC, T, Traits > #else protected details::make_lazy_list< GC, T, Traits >::type #endif { //@cond typedef details::make_lazy_list< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector used typedef T value_type; ///< Type of value stored in the list typedef Traits traits; ///< List traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::node_type head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( const_cast( &pNode )) {} iterator_type( head_type const * pNode ) : iterator_base( const_cast( pNode )) {} friend class LazyList; public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? &(p->m_Value) : nullptr; } value_ref operator *() const { return (iterator_base::operator *()).m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** The forward iterator for lazy list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (\p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if a limit of guard count per thread is exceeded. - The iterator cannot be moved across a thread boundary since it contains a GC guard that is thread-private GC data. - The iterator ensures thread safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate over all items in the list. Moreover, a crash is possible when you try to advance to the next element that has been deleted by a concurrent thread. @warning Use this iterator on the concurrent container for debugging purposes only.
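A debug-only traversal sketch (an HP-based list of int is assumed): \code
typedef cds::container::LazyList< cds::gc::HP, int > list_type;
list_type theList;
// ...
// Safe only as a debugging aid: concurrent erasures may cause items
// to be skipped or, for this container, may even crash the traversal
for ( list_type::iterator it = theList.begin(); it != theList.end(); ++it ) {
    std::cout << *it << '\n';
}
\endcode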
*/ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( tail()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { const_iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( tail()); } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( tail()); } //@} public: /// Default constructor LazyList() {} //@cond template >::value >> explicit LazyList( Stat& st ) : base_class( st ) {} //@endcond /// Destructor clears the list ~LazyList() { clear(); } /// Inserts new node /** The function creates a node with a copy of \p val value and then inserts the created node into the list. The type \p Q should contain at least the complete key of the node. The object of \ref value_type should be constructible from \p val of type \p Q. In trivial case, \p Q is equal to \ref value_type. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( Q&& val ) { return insert_at( head(), std::forward( val )); } /// Inserts new node /** This function inserts a new node with default-constructed value and then it calls \p func functor with signature \code void func( value_type& item ); \endcode The argument \p item of user-defined functor \p func is the reference to the list's item inserted. When \p func is called it has exclusive access to the item. The user-defined functor is called only if the inserting is successful. The type \p Q should contain the complete key of the node. The object of \p value_type should be constructible from \p key of type \p Q. The function allows splitting the creation of a new item into two parts: - create item from \p key with initializing key-fields only; - insert new item into the list; - if inserting is successful, initialize non-key fields of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. */ template bool insert( Q&& key, Func func ) { return insert_at( head(), std::forward( key ), func ); } /// Inserts data of type \p value_type constructed from \p args /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { return emplace_at( head(), std::forward(args)... ); } /// Updates data by \p key /** The operation performs inserting or replacing the element in a lock-free manner.
If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, Q const& key ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p key - argument \p key passed into the \p %update() function The functor may change non-key fields of the \p item; during \p func call \p item is locked so it is safe to modify the item in multi-threaded environment. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( Q const& key, Func func, bool bAllowInsert = true ) { return update_at( head(), key, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& key, Func f ) { return update( key, f, true ); } //@endcond /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyList_hp_erase_val Since the key of LazyList's item type \p T is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyList_hp_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), [](value_type const&){} ); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyList_hp_erase_func The function searches an item with key \p key, calls \p f functor with item found and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(const value_type& val) { ... } }; \endcode Since the key of LazyList's item type \p T is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. Return \p true if key is found and deleted, \p false otherwise See also: \ref erase */ template bool erase( Q const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyList_hp_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. 
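For example, a sketch (the \p less_by_key predicate, the \p foo layout, and the \p theList object are hypothetical): \code
struct less_by_key {
    bool operator()( foo const& v, int k ) const { return v.key < k; }
    bool operator()( int k, foo const& v ) const { return k < v.key; }
};
// erase the item with key 5 and observe the value being deleted
theList.erase_with( 5, less_by_key(), []( foo const& val ) {
    // val is still accessible here, inside the erase call
});
\endcode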
*/ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts the item from the list with specified \p key /** \anchor cds_nonintrusive_LazyList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::LazyList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard and frees the item } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_at( head(), key, intrusive_key_comparator()); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \ref cds_nonintrusive_LazyList_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( head(), key, typename maker::template less_wrapper()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and performs an action with it /** \anchor cds_nonintrusive_LazyList_hp_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor may modify both arguments. The function returns \p true if \p key is found, \p false otherwise. 
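For example, a sketch (the \p payload field and the \p theList object are hypothetical): \code
struct foo {
    int key;
    int payload; // non-key field
};
int k = 5;
int sum = 0;
theList.find( k, [&sum]( foo& item, int& /*key*/ ) {
    sum += item.payload; // read or modify non-key data of the found item
});
\endcode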
*/ template bool find( Q& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyList_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@endcond /// Finds the key \p key and returns the item found /** \anchor cds_nonintrusive_LazyList_hp_get The function searches the item with key equal to \p key and returns the item found as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one GC guard, which can be a limited resource. Usage: \code typedef cds::container::LazyList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp( theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard and frees the item } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may differ from \p value_type. */ template guarded_ptr get( Q const& key ) { return get_at( head(), key, intrusive_key_comparator()); } /// Finds the key \p key and returns the item found /** The function is an analog of \ref cds_nonintrusive_LazyList_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks whether the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on \p Traits::item_counter type. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use a real item counter and it returns 0, this does not mean that the list is empty. To check list emptiness use the \ref empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond static value_type& node_to_value( node_type& n ) { return n.m_Value; } static value_type const& node_to_value( node_type const& n ) { return n.m_Value; } template static node_type * alloc_node( Q const& v ) { return cxx_allocator().New( v ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )...
); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } head_type& head() { return base_class::m_Head; } head_type const& head() const { return base_class::m_Head; } head_type& tail() { return base_class::m_Tail; } head_type const& tail() const { return base_class::m_Tail; } bool insert_node( node_type * pNode ) { return insert_node_at( head(), pNode ); } bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *pNode )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, Q&& val ) { return insert_node_at( refHead, alloc_node( std::forward( val ))); } template bool emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args)... )); } template bool insert_at( head_type& refHead, Q&& key, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node_to_value(node)); } )) { pNode.release(); return true; } return false; } template bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) { return base_class::erase_at( &refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); } template guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::extract_at( &refHead, key, cmp ); } template std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_at( &refHead, *pNode, [&f, &key](bool bNew, node_type& node, node_type&) { f( bNew, node_to_value(node), key );}, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template bool find_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::find_at( &refHead, key, cmp ); } template bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) { return base_class::find_at( &refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); } template guarded_ptr get_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::get_at( &refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_LAZY_LIST_H libcds-2.3.3/cds/container/impl/michael_kvlist.h000066400000000000000000001015571341244201700216310ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H #define CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H #include #include namespace cds { namespace container { /// Michael's ordered list for key-value pair /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_MichaelKVList_gc This is key-value variation of non-intrusive MichaelList. Like standard container, this implementation split a value stored into two part - constant key and alterable value. Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N) where \p N is the item count in the list, not in the hash table. Template arguments: - \p GC - garbage collector used - \p Key - key type of an item stored in the list. 
It should be copy-constructible - \p Value - value type stored in a list - \p Traits - type traits, default is \p michael_list::traits It is possible to declare option-based list with \p cds::container::michael_list::make_traits metafunction instead of \p Traits template argument. For example, the following traits-based declaration of \p gc::HP Michael's list \code #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::michael_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::MichaelKVList< cds::gc::HP, int, int, my_traits > traits_based_list; \endcode is equivalent for the following option-based list \code #include // my_compare is the same // Declare option-based list typedef cds::container::MichaelKVList< cds::gc::HP, int, int, typename cds::container::michael_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema used. You should include appropriate .h-file depending on GC you are using: - for gc::HP: \code #include \endcode - for gc::DHP: \code #include \endcode - for \ref cds_urcu_desc "RCU": \code #include \endcode - for gc::nogc: \code #include \endcode */ template < typename GC, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = michael_list::traits #else typename Traits #endif > class MichaelKVList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::MichaelList< GC, implementation_defined, Traits > #else protected details::make_michael_kvlist< GC, Key, Value, Traits >::type #endif { //@cond typedef details::make_michael_kvlist< GC, Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type ; ///< Key type typedef Value mapped_type ; ///< Type of value stored in the list typedef std::pair value_type ; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::value_type mapped_type; typedef typename maker::pair_type value_type; #endif typedef typename base_class::gc gc; ///< Garbage collector used typedef Traits traits; ///< List traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::atomic_node_ptr head_type; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_map > guarded_ptr; protected: //@cond template static node_type * alloc_node(const K& key) { return cxx_allocator().New( key ); } template static node_type * alloc_node( const K& key, const V& val ) { return cxx_allocator().New( key, val ); } template static node_type * alloc_node( K&& key, Args&&... args ) { return cxx_allocator().MoveNew( std::forward(key), std::forward(args)...); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; head_type& head() { return base_class::m_pHead; } head_type const& head() const { return base_class::m_pHead; } //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( pNode ) {} friend class MichaelKVList; public: typedef typename cds::details::make_const_type::reference value_ref; typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference pair_ref; typedef typename cds::details::make_const_type::pointer pair_ptr; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.first; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? &(p->m_Data) : nullptr; } pair_ref operator *() const { typename iterator_base::value_ref p = iterator_base::operator *(); return p.m_Data; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.second; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: /// Forward iterator /** The forward iterator for Michael's list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. 
For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if a limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the list. @warning Use this iterator on the concurrent container for debugging purpose only. The iterator interface to access item data: - operator -> - returns a pointer to \ref value_type for iterator - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator - const key_type& key() - returns a key reference for iterator - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) For both functions the iterator should not be equal to end() */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; ///@name Forward iterators (only for debugging purpose) //@{ /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( head()); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( head()); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( head()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor /** Initializes empty list */ MichaelKVList() {} //@cond template >::value >> explicit MichaelKVList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~MichaelKVList() { clear(); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the list. Preconditions: - The \p key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K&& key ) { return insert_at( head(), std::forward( key )); } /// Inserts new node with a key and a value /** The function creates a node with \p key and value \p val, and then inserts the node created into the list. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. Returns \p true if inserting successful, \p false otherwise. 
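A minimal usage sketch (assuming \p libcds and the \p gc::HP garbage collector are already initialized and the current thread is attached to the GC):
\code
typedef cds::container::MichaelKVList< cds::gc::HP, int, std::string > kv_list;
kv_list theList;
theList.insert( 1 );          // key 1, default-constructed (empty) value
theList.insert( 2, "two" );   // key 2, value "two"
\endcode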
*/ template bool insert( K&& key, V&& val ) { // We cannot use insert with functor here // because we cannot lock inserted node for updating // Therefore, we use separate function return insert_at( head(), std::forward( key ), std::forward( val )); } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the item inserted. item.second is a reference to item's value that may be changed. User-defined functor \p func should guarantee that during changing item's value no any other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if inserting is successful. The \p key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create a new item from \p key; - insert the new item into the list; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert_with( K&& key, Func func ) { return insert_with_at( head(), std::forward( key ), func ); } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of the \p item.second of \p mapped_type; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K&& key, Func f, bool bAllowInsert = true ) { return update_at( head(), std::forward( key ), f, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func f ) { return update( key, f, true ); } //@endcond /// Inserts a new node using move semantics /** \p key_type field of new item is constructed from \p key argument, \p mapped_type field is done from \p args. Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { return emplace_at( head(), std::forward(key), std::forward(args)... 
); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichaelKVList_hp_erase_val Returns \p true if \p key is found and has been deleted, \p false otherwise */ template bool erase( K const& key ) { return erase_at( head(), key, intrusive_key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper()); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichaelKVList_hp_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& val) { ... } }; \endcode Return \p true if key is found and deleted, \p false otherwise See also: \ref erase */ template bool erase( K const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts the item from the list with specified \p key /** \anchor cds_nonintrusive_MichaelKVList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. The \p disposer specified in \p Traits class template parameter is called automatically by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::MichaelKVList< cds::gc::HP, int, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( K const& key ) { return extract_at( head(), key, intrusive_key_comparator()); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_extract "extract(K const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list. 
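For example (a sketch; \p my_less is an assumed predicate equivalent to \p std::less<int> for \p int keys):
\code
struct my_less {
    bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
};
typedef cds::container::MichaelKVList< cds::gc::HP, int, std::string > kv_list;
kv_list theList;
// ...
{
    kv_list::guarded_ptr gp( theList.extract_with( 5, my_less()));
    if ( gp ) {
        // The pair with key 5 has been unlinked and is protected by gp
    }
}
\endcode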
*/ template guarded_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( head(), key, typename maker::template less_wrapper()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return contains( key, pred ); } //@endcond /// Finds the key \p key and performs an action with it /** \anchor cds_nonintrusive_MichaelKVList_hp_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change item.second that is reference to value of node. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q const& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } /// Finds the \p key and return the item found /** \anchor cds_nonintrusive_MichaelKVList_hp_get The function searches the item with key equal to \p key and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::container::MichaelKVList< cds::gc::HP, int, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p K that can be not the same as \p key_type. 
*/ template guarded_ptr get( K const& key ) { return get_at( head(), key, intrusive_key_comparator()); } /// Finds the \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_get "get( guarded_ptr& ptr, K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr get_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact is not mean that the list is empty. To check list emptyness use \p empty() method. */ size_t size() const { return base_class::size(); } /// Clears the list void clear() { base_class::clear(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } protected: //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( refHead, *pNode )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, K&& key ) { return insert_node_at( refHead, alloc_node( std::forward( key ))); } template bool insert_at( head_type& refHead, K&& key, V&& val ) { return insert_node_at( refHead, alloc_node( std::forward( key ), std::forward( val ))); } template bool insert_with_at( head_type& refHead, K&& key, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); })) { pNode.release(); return true; } return false; } template bool emplace_at( head_type& refHead, K&& key, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... 
)); } template std::pair update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); std::pair ret = base_class::update_at( refHead, *pNode, [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template bool erase_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::erase_at( refHead, key, cmp ); } template bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) { return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ f( const_cast(node.m_Data)); }); } template guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::extract_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::find_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) { return base_class::find_at( refHead, key, cmp, [&f](node_type& node, K const&){ f( node.m_Data ); }); } template guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::get_at( refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H libcds-2.3.3/cds/container/impl/michael_list.h000066400000000000000000000754361341244201700212760ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H #define CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H #include #include namespace cds { namespace container { /// Michael's ordered list /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_MichaelList_gc Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N), where \p N is the item count in the list, not in the hash table. Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" This class is non-intrusive version of cds::intrusive::MichaelList class Template arguments: - \p GC - garbage collector used - \p T - type stored in the list. The type must be default- and copy-constructible. - \p Traits - type traits, default is \p michael_list::traits Unlike standard container, this implementation does not divide type \p T into key and value part and may be used as a main building block for hash set algorithms. The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor or Traits::less predicate MichaelKVList is a key-value version of Michael's non-intrusive list that is closer to the C++ std library approach. It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template argument. 
For example, the following traits-based declaration of gc::HP Michael's list \code #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::michael_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::MichaelList< cds::gc::HP, int, my_traits > traits_based_list; \endcode is equivalent for the following option-based list \code #include // my_compare is the same // Declare option-based list typedef cds::container::MichaelList< cds::gc::HP, int, typename cds::container::michael_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema used. You should include appropriate .h-file depending on GC you are using: - for gc::HP: \code #include \endcode - for gc::DHP: \code #include \endcode - for \ref cds_urcu_desc "RCU": \code #include \endcode - for gc::nogc: \code #include \endcode */ template < typename GC, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = michael_list::traits #else typename Traits #endif > class MichaelList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::MichaelList< GC, T, Traits > #else protected details::make_michael_list< GC, T, Traits >::type #endif { //@cond typedef details::make_michael_list< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef T value_type; ///< Type of value stored in the list typedef Traits traits; ///< List traits typedef typename base_class::gc gc; ///< Garbage collector used typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::atomic_node_ptr head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( pNode ) {} friend class MichaelList; public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? &(p->m_Value) : nullptr; } value_ref operator *() const { return (iterator_base::operator *()).m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** The forward iterator for Michael's list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if a limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the list. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. @warning Use this iterator on the concurrent container for debugging purpose only. 
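A debugging sketch (assuming the \p traits_based_list type from the example above, single-threaded access, and <iostream> included):
\code
traits_based_list theList;
// ...
for ( traits_based_list::iterator it = theList.begin(); it != theList.end(); ++it )
    std::cout << *it << std::endl;
\endcode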
*/ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( head()); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( head()); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( head()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor /** Initialize empty list */ MichaelList() {} //@cond template >::value >> explicit MichaelList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~MichaelList() { clear(); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the list. The type \p Q should contain least the complete key of the node. The object of \ref value_type should be constructible from \p val of type \p Q. In trivial case, \p Q is equal to \ref value_type. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( Q&& val ) { return insert_at( head(), std::forward( val )); } /// Inserts new node /** This function inserts new node with default-constructed value and then it calls \p func functor with signature \code void func( value_type& itemValue ) ;\endcode The argument \p itemValue of user-defined functor \p func is the reference to the list's item inserted. User-defined functor \p func should guarantee that during changing item's value no any other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if inserting is success. The type \p Q should contain the complete key of the node. The object of \p value_type should be constructible from \p key of type \p Q. The function allows to split creating of new item into two part: - create item from \p key with initializing key-fields only; - insert new item into the list; - if inserting is successful, initialize non-key fields of item by calling \p func functor The method can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert( Q&& key, Func func ) { return insert_at( head(), std::forward(key), func ); } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. 
Otherwise, if \p key is found, the functor \p func is called with the item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, Q const& key ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p key - argument \p key passed into the \p %update() function The functor may change non-key fields of the \p item; however, \p func must guarantee that no other modifications can be made on this item by concurrent threads while the functor is running. Returns std::pair<bool, bool> where \p first is true if the operation is successful, \p second is true if a new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template <typename Q, typename Func> std::pair<bool, bool> update( Q const& key, Func func, bool bAllowInsert = true ) { return update_at( head(), key, func, bAllowInsert ); } //@cond template <typename Q, typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( Q const& key, Func func ) { return update( key, func ); } //@endcond /// Inserts data of type \p value_type constructed with std::forward<Args>(args)... /** Returns \p true if inserting is successful, \p false otherwise. */ template <typename... Args> bool emplace( Args&&... args ) { return emplace_at( head(), std::forward<Args>(args)... ); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichealList_hp_erase_val Since the key of MichaelList's item type \p value_type is not explicitly specified, template parameter \p Q should contain the complete key to search in the list. The list item comparator should be able to compare the type \p value_type and the type \p Q. Returns \p true if the key is found and deleted, \p false otherwise */ template <typename Q> bool erase( Q const& key ) { return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichealList_hp_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template <typename Q, typename Less> bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper<Less>(), [](value_type const&){} ); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichaelList_hp_erase_func The function searches an item with key \p key, calls \p f functor with the item found and deletes it. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(const value_type& val) { ... } }; \endcode Since the key of MichaelList's item type \p value_type is not explicitly specified, template parameter \p Q should contain the complete key to search in the list. The list item comparator should be able to compare the type \p value_type of the list item and the type \p Q. Returns \p true if the key is found and deleted, \p false otherwise */ template <typename Q, typename Func> bool erase( Q const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelList_hp_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts the item from the list with specified \p key /** \anchor cds_nonintrusive_MichaelList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard and frees the item } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_at( head(), key, intrusive_key_comparator()); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \ref cds_nonintrusive_MichaelList_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but it should accept arguments of type \p value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( head(), key, typename maker::template less_wrapper()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and perform an action with it /** \anchor cds_nonintrusive_MichaelList_hp_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. 
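For example (a sketch: \p foo and \p my_traits are assumed as in the \p extract() example above; \p nCount is an assumed non-key field of \p foo):
\code
typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list;
ord_list theList;
// ...
int nKey = 5;
bool bFound = theList.find( nKey, []( foo& item, int& /*key*/ ) {
    ++item.nCount;  // nCount is an assumed field
});
\endcode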
*/ template bool find( Q& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } //@endcond /// Finds \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelList_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@endcond /// Finds \p key and return the item found /** The function searches the item with key equal to \p key and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard and frees the item } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) { return get_at( head(), key, intrusive_key_comparator()); } /// Finds \p key and return the item found /** The function is an analog of \p get( Q const&) but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should accept arguments of type \p value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Check if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact is not mean that the list is empty. To check list emptyness use \p empty() method. */ size_t size() const { return base_class::size(); } /// Clears the list void clear() { base_class::clear(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } protected: //@cond static value_type& node_to_value( node_type& n ) { return n.m_Value; } static value_type const& node_to_value( node_type const& n ) { return n.m_Value; } template static node_type * alloc_node( Q const& v ) { return cxx_allocator().New( v ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... 
); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } head_type& head() { return base_class::m_pHead; } head_type const& head() const { return base_class::m_pHead; } bool insert_node( node_type * pNode ) { return insert_node_at( head(), pNode ); } bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode ); scoped_node_ptr p(pNode); if ( base_class::insert_at( refHead, *pNode )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, Q&& val ) { return insert_node_at( refHead, alloc_node( std::forward(val))); } template bool insert_at( head_type& refHead, Q&& key, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { f( node_to_value(node)); } )) { pNode.release(); return true; } return false; } template bool emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args) ... )); } template bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) { return base_class::erase_at( refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); } template guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::extract_at( refHead, key, cmp ); } template std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_at( refHead, *pNode, [&f, &key](bool bNew, node_type& node, node_type&){ f( bNew, node_to_value(node), key );}, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template bool find_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::find_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) { return base_class::find_at( refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); } template guarded_ptr get_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::get_at( refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H libcds-2.3.3/cds/container/impl/skip_list_map.h000066400000000000000000000671021341244201700214660ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H #define CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H #include namespace cds { namespace container { /// Lock-free skip-list map /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_SkipListMap_hp The implementation of well-known probabilistic data structure called skip-list invented by W.Pugh in his papers: - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees - [1990] W.Pugh A Skip List Cookbook A skip-list is a probabilistic data structure that provides expected logarithmic time search without the need of rebalance. The skip-list is a collection of sorted linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. Each list has a level, ranging from 0 to 32. The bottom-level list contains all the nodes, and each higher-level list is a sublist of the lower-level lists. Each node is created with a random top level (with a random height), and belongs to all lists up to that level. 
The probability that a node has height 1 is 1/2. The probability that a node has height N is 1/2^N (more precisely, the distribution depends on the random level generator provided, but the generators shipped with the library have this property). The lock-free variant of the skip-list is implemented according to the book - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", chapter 14.4 "A Lock-Free Concurrent Skiplist" Template arguments: - \p GC - Garbage collector used. - \p K - type of a key to be stored in the list. - \p T - type of a value to be stored in the list. - \p Traits - map traits, default is \p skip_list::traits It is possible to declare an option-based list with \p cds::container::skip_list::make_traits metafunction instead of \p Traits template argument. Like the STL map class, \p %SkipListMap stores its key-value pairs as std::pair< K const, T>. @warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which the guard count is limited (like \p gc::HP). Such GCs should be explicitly initialized with enough hazard pointers: \code cds::gc::HP myhp( 67 ); \endcode Otherwise, a run-time exception may be raised when you try to create a skip-list object. @note There are several specializations of \p %SkipListMap for each \p GC. You should include: - for \p gc::HP garbage collector - for \p gc::DHP garbage collector - for \ref cds_nonintrusive_SkipListMap_rcu "RCU type" - for \ref cds_nonintrusive_SkipListMap_nogc "non-deletable SkipListMap" */ template < typename GC, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = skip_list::traits #else typename Traits #endif > class SkipListMap: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SkipListSet< GC, std::pair< Key const, T >, Traits > #else protected details::make_skip_list_map< GC, Key, T, Traits >::type #endif { //@cond typedef details::make_skip_list_map< GC, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef Key key_type; ///< Key type typedef T mapped_type; ///< Mapped type typedef Traits traits; ///< Map traits # ifdef CDS_DOXYGEN_INVOKED typedef std::pair< Key const, T> value_type; ///< Key-value pair to be stored in the map # else typedef typename maker::value_type value_type; # endif typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename traits::allocator allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model typedef typename traits::random_level_generator random_level_generator; ///< random level generator typedef typename traits::stat stat; ///< internal statistics type static size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointers required for the skip-list protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::node_allocator node_allocator; typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set< node_type, value_type > > guarded_ptr; protected: //@cond unsigned int random_level() { return base_class::random_level(); } //@endcond public: /// Default ctor SkipListMap() :
base_class() {} /// Destructor destroys the map object ~SkipListMap() {} public: ///@name Forward iterators (only for debugging purpose) //@{ /// Iterator type /** The forward iterator has some features: - it is ordered - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (like \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across a thread boundary because it contains a thread-private GC guard. - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate over all items in the list. Moreover, a crash is possible when you try to iterate the next element that has been deleted by a concurrent thread. @warning Use this iterator on the concurrent container for debugging purpose only. @note \p end() and \p cend() are not dereferenceable. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef skip_list::details::iterator< typename base_class::iterator > iterator; /// Const forward iterator type typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; /// Returns a forward iterator addressing the first element in a map iterator begin() { return iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a map const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a map const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a forward iterator that addresses the location succeeding the last element in a map. iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a map. const_iterator end() const { return cend(); } /// Returns a forward const iterator that addresses the location succeeding the last element in a map. const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. In the trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. Returns \p true if inserting is successful, \p false otherwise. */ template <typename K> bool insert( K const& key ) { return insert_with( key, [](value_type&){} ); } /// Inserts new node /** The function creates a node with a copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p value_type should be constructible from \p val of type \p V.
Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( K const& key, V const& val ) { return insert_with( key, [&val]( value_type& item ) { item.second = val; } ); } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. \p key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. */ template bool insert_with( K const& key, Func func ) { scoped_node_ptr pNode( node_allocator().New( random_level(), key )); if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_Value ); } )) { pNode.release(); return true; } return false; } /// For key \p key inserts data of type \p value_type created in-place from std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr pNode( node_allocator().New( random_level(), std::forward(key), std::forward(args)... )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Updates data by \p key /** The operation performs inserting or changing data with lock-free manner. If the \p key not found in the map, then the new item created from \p key will be inserted into the map iff \p bInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode where: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the map The functor may change any fields of the \p item.second that is \ref value_type. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if \p key already exists. 
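A possible usage sketch (hedged; \p my_traits is again a placeholder for a suitable traits type): \code
typedef cds::container::SkipListMap< cds::gc::HP, int, int, my_traits > map_type;
map_type theMap;
// Insert the pair (10, 1), or increment the existing value for key 10
std::pair<bool, bool> res = theMap.update( 10,
    []( bool bNew, map_type::value_type& item ) {
        if ( bNew )
            item.second = 1;  // the item has just been inserted
        else
            ++item.second;    // the item already existed
    });
// res.first - the operation succeeded; res.second - a new item was inserted
\endcode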
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K const& key, Func func, bool bInsert = true ) { scoped_node_ptr pNode( node_allocator().New( random_level(), key )); std::pair res = base_class::update( *pNode, [&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_Value );}, bInsert ); if ( res.first && res.second ) pNode.release(); return res; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Delete \p key from the map /** \anchor cds_nonintrusive_SkipListMap_erase_val Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( K const& key ) { return base_class::erase(key); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListMap_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); } /// Delete \p key from the map /** \anchor cds_nonintrusive_SkipListMap_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return base_class::erase( key, [&f]( node_type& node) { f( node.m_Value ); } ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListMap_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), [&f]( node_type& node) { f( node.m_Value ); } ); } /// Extracts the item from the map with specified \p key /** \anchor cds_nonintrusive_SkipListMap_hp_extract The function searches an item with key equal to \p key in the map, unlinks it from the map, and returns a guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. The item extracted is freed automatically by garbage collector \p GC when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.extract( 5 )); if ( gp ) { // Deal with gp // ... 
} // Destructor of gp releases internal HP guard and frees the pointer } \endcode */ template <typename K> guarded_ptr extract( K const& key ) { return base_class::extract_( key, typename base_class::key_comparator()); } /// Extracts the item from the map with comparing functor \p pred /** The function is an analog of \ref cds_nonintrusive_SkipListMap_hp_extract "extract(K const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> guarded_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less; return base_class::extract_( key, cds::opt::details::make_comparator_from_less< wrapped_less >()); } /// Extracts an item with minimal key from the map /** The function searches an item with minimal key, unlinks it, and returns a guarded pointer to the item found. If the skip-list is empty the function returns an empty guarded pointer. The item extracted is freed automatically by garbage collector \p GC when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.extract_min()); if ( gp ) { // Deal with gp //... } // Destructor of gp releases internal HP guard and then frees the pointer } \endcode */ guarded_ptr extract_min() { return base_class::extract_min_(); } /// Extracts an item with maximal key from the map /** The function searches an item with maximal key, unlinks it, and returns a guarded pointer to the item found. If the skip-list is empty the function returns an empty \p guarded_ptr. The item found is freed by garbage collector \p GC automatically when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.extract_max()); if ( gp ) { // Deal with gp //... } // Destructor of gp releases internal HP guard and then frees the pointer } \endcode */ guarded_ptr extract_max() { return base_class::extract_max_(); } /// Finds the key \p key /** \anchor cds_nonintrusive_SkipListMap_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function returns \p true if \p key is found, \p false otherwise. */ template <typename K, typename Func> bool find( K const& key, Func f ) { return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_Value );}); } /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListMap_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map.
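For instance, a suitable \p Less for a map keyed by \p std::string that is searched with C-string keys might look like this (an illustrative sketch only; \p my_traits is a placeholder): \code
struct str_less {
    bool operator()( std::string const& lhs, char const* rhs ) const { return lhs.compare( rhs ) < 0; }
    bool operator()( char const* lhs, std::string const& rhs ) const { return rhs.compare( lhs ) > 0; }
    bool operator()( std::string const& lhs, std::string const& rhs ) const { return lhs < rhs; }
};
typedef cds::container::SkipListMap< cds::gc::HP, std::string, int, my_traits > str_map;
str_map theMap;
// ...
theMap.find_with( "alpha", str_less(), []( str_map::value_type& item ) { ++item.second; });
\endcode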
*/ template bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), [&f](node_type& item, K const& ) { f( item.m_Value );}); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( K const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and return the item found /** \anchor cds_nonintrusive_SkipListMap_hp_get The function searches the item with key equal to \p key and returns a guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer. It is safe when a concurrent thread erases the item returned as \p guarded_ptr. In this case the item will be freed later by garbage collector \p GC automatically when \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p K that can be not the same as \p value_type. */ template guarded_ptr get( K const& key ) { return base_class::get_with_( key, typename base_class::key_comparator()); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_SkipListMap_hp_get "get( K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. 
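Usage might look like this (a sketch reusing the hypothetical \p str_less and \p str_map from \p find_with() above): \code
str_map::guarded_ptr gp( theMap.get_with( "alpha", str_less()));
if ( gp ) {
    // the item is protected from reclamation while gp is alive
}
\endcode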
*/ template <typename K, typename Less> guarded_ptr get_with( K const& key, Less pred ) { CDS_UNUSED( pred ); typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less; return base_class::get_with_( key, cds::opt::details::make_comparator_from_less< wrapped_less >()); } /// Clears the map void clear() { base_class::clear(); } /// Checks if the map is empty bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H libcds-2.3.3/cds/container/impl/skip_list_set.h000066400000000000000000000746011341244201700215060ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H #define CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H #include #include namespace cds { namespace container { /// Lock-free skip-list set /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_SkipListSet_hp The implementation of the well-known probabilistic data structure called skip-list, invented by W.Pugh in his papers: - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees - [1990] W.Pugh A Skip List Cookbook A skip-list is a probabilistic data structure that provides expected logarithmic time search without the need of rebalance. The skip-list is a collection of sorted linked lists. Nodes are ordered by key. Each node is linked into a subset of the lists. Each list has a level, ranging from 0 to 32. The bottom-level list contains all the nodes, and each higher-level list is a sublist of the lower-level lists. Each node is created with a random top level (with a random height), and belongs to all lists up to that level. The probability that a node has the height 1 is 1/2. The probability that a node has the height N is 1/2 ** N (more precisely, the distribution depends on the random generator provided, but our generators have this property). The lock-free variant of skip-list is implemented according to the book - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", chapter 14.4 "A Lock-Free Concurrent Skiplist" Template arguments: - \p GC - Garbage collector used. - \p T - type to be stored in the list. - \p Traits - set traits, default is \p skip_list::traits. It is possible to declare option-based list with \p cds::container::skip_list::make_traits metafunction instead of \p Traits template argument. @warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which the guard count is limited (like \p gc::HP). Those GCs should be explicitly initialized with enough hazard pointers: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise a run-time exception may be raised when you try to create a skip-list object. @note There are several specializations of \p %SkipListSet for each \p GC. You should include: - <cds/container/skip_list_set_hp.h> for \p gc::HP garbage collector - <cds/container/skip_list_set_dhp.h> for \p gc::DHP garbage collector - <cds/container/skip_list_set_rcu.h> for \ref cds_nonintrusive_SkipListSet_rcu "RCU type" - <cds/container/skip_list_set_nogc.h> for \ref cds_nonintrusive_SkipListSet_nogc "non-deletable SkipListSet" Iterators: the class supports a forward iterator (\ref iterator and \ref const_iterator). The iteration is ordered.
The iterator object is thread-safe: the element pointed by the iterator object is guarded, so, the element cannot be reclaimed while the iterator object is alive. However, passing an iterator object between threads is dangerous. \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate all elements in the set: any concurrent deletion can exclude the element pointed by the iterator from the set, and your iteration can be terminated before end of the set. Therefore, such iteration is more suitable for debugging purpose only Remember, each iterator object requires 2 additional hazard pointers, that may be a limited resource for \p GC like \p gc::HP (for \p gc::DHP the count of guards is unlimited). The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note, the iterator object returned by \p end(), \p cend() member functions points to \p nullptr and should not be dereferenced. */ template < typename GC, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = skip_list::traits #else typename Traits #endif > class SkipListSet: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SkipListSet< GC, T, Traits > #else protected details::make_skip_list_set< GC, T, Traits >::type #endif { //@cond typedef details::make_skip_list_set< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector used typedef T value_type; ///< @anchor cds_containewr_SkipListSet_value_type Value type to be stored in the set typedef Traits traits; ///< Options specified typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename traits::allocator allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename traits::random_level_generator random_level_generator; ///< random level generator typedef typename traits::stat stat; ///< internal statistics type static size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the skip-list protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::node_allocator node_allocator; typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; protected: //@cond unsigned int random_level() { return base_class::random_level(); } //@endcond public: /// Default ctor SkipListSet() : base_class() {} /// Destructor destroys the set object ~SkipListSet() {} public: ///@name Forward iterators (only for debugging purpose) //@{ /// Iterator type /** The forward iterator has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate all item in the list. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. @warning Use this iterator on the concurrent container for debugging purpose only. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef skip_list::details::iterator< typename base_class::iterator > iterator; /// Const iterator type typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; /// Returns a forward iterator addressing the first element in a set iterator begin() { return iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return const_iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a forward iterator that addresses the location succeeding the last element in a set. iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator end() const { return const_iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. 
const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain as minimum the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q const& val ) { scoped_node_ptr sp( node_allocator().New( random_level(), val )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Inserts new node /** The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this set's item by concurrent threads. The user-defined functor is called only if the inserting is success. */ template bool insert( Q const& val, Func f ) { scoped_node_ptr sp( node_allocator().New( random_level(), val )); if ( base_class::insert( *sp.get(), [&f]( node_type& v ) { f( v.m_Value ); } )) { sp.release(); return true; } return false; } /// Updates the item /** The operation performs inserting or changing data with lock-free manner. If the \p val key not found in the set, then the new item created from \p val will be inserted into the set iff \p bInsert is \p true. Otherwise, if \p val is found, the functor \p func will be called with the item found. The functor \p Func signature: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode where: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p key passed into the \p %update() function The functor may change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. Returns std::pair where \p first is \p true if operation is successful, i.e. the item has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with key equal to \p val already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( const Q& val, Func func, bool bInsert = true ) { scoped_node_ptr sp( node_allocator().New( random_level(), val )); std::pair bRes = base_class::update( *sp, [&func, &val](bool bNew, node_type& node, node_type&){ func( bNew, node.m_Value, val ); }, bInsert ); if ( bRes.first && bRes.second ) sp.release(); return bRes; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const Q& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts data of type \p value_type created in-place from std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { scoped_node_ptr sp( node_allocator().New( random_level(), std::forward(args)... 
)); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Delete \p key from the set /** \anchor cds_nonintrusive_SkipListSet_erase_val The set item comparator should be able to compare the type \p value_type and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return base_class::erase( key ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListSet_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); } /// Delete \p key from the set /** \anchor cds_nonintrusive_SkipListSet_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Since the key of \p value_type is not explicitly specified, template parameter \p Q defines the key type to search in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { return base_class::erase( key, [&f]( node_type const& node) { f( node.m_Value ); } ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), [&f]( node_type const& node) { f( node.m_Value ); } ); } /// Extracts the item from the set with specified \p key /** \anchor cds_nonintrusive_SkipListSet_hp_extract The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. The item extracted is freed automatically by garbage collector \p GC when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp(theList.extract( 5 )) if ( gp ) { // Deal with gp // ... 
} // Destructor of gp releases internal HP guard and frees the pointer } \endcode */ template <typename Q> guarded_ptr extract( Q const& key ) { return base_class::extract_( key, typename base_class::key_comparator()); } /// Extracts the item from the set with comparing functor \p pred /** The function is an analog of \ref cds_nonintrusive_SkipListSet_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less; return base_class::extract_( key, cds::opt::details::make_comparator_from_less< wrapped_less >()); } /// Extracts an item with minimal key from the set /** The function searches an item with minimal key, unlinks it, and returns a pointer to the item found as \p guarded_ptr. If the skip-list is empty the function returns an empty guarded pointer. The item extracted is freed automatically by garbage collector \p GC when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.extract_min()); if ( gp ) { // Deal with gp //... } // Destructor of gp releases internal HP guard and then frees the pointer } \endcode */ guarded_ptr extract_min() { return base_class::extract_min_(); } /// Extracts an item with maximal key from the set /** The function searches an item with maximal key, unlinks it, and returns a pointer to the item found as \p guarded_ptr. If the skip-list is empty the function returns an empty guarded pointer. The item found is freed by garbage collector \p GC automatically when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.extract_max()); if ( gp ) { // Deal with gp //... } // Destructor of gp releases internal HP guard and then frees the pointer } \endcode */ guarded_ptr extract_max() { return base_class::extract_max_(); } /// Finds the \p key /** \anchor cds_nonintrusive_SkipListSet_find_func The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may not be the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise.
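A usage sketch (hedged; the \p foo value type with an \p nCounter field and \p my_traits are illustrative only): \code
typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > set_type;
set_type theSet;
int k = 5;
theSet.find( k, []( foo& item, int& ) {
    ++item.nCounter;  // update a non-key field of the item found
});
\endcode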
*/ template <typename Q, typename Func> bool find( Q& key, Func f ) { return base_class::find( key, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); }); } //@cond template <typename Q, typename Func> bool find( Q const& key, Func f ) { return base_class::find( key, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); } //@endcond /// Finds \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListSet_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); } //@cond template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), [&f]( node_type& node, Q const& v ) { f( node.m_Value, v ); } ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template <typename Q> bool contains( Q const& key ) { return base_class::contains( key ); } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); } //@cond template <typename Q, typename Less> CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and returns the item found /** \anchor cds_nonintrusive_SkipListSet_hp_get The function searches the item with key equal to \p key and returns a guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer. It is safe when a concurrent thread erases the item returned as \p guarded_ptr. In this case the item will be freed later by garbage collector \p GC automatically when the \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses one GC's guard which can be a limited resource. Usage: \code typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that need not be the same as \p value_type. */ template <typename Q> guarded_ptr get( Q const& key ) { return base_class::get_with_( key, typename base_class::key_comparator()); } /// Finds \p key and returns the item found /** The function is an analog of \ref cds_nonintrusive_SkipListSet_hp_get "get(Q const&)" but \p pred is used for comparing the keys.
\p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less; return base_class::get_with_( key, cds::opt::details::make_comparator_from_less< wrapped_less >()); } /// Clears the set (not atomic). /** The function deletes all items from the set. The function is not atomic, thus, in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each item the \ref disposer provided by \p Traits template parameter will be called. */ void clear() { base_class::clear(); } /// Checks if the set is empty bool empty() const { return base_class::empty(); } /// Returns item count in the set /** The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. Therefore, the function is not suitable for checking the set emptiness, use \p empty() member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H libcds-2.3.3/cds/container/iterable_kvlist_dhp.h000066400000000000000000000010461341244201700217000ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_DHP_H #define CDSLIB_CONTAINER_ITERABLE_KVLIST_DHP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_DHP_H libcds-2.3.3/cds/container/iterable_kvlist_hp.h000066400000000000000000000010421341244201700215300ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_HP_H #define CDSLIB_CONTAINER_ITERABLE_KVLIST_HP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_HP_H libcds-2.3.3/cds/container/iterable_list_dhp.h000066400000000000000000000010331341244201700213330ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ITERABLE_LIST_DHP_H #define CDSLIB_CONTAINER_ITERABLE_LIST_DHP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_ITERABLE_LIST_DHP_H libcds-2.3.3/cds/container/iterable_list_hp.h000066400000000000000000000010301341244201700211640ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_ITERABLE_LIST_HP_H #define CDSLIB_CONTAINER_ITERABLE_LIST_HP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_ITERABLE_LIST_HP_H libcds-2.3.3/cds/container/lazy_kvlist_dhp.h000066400000000000000000000010121341244201700210610ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_DHP_H #define CDSLIB_CONTAINER_LAZY_KVLIST_DHP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_DHP_H libcds-2.3.3/cds/container/lazy_kvlist_hp.h000066400000000000000000000010061341244201700207200ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_HP_H #define CDSLIB_CONTAINER_LAZY_KVLIST_HP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_HP_H libcds-2.3.3/cds/container/lazy_kvlist_nogc.h000066400000000000000000000530101341244201700212410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H #define CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H #include #include #include #include namespace cds { namespace container { /// Lazy ordered list (key-value pair, template specialization for gc::nogc) /** @ingroup cds_nonintrusive_list @anchor cds_nonintrusive_LazyKVList_nogc This specialization is append-only list when no item reclamation may be performed. The class does not support deleting of list's item. See @ref cds_nonintrusive_LazyList_gc "cds::container::LazyList" */ template < typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = lazy_list::traits #else typename Traits #endif > class LazyKVList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::LazyList< gc::nogc, implementation_defined, Traits > #else protected details::make_lazy_kvlist< cds::gc::nogc, Key, Value, Traits >::type #endif { //@cond typedef details::make_lazy_kvlist< cds::gc::nogc, Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef Traits traits; ///< List traits typedef cds::gc::nogc gc; ///< Garbage collector #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type ; ///< Key type typedef Value mapped_type ; ///< Type of value stored in the list typedef std::pair value_type ; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::mapped_type mapped_type; typedef typename maker::value_type value_type; #endif typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false) //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename base_class::key_comparator intrusive_key_comparator; typedef typename base_class::node_type head_type; //@endcond protected: //@cond template static node_type * alloc_node(const K& key) { return cxx_allocator().New( key ); } template static node_type * alloc_node( const K& key, const V& val ) { return cxx_allocator().New( key, val ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward(args)... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; head_type& head() { return base_class::m_Head; } head_type const& head() const { return base_class::m_Head; } head_type& tail() { return base_class::m_Tail; } head_type const& tail() const { return base_class::m_Tail; } //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& refNode ) : iterator_base( const_cast( &refNode )) {} explicit iterator_type( const iterator_base& it ) : iterator_base( it ) {} friend class LazyKVList; protected: explicit iterator_type( node_type& pNode ) : iterator_base( &pNode ) {} public: typedef typename cds::details::make_const_type::reference value_ref; typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference pair_ref; typedef typename cds::details::make_const_type::pointer pair_ptr; iterator_type() : iterator_base() {} iterator_type( const iterator_type& src ) : iterator_base( src ) {} key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const { typename iterator_base::value_ref p = iterator_base::operator *(); return p.m_Data; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } /// Post-increment iterator_type operator ++(int) { return iterator_base::operator ++(0); } template <bool C> bool operator ==(iterator_type<C> const& i ) const { return iterator_base::operator ==(i); } template <bool C> bool operator !=(iterator_type<C> const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator is safe: you may use it in a multi-threaded environment without any synchronization. The forward iterator for lazy list based on \p gc::nogc has pre- and post-increment operators. The iterator interface to access item data: - operator -> - returns a pointer to \p value_type - operator * - returns a reference (a const reference for \p const_iterator) to \p value_type - const key_type& key() - returns a key reference for iterator - mapped_type& val() - returns a value reference for iterator (const reference for \p const_iterator) For both functions the iterator should not be equal to \p end() */ typedef iterator_type<false> iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type<true> const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( head()); ++it ; // skip dummy head return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, the value returned by \p end() equals \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( tail()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { const_iterator it( head()); ++it ; // skip dummy head return it; } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( head()); ++it ; // skip dummy head return it; } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( tail()); } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( tail()); } //@} protected: //@cond iterator node_to_iterator( node_type * pNode ) { if ( pNode ) return iterator( *pNode ); return end(); } //@endcond public: /// Default constructor LazyKVList() {} //@cond template >::value >> explicit LazyKVList( Stat& st ) : base_class( st ) {} //@endcond /// Destructor clears the list ~LazyKVList() { clear(); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible.
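A hedged example (assuming \p my_traits is any suitable traits type): \code
typedef cds::container::LazyKVList< cds::gc::nogc, int, std::string, my_traits > list_type;
list_type theList;
list_type::iterator it = theList.insert( 3 ); // value is default-constructed
if ( it != theList.end())
    it.val() = "three"; // the list is append-only, but a value may be updated
\endcode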
Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename K> iterator insert( const K& key ) { return node_to_iterator( insert_at( head(), key )); } /// Inserts new node with a key and a value /** The function creates a node with \p key and value \p val, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref mapped_type should be constructible from \p val of type \p V. Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename K, typename V> iterator insert( const K& key, const V& val ) { // We cannot use insert with functor here // because we cannot lock inserted node for updating // Therefore, we use separate function return node_to_iterator( insert_at( head(), key, val )); } /// Inserts a new node and initializes it with a functor /** This function inserts a new node with key \p key, and if inserting is successful, calls the \p func functor with signature \code void func( value_type& item ); \endcode or \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the list's item inserted. \p item.second is a reference to the item's value that may be changed. The user-defined functor is called only if the inserting is successful. The \ref key_type should be constructible from a value of type \p K. The function allows you to split the creation of a new item into several steps: - create an item from \p key; - insert the new item into the list; - if inserting is successful, initialize the value of the item by calling the \p func functor This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename K, typename Func> iterator insert_with( const K& key, Func func ) { return node_to_iterator( insert_with_at( head(), key, func )); } /// Updates the item /** If \p key is not in the list and \p bAllowInsert is \p true, the function inserts a new item. Otherwise, the function returns an iterator pointing to the item found. Returns \p std::pair<iterator, bool> where \p first is an iterator pointing to the item found or inserted, \p second is \p true if a new item has been added or \p false if the item is already in the list. */ template <typename K> std::pair<iterator, bool> update( const K& key, bool bAllowInsert = true ) { std::pair< node_type *, bool > ret = update_at( head(), key, bAllowInsert ); return std::make_pair( node_to_iterator( ret.first ), ret.second ); } //@cond template <typename K> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<iterator, bool> ensure( const K& key ) { return update( key, true ); } //@endcond /// Inserts data of type \ref mapped_type constructed with std::forward<Args>(args)... /** Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename... Args> iterator emplace( Args&&... args ) { return node_to_iterator( emplace_at( head(), std::forward<Args>(args)...
)); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointing to the item found if the key is found, and \ref end() otherwise */ template <typename Q> iterator contains( Q const& key ) { return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") iterator find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching (ordered list version) /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template <typename Q, typename Less> typename std::enable_if<c_bSort, iterator>::type contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper<Less>())); } //@cond template <typename Q, typename Less> CDS_DEPRECATED("deprecated, use contains()") typename std::enable_if<c_bSort, iterator>::type find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key using \p equal predicate for searching (unordered list version) /** The function is an analog of contains( key ) but \p equal is used for key comparing. \p Equal functor has the interface like \p std::equal_to. */ template <typename Q, typename Equal> typename std::enable_if<!c_bSort, iterator>::type contains( Q const& key, Equal equal ) { CDS_UNUSED( equal ); return node_to_iterator( find_at( head(), key, typename maker::template equal_to_wrapper<Equal>::type())); } //@cond template <typename Q, typename Equal> CDS_DEPRECATED("deprecated, use contains()") typename std::enable_if<!c_bSort, iterator>::type find_with( Q const& key, Equal equal ) { return contains( key, equal ); } //@endcond /// Checks if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, this function always returns 0. @note Even if you use a real item counter and it returns 0, this does not mean that the list is empty. To check list emptiness use \ref empty() method.
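For instance, real item counting could be switched on via the traits (a sketch using \p lazy_list::make_traits; verify the option spelling against your libcds version): \code
typedef cds::container::LazyKVList< cds::gc::nogc, int, std::string,
    typename cds::container::lazy_list::make_traits<
        cds::opt::item_counter< cds::atomicity::item_counter >
    >::type
> counted_list;
\endcode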
*/ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list /** Post-condition: the list is empty */ void clear() { base_class::clear(); } protected: //@cond node_type * insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) return p.release(); return nullptr; } template <typename K> node_type * insert_at( head_type& refHead, const K& key ) { return insert_node_at( refHead, alloc_node( key )); } template <typename K, typename V> node_type * insert_at( head_type& refHead, const K& key, const V& val ) { return insert_node_at( refHead, alloc_node( key, val )); } template <typename K, typename Func> node_type * insert_with_at( head_type& refHead, const K& key, Func f ) { scoped_node_ptr pNode( alloc_node( key )); if ( base_class::insert_at( &refHead, *pNode )) { f( pNode->m_Data ); return pNode.release(); } return nullptr; } template <typename K> std::pair< node_type *, bool > update_at( head_type& refHead, const K& key, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); node_type * pItemFound = nullptr; std::pair<bool, bool> ret = base_class::update_at( &refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; }, bAllowInsert ); if ( ret.second ) pNode.release(); return std::make_pair( pItemFound, ret.second ); } template <typename... Args> node_type * emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward<Args>(args)... )); } template <typename K, typename Compare> node_type * find_at( head_type& refHead, const K& key, Compare cmp ) { return base_class::find_at( &refHead, key, cmp ); } /* template <typename K, typename Compare, typename Func> bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) { return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K const& ){ f( node.m_Data ); }); } */ //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H libcds-2.3.3/cds/container/lazy_kvlist_rcu.h000066400000000000000000001007051341244201700211100ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H #define CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H #include #include #include #include namespace cds { namespace container { /// Lazy ordered list (key-value pair), template specialization for \ref cds_urcu_desc "RCU" /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_LazyKVList_rcu This is a key-value variation of the non-intrusive \p %LazyList. Like a standard container, this implementation splits a stored value into two parts - a constant key and an alterable value. Usually, an ordered singly-linked list is used as a building block for a hash table implementation. The complexity of searching is O(N). Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type of an item to be stored in the list. It should be copy-constructible - \p Value - value type to be stored in the list - \p Traits - type traits, default is \p lazy_list::traits It is possible to declare option-based list with \p lazy_list::make_traits metafunction instead of \p Traits template argument.
For example, the following traits-based declaration of \p gc::HP lazy list \code #include #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::lazy_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_threaded<> >, int, int, my_traits > traits_based_list; \endcode is equal to the following option-based list \code #include #include // my_compare is the same // Declare option-based list typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_threaded<> >, int, int, typename cds::container::lazy_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. */ template < typename RCU, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = lazy_list::traits #else typename Traits #endif > class LazyKVList< cds::urcu::gc, Key, Value, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::LazyList< cds::urcu::gc, implementation_defined, Traits > #else protected details::make_lazy_kvlist< cds::urcu::gc, Key, Value, Traits >::type #endif { //@cond typedef details::make_lazy_kvlist< cds::urcu::gc, Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc gc; ///< Garbage collector typedef Traits traits; ///< List traits #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type ; ///< Key type typedef Value mapped_type ; ///< Type of value stored in the list typedef std::pair value_type ; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::mapped_type mapped_type; typedef typename maker::value_type value_type; #endif typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::node_type head_type; //@endcond public: /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer, cds::urcu::details::conventional_exempt_pair_cast >; /// Type of \p get() member function return value typedef value_type * raw_ptr; protected: //@cond template static node_type * alloc_node(const K& key) { return cxx_allocator().New( key ); } template static node_type * alloc_node( const K& key, const V& val ) { return cxx_allocator().New( key, val ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward(args)... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; head_type& head() { return base_class::m_Head; } head_type& head() const { return const_cast( base_class::m_Head ); } head_type& tail() { return base_class::m_Tail; } head_type& tail() const { return const_cast( base_class::m_Tail ); } //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( const_cast(&pNode)) {} iterator_type( head_type const * pNode ) : iterator_base( const_cast(pNode)) {} friend class LazyKVList; public: typedef typename cds::details::make_const_type::reference value_ref; typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference pair_ref; typedef typename cds::details::make_const_type::pointer pair_ptr; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const { typename iterator_base::value_ref p = iterator_base::operator *(); return p.m_Data; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** You may safely use iterators in multi-threaded environment only under external RCU lock. Otherwise, a program crash is possible if another thread deletes the item the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( head()); ++it ; // skip dummy head return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value pointing to dummy tail node. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( tail()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { const_iterator it( head()); ++it; // skip dummy head return it; } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( head()); ++it; // skip dummy head return it; } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( tail()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( tail()); } //@} public: /// Default constructor LazyKVList() {} //@cond template >::value >> explicit LazyKVList( Stat& st ) : base_class( st ) {} //@endcond /// Destructor clears the list ~LazyKVList() { clear(); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \p key_type. - The \ref mapped_type should be default-constructible. The function makes RCU lock internally. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( const K& key ) { return insert_at( head(), key ); } /// Inserts new node with a key and a value /** The function creates a node with \p key and value \p val, and then inserts the node created into the list. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. The function makes RCU lock internally. Returns \p true if inserting successful, \p false otherwise. 
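        For example (a minimal sketch; the \p rcu_kvlist typedef and the key/value
        types are illustrative assumptions, not part of this header):
        \code
        #include <cds/urcu/general_buffered.h>
        #include <cds/container/lazy_kvlist_rcu.h>

        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu;
        typedef cds::container::LazyKVList< rcu, int, std::string > rcu_kvlist;

        rcu_kvlist theList;
        theList.insert( 1 );          // key 1, default-constructed value
        theList.insert( 2, "two" );   // key 2, value "two"
        \endcode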
*/ template bool insert( const K& key, const V& val ) { return insert_at( head(), key, val ); } /// Inserts new node and initializes it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the list's item inserted. item.second is a reference to item's value that may be changed. The user-defined functor is called only if inserting is successful. The key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the list; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. The function makes RCU lock internally. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert_with( const K& key, Func func ) { return insert_with_at( head(), key, func ); } /// Inserts data of type \p mapped_type constructed from \p args /** Returns \p true if inserting successful, \p false otherwise. The function makes RCU lock internally. */ template bool emplace( Args&&... args ) { return emplace_at( head(), std::forward(args)... ); } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of the \p item.second of \p mapped_type; during \p func call \p item is locked so it is safe to modify the item in multi-threaded environment. The function applies RCU lock internally. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( const K& key, Func func, bool bAllowInsert = true ) { return update_at( head(), key, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const K& key, Func f ) { return update( key, f, true ); } //@endcond /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyKVList_rcu_erase RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if \p key is found and has been deleted, \p false otherwise */ template bool erase( K const& key ) { return erase_at( head(), key, intrusive_key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_erase "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. 
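        For example (a sketch; \p my_less is an assumed comparator consistent with the
        list order, continuing the \p rcu_kvlist example above):
        \code
        struct my_less {
            bool operator()( int k1, int k2 ) const { return k1 < k2; }
        };

        // Deletes the item with key 10, if any
        theList.erase_with( 10, my_less());
        \endcode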
*/ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper()); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyKVList_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& val) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts an item from the list /** @anchor cds_nonintrusive_LazyKVList_rcu_extract The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If \p key is not found the function returns an empty \p exempt_ptr. @note The function does NOT call RCU read-side lock or synchronization, and does NOT dispose the item found. It just excludes the item from the list and returns a pointer to item found. You should manually lock RCU before calling this function. \code #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::LazyKVList< rcu, int, Foo > rcu_lazy_list; rcu_lazy_list theList; // ... rcu_lazy_list::exempt_ptr p; { // first, we should lock RCU rcu_lazy_list::rcu_lock sl; // Now, you can apply extract function // Note that you must not delete the item found inside the RCU lock p = theList.extract( 10 ); if ( !p ) { // do something with p ... } } // Outside RCU lock section we may safely release extracted pointer. // release() passes the pointer to RCU reclamation cycle. p.release(); \endcode */ template exempt_ptr extract( K const& key ) { return exempt_ptr( extract_at( head(), key, intrusive_key_comparator())); } /// Extracts an item from the list using \p pred predicate for searching /** This function is the analog for \p extract(K const&). The \p pred is a predicate used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as \ref key_comparator. */ template exempt_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper())); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. 
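        For example (continuing the sketch above; no explicit RCU lock is needed
        because \p contains() locks RCU internally):
        \code
        if ( theList.contains( 2 )) {
            // an item with key 2 is in the list
        }
        \endcode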
*/ template bool contains( Q const& key ) const { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. The function applies RCU lock internally. */ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Finds the key \p key and performs an action with it /** \anchor cds_nonintrusive_LazyKVList_rcu_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change item.second that is reference to value of node. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q const& key, Func f ) const { return find_at( head(), key, intrusive_key_comparator(), f ); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_find_func "find(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } /// Finds \p key and return the item found /** \anchor cds_nonintrusive_LazyKVList_rcu_get The function searches the item with \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > ord_list; ord_list theList; // ... { // Lock RCU ord_list::rcu_lock lock; ord_list::value_type * pVal = theList.get( 5 ); if ( pVal ) { // Deal with pVal //... } // Unlock RCU by rcu_lock destructor // pVal can be freed at any time after RCU has been unlocked } \endcode */ template value_type * get( K const& key ) const { return get_at( head(), key, intrusive_key_comparator()); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_get "get(K const&)" but \p pred is used for comparing the keys. 
\p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the list. */ template value_type * get_with( K const& key, Less pred ) const { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact is not mean that the list is empty. To check list emptyness use \ref empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, const K& key ) { return insert_node_at( refHead, alloc_node( key )); } template bool insert_at( head_type& refHead, const K& key, const V& val ) { return insert_node_at( refHead, alloc_node( key, val )); } template bool insert_with_at( head_type& refHead, const K& key, Func f ) { scoped_node_ptr pNode( alloc_node( key )); if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); } )) { pNode.release(); return true; } return false; } template bool emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args)... )); } template bool erase_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::erase_at( &refHead, key, cmp ); } template bool erase_at( head_type& refHead, K const & key, Compare cmp, Func f ) { return base_class::erase_at( &refHead, key, cmp, [&f](node_type const & node){f( const_cast(node.m_Data)); }); } template std::pair update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_at( &refHead, *pNode, [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template node_type * extract_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::extract_at( &refHead, key, cmp ); } template bool find_at( head_type& refHead, K const& key, Compare cmp ) const { return base_class::find_at( &refHead, key, cmp, [](node_type&, K const&) {} ); } template bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) const { return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K& ){ f( node.m_Data ); }); } template value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const { node_type * pNode = base_class::get_at( &refHead, val, cmp ); return pNode ? &pNode->m_Data : nullptr; } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H libcds-2.3.3/cds/container/lazy_list_dhp.h000066400000000000000000000007771341244201700205410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
// (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_LAZY_LIST_DHP_H
#define CDSLIB_CONTAINER_LAZY_LIST_DHP_H

#include <cds/container/details/lazy_list_base.h>
#include <cds/intrusive/lazy_list_dhp.h>
#include <cds/container/details/make_lazy_list.h>
#include <cds/container/impl/lazy_list.h>

#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_DHP_H
libcds-2.3.3/cds/container/lazy_list_hp.h
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_LAZY_LIST_HP_H
#define CDSLIB_CONTAINER_LAZY_LIST_HP_H

#include <cds/container/details/lazy_list_base.h>
#include <cds/intrusive/lazy_list_hp.h>
#include <cds/container/details/make_lazy_list.h>
#include <cds/container/impl/lazy_list.h>

#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_HP_H
libcds-2.3.3/cds/container/lazy_list_nogc.h
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_LAZY_LIST_NOGC_H
#define CDSLIB_CONTAINER_LAZY_LIST_NOGC_H

#include <memory>
#include <cds/container/details/lazy_list_base.h>
#include <cds/intrusive/lazy_list_nogc.h>
#include <cds/container/details/make_lazy_list.h>

namespace cds { namespace container {

    /// Lazy ordered single-linked list (template specialization for gc::nogc)
    /** @ingroup cds_nonintrusive_list
        \anchor cds_nonintrusive_LazyList_nogc

        This specialization is so-called append-only: no item reclamation may be performed,
        and the class does not support deleting of list items.

        The list is ordered if \p Traits::sort is \p true (the default), or unordered otherwise.
        An unordered list is maintained by an \p equal_to relationship (\p Traits::equal_to);
        for an ordered list, a \p less or \p compare relation should be specified in \p Traits.

        See @ref cds_nonintrusive_LazyList_gc "cds::container::LazyList"
    */
    template <
        typename T,
#ifdef CDS_DOXYGEN_INVOKED
        typename Traits = lazy_list::traits
#else
        typename Traits
#endif
    >
    class LazyList< gc::nogc, T, Traits >:
#ifdef CDS_DOXYGEN_INVOKED
        protected intrusive::LazyList< gc::nogc, T, Traits >
#else
        protected details::make_lazy_list< cds::gc::nogc, T, Traits >::type
#endif
    {
        //@cond
        typedef details::make_lazy_list< cds::gc::nogc, T, Traits > maker;
        typedef typename maker::type base_class;
        //@endcond

    public:
        typedef cds::gc::nogc gc;         ///< Garbage collector
        typedef T             value_type; ///< Type of value stored in the list
        typedef Traits        traits;     ///< List traits

        typedef typename base_class::back_off     back_off;       ///< Back-off strategy used
        typedef typename maker::allocator_type    allocator_type; ///< Allocator type used for allocate/deallocate the nodes
        typedef typename base_class::item_counter item_counter;   ///< Item counting policy used
        typedef typename maker::key_comparator    key_comparator; ///< key comparing functor
        typedef typename base_class::memory_model memory_model;   ///< Memory ordering.
See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics static constexpr bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false) //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename base_class::key_comparator intrusive_key_comparator; typedef typename base_class::node_type head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( const_cast(&pNode)) {} explicit iterator_type( const iterator_base& it ) : iterator_base( it ) {} friend class LazyList; protected: explicit iterator_type( node_type& pNode ) : iterator_base( &pNode ) {} public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() {} iterator_type( const iterator_type& src ) : iterator_base( src ) {} value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? &(p->m_Value) : nullptr; } value_ref operator *() const { return (iterator_base::operator *()).m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } /// Post-increment iterator_type operator ++(int) { return iterator_base::operator ++(0); } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators //@{ /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the list. 
For empty list \code begin() == end() \endcode */ iterator end() { return iterator( tail()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { const_iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( tail()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( tail()); } //@} public: /// Default constructor LazyList() {} //@cond template >::value >> explicit LazyList( Stat& st ) : base_class( st ) {} //@endcond /// Desctructor clears the list ~LazyList() { clear(); } /// Inserts new node /** The function inserts \p val in the list if the list does not contain an item with key equal to \p val. Return an iterator pointing to inserted item if success \ref end() otherwise */ template iterator insert( Q&& val ) { return node_to_iterator( insert_at( head(), std::forward( val ))); } /// Inserts data of type \p value_type created from \p args /** Return an iterator pointing to inserted item if success \ref end() otherwise */ template iterator emplace( Args&&... args ) { return node_to_iterator( emplace_at( head(), std::forward(args)... )); } /// Updates the item /** If \p key is not in the list and \p bAllowInsert is \p true, the function inserts a new item. Otherwise, the function returns an iterator pointing to the item found. Returns std::pair where \p first is an iterator pointing to item found or inserted, \p second is true if new item has been added or \p false if the item already is in the list. */ template std::pair update( Q&& val, bool bAllowInsert = true ) { std::pair< node_type *, bool > ret = update_at( head(), std::forward( val ), bAllowInsert ); return std::make_pair( node_to_iterator( ret.first ), ret.second ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& val ) { return update( val, true ); } //@endcond /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointed to item found if the key is found, and \ref end() otherwise */ template iterator contains( Q const& key ) { return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching (ordered list version) /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. 
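        For example (a sketch; assumes an ordered \p LazyList< cds::gc::nogc, int >
        named \p theList and an illustrative \p my_less functor):
        \code
        struct my_less {
            bool operator()( int v1, int v2 ) const { return v1 < v2; }
        };

        auto it = theList.contains( 10, my_less());
        if ( it != theList.end()) {
            // item found; since no reclamation is performed, the iterator
            // remains valid as long as the list exists
        }
        \endcode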
*/ template typename std::enable_if::type contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper())); } //@cond template CDS_DEPRECATED("deprecated, use contains()") typename std::enable_if::type find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p val using \p equal predicate for searching (unordered list version) /** The function is an analog of contains( key ) but \p equal is used for key comparing. \p Equal functor has the interface like \p std::equal_to. */ template typename std::enable_if::type contains( Q const& key, Equal equal ) { CDS_UNUSED( equal ); return node_to_iterator( find_at( head(), key, typename maker::template equal_to_wrapper::type())); } //@cond template CDS_DEPRECATED("deprecated, use contains()") typename std::enable_if::type find_with( Q const& key, Equal equal ) { return contains( key, equal ); } //@endcond /// Check if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on \p Traits::item_counter type. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact is not mean that the list is empty. To check list emptyness use \ref empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond static value_type& node_to_value( node_type& n ) { return n.m_Value; } static node_type * alloc_node() { return cxx_allocator().New(); } static node_type * alloc_node( value_type const& v ) { return cxx_allocator().New( v ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } head_type& head() { return base_class::m_Head; } head_type const& head() const { return base_class::m_Head; } head_type& tail() { return base_class::m_Tail; } head_type const& tail() const { return base_class::m_Tail; } iterator node_to_iterator( node_type * pNode ) { if ( pNode ) return iterator( *pNode ); return end(); } iterator insert_node( node_type * pNode ) { return node_to_iterator( insert_node_at( head(), pNode )); } node_type * insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) return p.release(); return nullptr; } template node_type * insert_at( head_type& refHead, Q&& val ) { return insert_node_at( refHead, alloc_node( std::forward( val ))); } template node_type * emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); } template std::pair< node_type *, bool > update_at( head_type& refHead, Q&& val, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( std::forward( val ))); node_type * pItemFound = nullptr; std::pair ret = base_class::update_at( &refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; }, bAllowInsert ); if ( ret.second ) pNode.release(); return std::make_pair( pItemFound, ret.second ); } template node_type * find_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::find_at( &refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_NOGC_H libcds-2.3.3/cds/container/lazy_list_rcu.h000066400000000000000000001000321341244201700205400ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_LAZY_LIST_RCU_H #define CDSLIB_CONTAINER_LAZY_LIST_RCU_H #include #include #include #include #include namespace cds { namespace container { /// Lazy ordered list (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_LazyList_rcu Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Source: - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit "A Lazy Concurrent List-Based Set Algorithm" The lazy list is based on an optimistic locking scheme for inserts and removes, eliminating the need to use the equivalent of an atomically markable reference. It also has a novel wait-free membership \p find operation that does not need to perform cleanup operations and is more efficient. It is non-intrusive version of \p cds::intrusive::LazyList class Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - type to be stored in the list. - \p Traits - type traits, default is lazy_list::traits It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template argument. For example, the following traits-based declaration of \p gc::HP lazy list \code #include #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::lazy_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_instant<> >, int, my_traits > traits_based_list; \endcode is equal to the following option-based list \code #include #include // my_compare is the same // Declare option-based list typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_instant<> >, int, typename cds::container::lazy_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode The implementation does not divide type \p T into key and value part and may be used as main building block for some hash set containers. The key is a function (or a part) of type \p T, and this function is specified by \p Traits::compare functor or \p Traits::less predicate \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" is a key-value version of lazy non-intrusive list that is closer to the C++ std library approach. 
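        For example, a set-like usage where the key is a field of \p T (a sketch;
        \p item and \p item_less are illustrative types, not part of the library):
        \code
        struct item {
            int         key;      // key part
            std::string payload;  // non-key part
        };
        // The comparator defines the key: it compares items by the key field only
        struct item_less {
            bool operator()( item const& i1, item const& i2 ) const { return i1.key < i2.key; }
            bool operator()( item const& i, int k ) const { return i.key < k; }
            bool operator()( int k, item const& i ) const { return k < i.key; }
        };
        struct my_traits: public cds::container::lazy_list::traits {
            typedef item_less less;
        };
        \endcode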
@note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. */ template < typename RCU, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = lazy_list::traits #else typename Traits #endif > class LazyList< cds::urcu::gc, T, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::LazyList< cds::urcu::gc, T, Traits > #else protected details::make_lazy_list< cds::urcu::gc, T, Traits >::type #endif { //@cond typedef details::make_lazy_list< cds::urcu::gc, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc gc; ///< Garbage collector typedef T value_type; ///< Type of value stored in the list typedef Traits traits; ///< List traits typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key compare functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::node_type head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; ///< pointer to extracted node /// Type of \p get() member function return value typedef value_type * raw_ptr; protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( const_cast( &pNode )) {} iterator_type( head_type const * pNode ) : iterator_base( const_cast( pNode )) {} friend class LazyList; public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&(p->m_Value) : nullptr; } value_ref operator *() const { return (iterator_base::operator *()).m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the item the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( tail()); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { const_iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( head()); ++it ; // skip dummy head node return it; } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( tail()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( tail()); } //@} public: /// Default constructor LazyList() {} //@cond template >::value >> explicit LazyList( Stat& st ) : base_class( st ) {} //@endcond /// Desctructor clears the list ~LazyList() { clear(); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the list. The type \p Q should contain as minimum the complete key of the node. The object of \p value_type should be constructible from \p val of type \p Q. In trivial case, \p Q is equal to \p value_type. The function makes RCU lock internally. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( Q&& val ) { return insert_at( head(), std::forward( val )); } /// Inserts new node /** This function inserts new node with default-constructed value and then it calls \p func functor with signature \code void func( value_type& itemValue ) ;\endcode The argument \p itemValue of user-defined functor \p func is the reference to the list's item inserted. The user-defined functor is called only if the inserting is success. The type \p Q should contain the complete key of the node. The object of \ref value_type should be constructible from \p key of type \p Q. 
The function allows to split creating of new item into two part: - create item from \p key with initializing key-fields only; - insert new item into the list; - if inserting is successful, initialize non-key fields of item by calling \p f functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. The function makes RCU lock internally. */ template bool insert( Q&& key, Func func ) { return insert_at( head(), std::forward( key ), func ); } /// Inserts data of type \p value_type constructed from \p args /** Returns \p true if inserting successful, \p false otherwise. The function makes RCU lock internally. */ template bool emplace( Args&&... args ) { return emplace_at( head(), std::forward(args)... ); } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, Q const& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p val - argument \p key passed into the \p %update() function The functor may change non-key fields of the \p item; during \p func call \p item is locked so it is safe to modify the item in multi-threaded environment. The function applies RCU lock internally. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( Q const& key, Func func, bool bAllowInsert = true ) { return update_at( head(), key, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& key, Func f ) { return update( key, f, true ); } //@endcond /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyList_rcu_erase Since the key of LazyList's item type \p T is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyList_rcu_erase "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), [](value_type const&){} ); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_LazyList_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. 
The functor \p Func interface: \code struct extractor { void operator()(value_type const& val) { ... } }; \endcode Since the key of LazyList's item type \p T is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyList_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts an item from the list /** @anchor cds_nonintrusive_LazyList_rcu_extract The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. @note The function does NOT call RCU read-side lock or synchronization, and does NOT dispose the item found. It just excludes the item from the list and returns a pointer to item found. You should lock RCU before calling this function. \code #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::LazyList< rcu, Foo > rcu_lazy_list; rcu_lazy_list theList; // ... rcu_lazy_list::exempt_ptr p; { // first, we should lock RCU rcu_lazy_list::rcu_lock sl; // Now, you can apply extract function // Note that you must not delete the item found inside the RCU lock p = theList.extract( 10 ); if ( p ) { // do something with p ... } } // Outside RCU lock section we may safely release extracted pointer. // release() passes the pointer to RCU reclamation cycle. p.release(); \endcode */ template exempt_ptr extract( Q const& key ) { return exempt_ptr(extract_at( head(), key, intrusive_key_comparator())); } /// Extracts an item from the list using \p pred predicate for searching /** This function is the analog for \p extract(Q const&). The \p pred is a predicate used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as \ref key_comparator. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper())); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( Q const& key ) const { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. 
\p pred must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Finds the key \p key and performs an action with it /** \anchor cds_nonintrusive_LazyList_rcu_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the \p find() function argument. The functor may change non-key fields of \p item. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor may modify both arguments. The function makes RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) const { return find_at( head(), key, intrusive_key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) const { return find_at( head(), key, intrusive_key_comparator(), f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_LazyList_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@endcond /// Finds the key \p key and return the item found /** \anchor cds_nonintrusive_LazyList_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; ord_list theList; // ... { // Lock RCU ord_list::rcu_lock lock; foo * pVal = theList.get( 5 ); if ( pVal ) { // Deal with pVal //... } // Unlock RCU by rcu_lock destructor // pVal can be freed at any time after RCU has been unlocked } \endcode */ template value_type * get( Q const& key ) const { return get_at( head(), key, intrusive_key_comparator()); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_LazyList_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. 
\p pred must imply the same element order as the comparator used for building the list. */ template value_type * get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on \p Traits::item_counter type. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact is not mean that the list is empty. To check list emptyness use \ref empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond bool insert_node( node_type * pNode ) { return insert_node_at( head(), pNode ); } bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *pNode )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, Q&& val ) { return insert_node_at( refHead, alloc_node( std::forward( val ))); } template bool emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args)... )); } template bool insert_at( head_type& refHead, Q&& key, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node_to_value(node)); } )) { pNode.release(); return true; } return false; } template bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) { return base_class::erase_at( &refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); } template node_type * extract_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::extract_at( &refHead, key, cmp ); } template std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_at( &refHead, *pNode, [&f, &key](bool bNew, node_type& node, node_type&){f( bNew, node_to_value(node), key );}, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template bool find_at( head_type& refHead, Q const& key, Compare cmp ) const { return base_class::find_at( &refHead, key, cmp, [](node_type&, Q const&) {} ); } template bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) const { return base_class::find_at( &refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); } template value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const { node_type * pNode = base_class::get_at( &refHead, val, cmp ); return pNode ? &pNode->m_Value : nullptr; } static value_type& node_to_value( node_type& n ) { return n.m_Value; } static value_type const& node_to_value( node_type const& n ) { return n.m_Value; } template static node_type * alloc_node( Q&& v ) { return cxx_allocator().New( std::forward( v )); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... 
        ); }

        static void free_node( node_type * pNode )
        {
            cxx_allocator().Delete( pNode );
        }

        head_type& head()
        {
            return base_class::m_Head;
        }

        head_type& head() const
        {
            return const_cast<head_type&>( base_class::m_Head );
        }

        head_type& tail()
        {
            return base_class::m_Tail;
        }

        head_type const& tail() const
        {
            return base_class::m_Tail;
        }
        //@endcond
    };

}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_RCU_H
libcds-2.3.3/cds/container/michael_kvlist_dhp.h
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_DHP_H
#define CDSLIB_CONTAINER_MICHAEL_KVLIST_DHP_H

#include <cds/container/details/michael_list_base.h>
#include <cds/intrusive/michael_list_dhp.h>
#include <cds/container/details/make_michael_kvlist.h>
#include <cds/container/impl/michael_kvlist.h>

#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_DHP_H
libcds-2.3.3/cds/container/michael_kvlist_hp.h
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_HP_H
#define CDSLIB_CONTAINER_MICHAEL_KVLIST_HP_H

#include <cds/container/details/michael_list_base.h>
#include <cds/intrusive/michael_list_hp.h>
#include <cds/container/details/make_michael_kvlist.h>
#include <cds/container/impl/michael_kvlist.h>

#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_HP_H
libcds-2.3.3/cds/container/michael_kvlist_nogc.h
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H
#define CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H

#include <memory>
#include <cds/container/details/michael_list_base.h>
#include <cds/intrusive/michael_list_nogc.h>
#include <cds/container/details/make_michael_kvlist.h>

namespace cds { namespace container {

    //@cond
    namespace details {

        template <typename K, typename T, class Traits>
        struct make_michael_kvlist_nogc: public make_michael_kvlist< cds::gc::nogc, K, T, Traits >
        {
            typedef make_michael_kvlist< cds::gc::nogc, K, T, Traits > base_maker;
            typedef typename base_maker::node_type node_type;

            struct intrusive_traits: public base_maker::intrusive_traits
            {
                typedef typename base_maker::node_deallocator disposer;
            };

            typedef intrusive::MichaelList< cds::gc::nogc, node_type, intrusive_traits > type;
        };

    } // namespace details
    //@endcond

    /// Michael's ordered list (key-value pair, template specialization for gc::nogc)
    /** @ingroup cds_nonintrusive_list
        @anchor cds_nonintrusive_MichaelKVList_nogc

        This specialization is intended for so-called persistent usage when no item
        reclamation may be performed. The class does not support deleting of list items.

        Usually, an ordered single-linked list is used as a building block for
        a hash table implementation. The complexity of searching is O(N).

        See \ref cds_nonintrusive_MichaelList_gc "MichaelList" for description of template parameters.

        The interface of the specialization is a little different.
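        For example (a sketch; the \p nogc_list typedef is an illustrative assumption):
        \code
        #include <cds/container/michael_kvlist_nogc.h>

        typedef cds::container::MichaelKVList< cds::gc::nogc, int, std::string > nogc_list;

        nogc_list theList;
        // Unlike the HP/DHP/RCU specializations, insert() returns an iterator, not bool
        nogc_list::iterator it = theList.insert( 42 );
        if ( it != theList.end())
            it->second = "answer";  // items are never reclaimed, so the iterator stays valid
        \endcode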
*/ template < typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = michael_list::traits #else typename Traits #endif > class MichaelKVList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::MichaelList< gc::nogc, implementation_defined, Traits > #else protected details::make_michael_kvlist_nogc< Key, Value, Traits >::type #endif { //@cond typedef details::make_michael_kvlist_nogc< Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::gc::nogc gc; ///< Garbage collector used typedef Traits traits; ///< List traits #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type ; ///< Key type typedef Value mapped_type ; ///< Type of value stored in the list typedef std::pair value_type ; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::value_type mapped_type; typedef typename maker::pair_type value_type; #endif typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::atomic_node_ptr head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& refNode ) : iterator_base( refNode ) {} explicit iterator_type( const iterator_base& it ) : iterator_base( it ) {} friend class MichaelKVList; protected: explicit iterator_type( node_type& pNode ) : iterator_base( &pNode ) {} public: typedef typename cds::details::make_const_type::reference value_ref; typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference pair_ref; typedef typename cds::details::make_const_type::pointer pair_ptr; iterator_type() : iterator_base() {} iterator_type( const iterator_type& src ) : iterator_base( src ) {} key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&( p->m_Data ) : nullptr;
}

pair_ref operator *() const
{
    typename iterator_base::value_ref p = iterator_base::operator *();
    return p.m_Data;
}

/// Pre-increment
iterator_type& operator ++()
{
    iterator_base::operator ++();
    return *this;
}

/// Post-increment
iterator_type operator ++(int)
{
    return iterator_base::operator ++(0);
}

template <bool C>
bool operator ==(iterator_type<C> const& i ) const
{
    return iterator_base::operator ==(i);
}
template <bool C>
bool operator !=(iterator_type<C> const& i ) const
{
    return iterator_base::operator !=(i);
}
};
//@endcond

public:
///@name Forward iterators
//@{
/// Forward iterator
/**
    The forward iterator is safe: you may use it in a multi-threaded environment without any synchronization.

    The forward iterator for Michael's list based on \p gc::nogc has pre- and post-increment operators.

    The iterator interface to access item data:
    - operator -> - returns a pointer to \p value_type
    - operator * - returns a reference (a const reference for \p const_iterator) to \p value_type
    - const key_type& key() - returns a key reference for the iterator
    - mapped_type& val() - returns a value reference for the iterator (a const reference for \p const_iterator)

    For both functions the iterator should not be equal to \p end().

    @note \p end() iterator is not dereferenceable
*/
typedef iterator_type<false> iterator;

/// Const forward iterator
/**
    For iterator's features and requirements see \ref iterator
*/
typedef iterator_type<true> const_iterator;

/// Returns a forward iterator addressing the first element in a list
/**
    For empty list \code begin() == end() \endcode
*/
iterator begin()
{
    return iterator( head());
}

/// Returns an iterator that addresses the location succeeding the last element in a list
/**
    Do not use the value returned by end function to access any item.
    Internally, end returning value equals to \p nullptr.

    The returned value can be used only to control reaching the end of the list.
    For empty list \code begin() == end() \endcode
*/
iterator end()
{
    return iterator();
}

/// Returns a forward const iterator addressing the first element in a list
const_iterator begin() const
{
    return const_iterator( head());
}

/// Returns a forward const iterator addressing the first element in a list
const_iterator cbegin() const
{
    return const_iterator( head());
}

/// Returns a const iterator that addresses the location succeeding the last element in a list
const_iterator end() const
{
    return const_iterator();
}

/// Returns a const iterator that addresses the location succeeding the last element in a list
const_iterator cend() const
{
    return const_iterator();
}
//@}

public:
/// Default constructor
/**
    Initializes empty list
*/
MichaelKVList()
{}

//@cond
template <typename Stat, typename = std::enable_if_t<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
explicit MichaelKVList( Stat& st )
    : base_class( st )
{}
//@endcond

/// List destructor
/**
    Clears the list
*/
~MichaelKVList()
{
    clear();
}

/// Inserts new node with key and default value
/**
    The function creates a node with \p key and default value, and then inserts the node created into the list.

    Preconditions:
    - The \ref key_type should be constructible from value of type \p K.
      In trivial case, \p K is equal to \ref key_type.
    - The \ref mapped_type should be default-constructible.

    Returns an iterator pointing to the inserted value, or \p end() if inserting failed
*/
template <typename K>
iterator insert( const K& key )
{
    return node_to_iterator( insert_at( head(), key ));
}

/// Inserts new node with a key and a value
/**
    The function creates a node with \p key and value \p val, and then inserts the node created into the list.
    Preconditions:
    - The \ref key_type should be constructible from \p key of type \p K.
    - The \ref mapped_type should be constructible from \p val of type \p V.

    Returns an iterator pointing to the inserted value, or \p end() if inserting failed
*/
template <typename K, typename V>
iterator insert( const K& key, const V& val )
{
    // We cannot use insert with functor here
    // because we cannot lock inserted node for updating
    // Therefore, we use separate function
    return node_to_iterator( insert_at( head(), key, val ));
}

/// Inserts new node and initializes it by a functor
/**
    This function inserts new node with key \p key and if inserting is successful then it calls
    \p func functor with signature
    \code
        void func( value_type& item );
        struct functor {
            void operator()( value_type& item );
        };
    \endcode

    The argument \p item of user-defined functor \p func is the reference to the list's item inserted.
    item.second is a reference to the item's value that may be changed.
    The user-defined functor \p func should guarantee that during changing of the item's value no other
    changes could be made on this list's item by concurrent threads.

    The \p key_type should be constructible from value of type \p K.

    The function allows to split creating of new item into two parts:
    - create item from \p key;
    - insert new item into the list;
    - if inserting is successful, initialize the value of item by calling \p func functor

    This can be useful if complete initialization of object of \p mapped_type is heavyweight and
    it is preferable that the initialization should be completed only if inserting is successful.

    Returns an iterator pointing to the inserted value, or \p end() if inserting failed
*/
template <typename K, typename Func>
iterator insert_with( const K& key, Func func )
{
    return node_to_iterator( insert_with_at( head(), key, func ));
}

/// Updates the item
/**
    If \p key is not in the list and \p bAllowInsert is \p true, the function inserts a new item.
    Otherwise, the function returns an iterator pointing to the item found.

    Returns std::pair<iterator, bool> where \p first is an iterator pointing to the item found
    or inserted, \p second is \p true if a new item has been added or \p false if the item is
    already in the list.
*/
template <typename K>
std::pair<iterator, bool> update( K const& key, bool bAllowInsert = true )
{
    std::pair< node_type *, bool > ret = update_at( head(), key, bAllowInsert );
    return std::make_pair( node_to_iterator( ret.first ), ret.second );
}
//@cond
template <typename K>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<iterator, bool> ensure( K const& key )
{
    return update( key );
}
//@endcond

/// Inserts data of type \ref mapped_type constructed with std::forward<Args>(args)...
/**
    Returns an iterator pointing to the inserted value, or \p end() if inserting failed
*/
template <typename K, typename... Args>
iterator emplace( K&& key, Args&&... args )
{
    return node_to_iterator( emplace_at( head(), std::forward<K>(key), std::forward<Args>(args)... ));
}

/// Checks whether the list contains \p key
/**
    The function searches the item with key equal to \p key
    and returns an iterator pointing to the item found, and \ref end() otherwise
*/
template <typename Q>
iterator contains( Q const& key )
{
    return node_to_iterator( find_at( head(), key, intrusive_key_comparator()));
}
//@cond
template <typename Q>
CDS_DEPRECATED("deprecated, use contains()")
iterator find( Q const& key )
{
    return contains( key );
}
//@endcond

/// Checks whether the list contains \p key using \p pred predicate for searching
/**
    The function is an analog of contains( key ) but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the list.
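    For example (a sketch; \p less_by_int is an illustrative functor and \p theList a list instance,
    neither is part of the library):
    \code
    struct less_by_int {
        bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
    };
    // ...
    if ( theList.contains( 5, less_by_int()) != theList.end()) {
        // the key 5 is in the list
    }
    \endcode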
*/
template <typename Q, typename Less>
iterator contains( Q const& key, Less pred )
{
    CDS_UNUSED( pred );
    return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper<Less>()));
}
//@cond
template <typename Q, typename Less>
CDS_DEPRECATED("deprecated, use contains()")
iterator find_with( Q const& key, Less pred )
{
    return contains( key, pred );
}
//@endcond

/// Checks if the list is empty
bool empty() const
{
    return base_class::empty();
}

/// Returns list's item count
/**
    The value returned depends on the item counter provided by \p Traits. For \p atomicity::empty_item_counter,
    this function always returns 0.

    @note Even if you use a real item counter and it returns 0, this fact does not mean that the list
    is empty. To check list emptiness use the \p empty() method.
*/
size_t size() const
{
    return base_class::size();
}

/// Returns const reference to internal statistics
stat const& statistics() const
{
    return base_class::statistics();
}

/// Clears the list
void clear()
{
    base_class::clear();
}

protected:
//@cond
node_type * insert_node_at( head_type& refHead, node_type * pNode )
{
    assert( pNode != nullptr );
    scoped_node_ptr p( pNode );
    if ( base_class::insert_at( refHead, *pNode ))
        return p.release();
    return nullptr;
}

template <typename K>
node_type * insert_at( head_type& refHead, const K& key )
{
    return insert_node_at( refHead, alloc_node( key ));
}

template <typename K, typename V>
node_type * insert_at( head_type& refHead, const K& key, const V& val )
{
    return insert_node_at( refHead, alloc_node( key, val ));
}

template <typename K, typename Func>
node_type * insert_with_at( head_type& refHead, const K& key, Func f )
{
    scoped_node_ptr pNode( alloc_node( key ));
    if ( base_class::insert_at( refHead, *pNode )) {
        f( pNode->m_Data );
        return pNode.release();
    }
    return nullptr;
}

template <typename K>
std::pair< node_type *, bool > update_at( head_type& refHead, const K& key, bool bAllowInsert )
{
    scoped_node_ptr pNode( alloc_node( key ));
    node_type * pItemFound = nullptr;

    std::pair<bool, bool> ret = base_class::update_at( refHead, *pNode,
        [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; },
        bAllowInsert );
    if ( ret.second )
        pNode.release();
    return std::make_pair( pItemFound, ret.second );
}

template <typename K, typename... Args>
node_type * emplace_at( head_type& refHead, K&& key, Args&&... args )
{
    return insert_node_at( refHead, alloc_node( std::forward<K>(key), std::forward<Args>(args)... ));
}

template <typename K, typename Compare>
node_type * find_at( head_type& refHead, K const& key, Compare cmp )
{
    return base_class::find_at( refHead, key, cmp );
}

template <typename K>
static node_type * alloc_node( const K& key )
{
    return cxx_allocator().New( key );
}

template <typename K, typename V>
static node_type * alloc_node( const K& key, const V& val )
{
    return cxx_allocator().New( key, val );
}

template <typename K, typename... Args>
static node_type * alloc_node( K&& key, Args&&... args )
{
    return cxx_allocator().MoveNew( std::forward<K>( key ), std::forward<Args>( args )... );
}

static void free_node( node_type * pNode )
{
    cxx_allocator().Delete( pNode );
}

head_type& head()
{
    return base_class::m_pHead;
}

head_type const& head() const
{
    return base_class::m_pHead;
}

iterator node_to_iterator( node_type * pNode )
{
    if ( pNode )
        return iterator( *pNode );
    return end();
}
//@endcond
};

}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H
libcds-2.3.3/cds/container/michael_kvlist_rcu.h000066400000000000000000001045151341244201700215360ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H #define CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H #include #include // ref #include #include #include namespace cds { namespace container { /// Michael's ordered list (key-value pair), template specialization for \ref cds_urcu_desc "RCU" /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_MichaelKVList_rcu This is key-value variation of non-intrusive \ref cds_nonintrusive_MichaelList_rcu "MichaelList". Like standard container, this implementation split a value stored into two part - constant key and alterable value. Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type of an item stored in the list. It should be copy-constructible - \p Value - value type stored in a list - \p Traits - type traits, default is \p michael_list::traits @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. It is possible to declare option-based list using \p cds::container::michael_list::make_traits metafunction istead of \p Traits template argument. For example, the following traits-based declaration of Michael's list \code #include #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::michael_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int, my_traits > traits_based_list; \endcode is equivalent for the following option-based list \code #include #include // my_compare is the same // Declare option-based list typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int, typename cds::container::michael_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode */ template < typename RCU, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED typename Traits = michael_list::traits #else typename Traits #endif > class MichaelKVList< cds::urcu::gc, Key, Value, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::MichaelList< cds::urcu::gc, implementation_defined, Traits > #else protected details::make_michael_kvlist< cds::urcu::gc, Key, Value, Traits >::type #endif { //@cond typedef details::make_michael_kvlist< cds::urcu::gc, Key, Value, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc gc; ///< Garbage collector #ifdef CDS_DOXYGEN_INVOKED typedef Key key_type; ///< Key type typedef Value mapped_type; ///< Type of value stored in the list typedef std::pair value_type; ///< key/value pair stored in the list #else typedef typename maker::key_type key_type; typedef typename maker::value_type mapped_type; typedef typename maker::pair_type value_type; #endif typedef Traits traits; ///< List traits typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy typedef typename maker::key_comparator key_comparator; 
///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. See \p michael_list::traits::memory_model typedef typename base_class::stat stat; ///< Internal statistics typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelKVList< gc , key_type, mapped_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::atomic_node_ptr head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer, cds::urcu::details::conventional_exempt_pair_cast >; private: //@cond struct raw_ptr_converter { value_type * operator()( node_type * p ) const { return p ? &p->m_Data : nullptr; } value_type& operator()( node_type& n ) const { return n.m_Data; } value_type const& operator()( node_type const& n ) const { return n.m_Data; } }; //@endcond public: /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( pNode ) {} friend class MichaelKVList; public: typedef typename cds::details::make_const_type::reference value_ref; typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference pair_ref; typedef typename cds::details::make_const_type::pointer pair_ptr; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.first; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const { typename iterator_base::value_ref p = iterator_base::operator *(); return p.m_Data; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); assert( p != nullptr ); return p->m_Data.second; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** You may safely use iterators in multi-threaded environment only under external RCU lock. Otherwise, a program crash is possible if another thread deletes the item the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( head()); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( head()); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( head()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor /** Initializes empty list */ MichaelKVList() {} //@cond template >::value >> explicit MichaelKVList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~MichaelKVList() { clear(); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. The function applies RCU lock internally. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( const K& key ) { return insert_at( head(), key ); } /// Inserts new node with a key and a value /** The function creates a node with \p key and value \p val, and then inserts the node created into the list. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref mapped_type should be constructible from \p val of type \p V. The function applies RCU lock internally. Returns \p true if inserting successful, \p false otherwise. 
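    For example (a sketch; \p theList is assumed to be a \p MichaelKVList< rcu, int, std::string >
    instance, not a name defined by the library):
    \code
    // The RCU lock is taken internally, do not lock RCU here
    bool bOk = theList.insert( 10, std::string( "ten" )); // true iff key 10 was not in the list
    \endcode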
*/ template bool insert( const K& key, const V& val ) { return insert_at( head(), key, val ); } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the list's item inserted. item.second is a reference to item's value that may be changed. User-defined functor \p func should guarantee that during changing item's value no any other changes could be made on this list's item by concurrent threads. The key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the list; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. The function applies RCU lock internally. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert_with( const K& key, Func func ) { return insert_with_at( head(), key, func ); } /// Updates an element with given \p key /** The operation performs inserting or changing data with lock-free manner. If the \p key not found in the list, then the new item created from \p key is inserted into the list (note that in this case the \ref key_type should be copy-constructible from type \p K). Otherwise, the functor \p func is called with item found. The functor \p Func may be a function with signature: \code void func( bool bNew, value_type& item ); \endcode or a functor: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list The functor may change any fields of the \p item.second that is \ref mapped_type; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. The function applies RCU lock internally. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already is in the list. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the list, then the new item created from \p key will be inserted iff \p bAllowInsert is \p true. (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of the \p item.second that is \ref mapped_type; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. The function applies RCU lock internally. 
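    For example (a sketch, reusing the \p rcu_michael_list typedef from the \p extract() example below):
    \code
    // Inserts the key 10 or modifies the existing item
    theList.update( 10, []( bool bNew, rcu_michael_list::value_type& item ) {
        if ( bNew ) {
            // initialize item.second here
        }
        else {
            // modify item.second here
        }
    });
    \endcode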
Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( const K& key, Func func, bool bAllowInsert = true ) { return update_at( head(), key, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const K& key, Func f ) { return update( key, f, true ); } //@endcond /// Inserts data of type \ref mapped_type constructed with std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. The function applies RCU lock internally. */ template bool emplace( K&& key, Args&&... args ) { return emplace_at( head(), std::forward(key), std::forward(args)... ); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichaelKVList_rcu_erase RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if \p key is found and has been deleted, \p false otherwise */ template bool erase( K const& key ) { return erase_at( head(), key, intrusive_key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_erase "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper()); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichaelKVList_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct functor { void operator()(value_type& val) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise See also: \ref erase */ template bool erase( K const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts an item from the list /** @anchor cds_nonintrusive_MichaelKVList_rcu_extract The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If \p key is not found the function returns an empty \p exempt_ptr. @note The function does NOT dispose the item found. It just excludes the item from the list and returns a pointer to item found. You shouldn't lock RCU before calling this function. \code #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::MichaelKVList< rcu, int, Foo > rcu_michael_list; rcu_michael_list theList; // ... 
rcu_michael_list::exempt_ptr p; // The RCU should NOT be locked when extract() is called! assert( !rcu::is_locked()); // extract() call p = theList.extract( 10 ); if ( p ) { // do something with p ... } // we may safely release extracted pointer here. // release() passes the pointer to RCU reclamation cycle. p.release(); \endcode */ template exempt_ptr extract( K const& key ) { return exempt_ptr( extract_at( head(), key, intrusive_key_comparator())); } /// Extracts an item from the list using \p pred predicate for searching /** This function is the analog for \p extract(K const&). The \p pred is a predicate used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as \ref key_comparator. */ template exempt_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper())); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( Q const& key ) { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. The function applies RCU lock internally. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and performs an action with it /** \anchor cds_nonintrusive_MichaelKVList_rcu_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change item.second that is reference to value of node. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function makes RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q const& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_find_func "find(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. 
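    For example (a sketch; \p my_less is an illustrative predicate, \p rcu_michael_list is the typedef
    from the \p extract() example above):
    \code
    struct my_less {
        bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
    };
    // ...
    bool bFound = theList.find_with( 10, my_less(),
        []( rcu_michael_list::value_type& item ) {
            // item.second may be inspected or modified here
        });
    \endcode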
*/
template <typename Q, typename Less, typename Func>
bool find_with( Q const& key, Less pred, Func f )
{
    CDS_UNUSED( pred );
    return find_at( head(), key, typename maker::template less_wrapper<Less>(), f );
}

/// Finds \p key and returns the item found
/** \anchor cds_nonintrusive_MichaelKVList_rcu_get
    The function searches the item with \p key and returns the pointer to the item found.
    If \p key is not found it returns an empty \p raw_ptr object.
    Note that the compare functor should accept a parameter of type \p K that may differ from \p key_type.

    RCU should be locked before calling this function.
    The returned item is valid only while RCU is locked:
    \code
    typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > ord_list;
    ord_list theList;
    // ...
    typename ord_list::raw_ptr rp;
    {
        // Lock RCU
        ord_list::rcu_lock lock;

        rp = theList.get( 5 );
        if ( rp ) {
            // Deal with rp
            //...
        }
        // Unlock RCU by rcu_lock destructor
    }
    // rp can be released at any time after RCU has been unlocked
    rp.release();
    \endcode
*/
template <typename K>
raw_ptr get( K const& key )
{
    return get_at( head(), key, intrusive_key_comparator());
}

/// Finds \p key and returns the item found
/**
    The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_get "get(K const&)"
    but \p pred is used for comparing the keys.

    \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type
    and \p K in any order.
    \p pred must imply the same element order as the comparator used for building the list.
*/
template <typename K, typename Less>
raw_ptr get_with( K const& key, Less pred )
{
    CDS_UNUSED( pred );
    return get_at( head(), key, typename maker::template less_wrapper<Less>());
}

/// Checks if the list is empty
bool empty() const
{
    return base_class::empty();
}

/// Returns list's item count
/**
    The value returned depends on the item counter provided by \p Traits. For \p atomicity::empty_item_counter,
    this function always returns 0.

    @note Even if you use a real item counter and it returns 0, this fact does not mean that the list
    is empty. To check list emptiness use the \p empty() method.
*/
size_t size() const
{
    return base_class::size();
}

/// Returns const reference to internal statistics
stat const& statistics() const
{
    return base_class::statistics();
}

/// Clears the list
/**
    Post-condition: the list is empty
*/
void clear()
{
    base_class::clear();
}

protected:
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
    assert( pNode != nullptr );
    scoped_node_ptr p( pNode );
    if ( base_class::insert_at( refHead, *pNode )) {
        p.release();
        return true;
    }
    return false;
}

template <typename K>
bool insert_at( head_type& refHead, const K& key )
{
    return insert_node_at( refHead, alloc_node( key ));
}

template <typename K, typename V>
bool insert_at( head_type& refHead, const K& key, const V& val )
{
    return insert_node_at( refHead, alloc_node( key, val ));
}

template <typename K, typename Func>
bool insert_with_at( head_type& refHead, const K& key, Func f )
{
    scoped_node_ptr pNode( alloc_node( key ));
    if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); })) {
        pNode.release();
        return true;
    }
    return false;
}

template <typename K, typename... Args>
bool emplace_at( head_type& refHead, K&& key, Args&&... args )
{
    return insert_node_at( refHead, alloc_node( std::forward<K>(key), std::forward<Args>(args)...
)); } template std::pair update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_at( refHead, *pNode, [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template bool erase_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::erase_at( refHead, key, cmp ); } template bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) { return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ f( const_cast(node.m_Data)); }); } template node_type * extract_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::extract_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, K const& key, Compare cmp ) { return base_class::find_at( refHead, key, cmp, [](node_type&, K const&) {} ); } template bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) { return base_class::find_at( refHead, key, cmp, [&f](node_type& node, K const&){ f( node.m_Data ); }); } template raw_ptr get_at( head_type& refHead, K const& val, Compare cmp ) { return raw_ptr( base_class::get_at( refHead, val, cmp )); } template static node_type * alloc_node( const K& key ) { return cxx_allocator().New( key ); } template static node_type * alloc_node( const K& key, const V& val ) { return cxx_allocator().New( key, val ); } template static node_type * alloc_node( K&& key, Args&&... args ) { return cxx_allocator().MoveNew( std::forward( key ), std::forward( args )... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } head_type& head() { return base_class::m_pHead; } head_type& head() const { return const_cast(base_class::m_pHead); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H libcds-2.3.3/cds/container/michael_list_dhp.h000066400000000000000000000010241341244201700211460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_DHP_H #define CDSLIB_CONTAINER_MICHAEL_LIST_DHP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_DHP_H libcds-2.3.3/cds/container/michael_list_hp.h000066400000000000000000000010211341244201700207770ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_HP_H #define CDSLIB_CONTAINER_MICHAEL_LIST_HP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_HP_H libcds-2.3.3/cds/container/michael_list_nogc.h000066400000000000000000000371071341244201700213340ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H #define CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H #include #include #include #include namespace cds { namespace container { //@cond namespace details { template struct make_michael_list_nogc: public make_michael_list { typedef make_michael_list base_maker; typedef typename base_maker::node_type node_type; struct intrusive_traits: public base_maker::intrusive_traits { typedef typename base_maker::node_deallocator disposer; }; typedef intrusive::MichaelList type; }; } // namespace details //@endcond /// Michael's lock-free ordered single-linked list (template specialization for \p gc::nogc) /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_MichaelList_nogc This specialization is intended for so-called append-only usage when no item reclamation may be performed. The class does not support deleting of list item. Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). See \ref cds_nonintrusive_MichaelList_gc "MichaelList" for description of template parameters. */ template class MichaelList: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::MichaelList< gc::nogc, T, Traits > #else protected details::make_michael_list_nogc< T, Traits >::type #endif { //@cond typedef details::make_michael_list_nogc< T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::gc::nogc gc; ///< Garbage collector used typedef T value_type; ///< Type of value stored in the list typedef Traits traits; ///< List traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::atomic_node_ptr head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( pNode ) {} explicit iterator_type( const iterator_base& it ) : iterator_base( it ) {} friend class MichaelList; protected: explicit iterator_type( node_type& pNode ) : iterator_base( &pNode ) {} public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() {} iterator_type( const iterator_type& src ) : iterator_base( src ) {} value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? &(p->m_Value) : nullptr; } value_ref operator *() const { return (iterator_base::operator *()).m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } /// Post-increment iterator_type operator ++(int) { return iterator_base::operator ++(0); } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators //@{ /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( head()); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. 
For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( head()); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( head()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor /** Initialize empty list */ MichaelList() {} //@cond template >::value >> explicit MichaelList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~MichaelList() { clear(); } /// Inserts new node /** The function inserts \p val in the list if the list does not contain an item with key equal to \p val. Return an iterator pointing to inserted item if success \ref end() otherwise */ template iterator insert( Q&& val ) { return node_to_iterator( insert_at( head(), std::forward( val ))); } /// Updates the item /** If \p key is not in the list and \p bAllowInsert is \p true, the function inserts a new item. Otherwise, the function returns an iterator pointing to the item found. Returns std::pair where \p first is an iterator pointing to item found or inserted, \p second is true if new item has been added or \p false if the item already is in the list. */ template std::pair update( Q&& key, bool bAllowInsert = true ) { std::pair< node_type *, bool > ret = update_at( head(), std::forward( key ), bAllowInsert ); return std::make_pair( node_to_iterator( ret.first ), ret.second ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const Q& val ) { return update( val, true ); } //@endcond /// Inserts data of type \ref value_type constructed with std::forward(args)... /** Return an iterator pointing to inserted item if success \ref end() otherwise */ template iterator emplace( Args&&... args ) { return node_to_iterator( emplace_at( head(), std::forward(args)... )); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointed to item found if the key is found, and \ref end() otherwise */ template iterator contains( Q const& key ) { return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template iterator contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper())); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Check if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. 
For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact does not mean that the list is empty. To check list emptyness use \p empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond static value_type& node_to_value( node_type& n ) { return n.m_Value; } static node_type * alloc_node() { return cxx_allocator().New(); } static node_type * alloc_node( value_type const& v ) { return cxx_allocator().New( v ); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } head_type& head() { return base_class::m_pHead; } head_type const& head() const { return base_class::m_pHead; } iterator node_to_iterator( node_type * pNode ) { if ( pNode ) return iterator( *pNode ); return end(); } iterator insert_node( node_type * pNode ) { return node_to_iterator( insert_node_at( head(), pNode )); } node_type * insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::insert_at( refHead, *pNode )) return p.release(); return nullptr; } template node_type * insert_at( head_type& refHead, Q&& val ) { return insert_node_at( refHead, alloc_node( std::forward( val ))); } template std::pair< node_type *, bool > update_at( head_type& refHead, Q&& val, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( std::forward( val ))); node_type * pItemFound = nullptr; std::pair ret = base_class::update_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; }, bAllowInsert ); if ( ret.second ) pNode.release(); return std::make_pair( pItemFound, ret.second ); } template node_type * emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args)...)); } template node_type * find_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::find_at( refHead, key, cmp ); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H libcds-2.3.3/cds/container/michael_list_rcu.h000066400000000000000000001016621341244201700211750ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H #define CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H #include #include #include #include #include namespace cds { namespace container { /// Michael's ordered list (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_list \anchor cds_nonintrusive_MichaelList_rcu Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" This class is non-intrusive version of \ref cds_intrusive_MichaelList_rcu "cds::intrusive::MichaelList" RCU specialization. Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - type stored in the list. The type must be default- and copy-constructible. 
- \p Traits - type traits, default is michael_list::traits The implementation does not divide type \p T into key and value part and may be used as a main building block for hash set containers. The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor or Traits::less predicate. \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" is a key-value version of Michael's non-intrusive list that is closer to the C++ std library approach. @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template argument. For example, the following traits-based declaration of Michael's list \code #include #include // Declare comparator for the item struct my_compare { int operator ()( int i1, int i2 ) { return i1 - i2; } }; // Declare traits struct my_traits: public cds::container::michael_list::traits { typedef my_compare compare; }; // Declare traits-based list typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, my_traits > traits_based_list; \endcode is equivalent for the following option-based list \code #include #include // my_compare is the same // Declare option-based list typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, typename cds::container::michael_list::make_traits< cds::container::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode Template argument list \p Options of cds::container::michael_list::make_traits metafunction are: - opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the opt::less is used. - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consisnent memory model). - opt::rcu_check_deadlock - a deadlock checking policy. 
Default is opt::v::rcu_throw_deadlock */ template < typename RCU, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = michael_list::traits #else typename Traits #endif > class MichaelList< cds::urcu::gc, T, Traits > : #ifdef CDS_DOXYGEN_INVOKED protected intrusive::MichaelList< cds::urcu::gc, T, Traits > #else protected details::make_michael_list< cds::urcu::gc, T, Traits >::type #endif { //@cond typedef details::make_michael_list< cds::urcu::gc, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc gc; ///< RCU typedef T value_type; ///< Type of value stored in the list typedef Traits traits; ///< List traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename base_class::stat stat; ///< Internal statistics typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef typename base_class::value_type node_type; typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; typedef typename maker::intrusive_traits::compare intrusive_key_comparator; typedef typename base_class::atomic_node_ptr head_type; struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond private: //@cond struct raw_ptr_converter { value_type * operator()( node_type * p ) const { return p ? &p->m_Value : nullptr; } value_type& operator()( node_type& n ) const { return n.m_Value; } value_type const& operator()( node_type const& n ) const { return n.m_Value; } }; //@endcond public: ///< pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base; iterator_type( head_type const& pNode ) : iterator_base( pNode ) {} friend class MichaelList; public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() {} iterator_type( iterator_type const& src ) : iterator_base( src ) {} value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); return p ? 
&(p->m_Value) : nullptr; } value_ref operator *() const { return (iterator_base::operator *()).m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base::operator ++(); return *this; } template bool operator ==(iterator_type const& i ) const { return iterator_base::operator ==(i); } template bool operator !=(iterator_type const& i ) const { return iterator_base::operator !=(i); } }; //@endcond public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the item the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( head()); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( head()); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( head()); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor /** Initialize empty list */ MichaelList() {} //@cond template >::value >> explicit MichaelList( Stat& st ) : base_class( st ) {} //@endcond /// List destructor /** Clears the list */ ~MichaelList() { clear(); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the list. The type \p Q should contain as minimum the complete key of the node. The object of \ref value_type should be constructible from \p val of type \p Q. In trivial case, \p Q is equal to \ref value_type. The function makes RCU lock internally. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( Q&& val ) { return insert_at( head(), std::forward( val )); } /// Inserts new node /** This function inserts new node with default-constructed value and then it calls \p func functor with signature \code void func( value_type& itemValue ) ;\endcode The argument \p itemValue of user-defined functor \p func is the reference to the list's item inserted. User-defined functor \p func should guarantee that during changing item's value no any other changes could be made on this list's item by concurrent threads. The type \p Q should contain the complete key of the node. The object of \ref value_type should be constructible from \p key of type \p Q. 
The function allows to split the creation of a new item into two parts:
- create an item from \p key with initializing key-fields only;
- insert the new item into the list;
- if inserting is successful, initialize non-key fields of the item by calling the \p func functor

This can be useful if complete initialization of an object of \p value_type is heavyweight
and it is preferable that the initialization should be completed only if inserting is successful.

The function makes RCU lock internally.

@warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename Q, typename Func>
bool insert( Q&& key, Func func )
{
    return insert_at( head(), std::forward<Q>( key ), func );
}

/// Updates data by \p key
/**
    The operation performs inserting or replacing the element in a lock-free manner.

    If \p key is not found in the list, then the new item created from \p key
    will be inserted iff \p bAllowInsert is \p true.
    Otherwise, if \p key is found, the functor \p func is called with the item found.
    The functor \p Func signature is:
    \code
    struct my_functor {
        void operator()( bool bNew, value_type& item, Q const& val );
    };
    \endcode
    with arguments:
    - \p bNew - \p true if the item has been inserted, \p false otherwise
    - \p item - the item of the list
    - \p val - the argument \p key passed into the \p %update() function

    The functor may change non-key fields of the \p item; however, \p func must guarantee
    that during changing no other modifications could be made on this item by concurrent threads.

    The function applies RCU lock internally.

    Returns std::pair<bool, bool> where \p first is true if the operation is successful,
    \p second is true if a new item has been added or \p false if the item with \p key
    already exists.

    @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename Q, typename Func>
std::pair<bool, bool> update( Q const& key, Func func, bool bAllowInsert = true )
{
    return update_at( head(), key, func, bAllowInsert );
}
//@cond
template <typename Q, typename Func>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<bool, bool> ensure( Q const& key, Func f )
{
    return update( key, f, true );
}
//@endcond

/// Inserts data of type \ref value_type constructed from \p args
/**
    Returns \p true if inserting successful, \p false otherwise.

    The function makes RCU lock internally.
*/
template <typename... Args>
bool emplace( Args&&... args )
{
    return emplace_at( head(), std::forward<Args>(args)... );
}

/// Deletes \p key from the list
/** \anchor cds_nonintrusive_MichealList_rcu_erase_val
    Since the key of MichaelList's item type \p value_type is not explicitly specified,
    template parameter \p Q defines the key type to search in the list.
    The list item comparator should be able to compare values of type \p value_type
    and \p Q in any order.

    RCU \p synchronize method can be called. RCU should not be locked.

    Returns \p true if key is found and deleted, \p false otherwise
*/
template <typename Q>
bool erase( Q const& key )
{
    return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} );
}

/// Deletes the item from the list using \p pred predicate for searching
/**
    The function is an analog of \ref cds_nonintrusive_MichealList_rcu_erase_val "erase(Q const&)"
    but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the list.
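For example, a brief sketch (the item type \p Foo, its key field \p nKey, and the
\p foo_traits typedef are assumptions for illustration only):
\code
struct Foo {
    int nKey;
    // ... payload fields
};

// Less predicate able to compare Foo with Foo and Foo with int in any order
struct less_by_key {
    bool operator()( Foo const& lhs, Foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
    bool operator()( Foo const& lhs, int rhs ) const        { return lhs.nKey < rhs; }
    bool operator()( int lhs, Foo const& rhs ) const        { return lhs < rhs.nKey; }
};

typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo, foo_traits > foo_list;

foo_list theList;
// ...
theList.erase_with( 10, less_by_key());  // deletes the item with nKey == 10, if present
\endcode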
*/ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), [](value_type const&){} ); } /// Deletes \p key from the list /** \anchor cds_nonintrusive_MichaelList_rcu_erase_func The function searches an item with key \p key, calls \p f functor with item found and deletes it. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct functor { void operator()(const value_type& val) { ... } }; \endcode Since the key of MichaelList's item type \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the values of type \p value_type and \p Q in any order. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { return erase_at( head(), key, intrusive_key_comparator(), f ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( head(), key, typename maker::template less_wrapper(), f ); } /// Extracts an item from the list /** @anchor cds_nonintrusive_MichaelList_rcu_extract The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. @note The function does NOT dispose the item found. It just excludes the item from the list and returns a pointer to the item. You shouldn't lock RCU for current thread before calling this function. \code #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::MichaelList< rcu, Foo > rcu_michael_list; rcu_michael_list theList; // ... rcu_michael_list::exempt_ptr p; // The RCU should NOT be locked when extract() is called! assert( !rcu::is_locked()); // extract() call p = theList.extract( 10 ) if ( p ) { // do something with p ... } // we may safely release extracted pointer here. // release() passes the pointer to RCU reclamation cycle. p.release(); \endcode */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( extract_at( head(), key, intrusive_key_comparator())); } /// Extracts an item from the list using \p pred predicate for searching /** This function is the analog for \p extract(Q const&). The \p pred is a predicate used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as \ref key_comparator. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper())); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. 
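A brief usage sketch, reusing the \p traits_based_list typedef from the class-level example above:
\code
traits_based_list theList;
theList.insert( 5 );

assert( theList.contains( 5 ));    // RCU is locked internally
assert( !theList.contains( 10 ));
\endcode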
*/ template bool contains( Q const& key ) { return find_at( head(), key, intrusive_key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper()); } //@cond // Deprecatd, use contains() template bool find_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return contains( key, pred ); } //@endcond /// Finds the key \p key and performs an action with it /** \anchor cds_nonintrusive_MichaelList_rcu_find_func The function searches an item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the \p %find() function argument. The functor may change non-key fields of \p item. Note that the function is only guarantee that \p item cannot be deleted during functor is executing. The function does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function makes RCU lock internally. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( head(), key, intrusive_key_comparator(), f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( head(), key, typename maker::template less_wrapper(), f ); } //@endcond /// Finds the key \p key and return the item found /** \anchor cds_nonintrusive_MichaelList_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns an empty \p raw_ptr. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; ord_list theList; // ... typename ord_list::raw_ptr rp; { // Lock RCU ord_list::rcu_lock lock; rp = theList.get( 5 ); if ( rp ) { // Deal with rp //... 
} // Unlock RCU by rcu_lock destructor // A value owned by rp can be freed at any time after RCU has been unlocked } // You can manually release rp after RCU-locked section rp.release(); \endcode */ template raw_ptr get( Q const& key ) { return get_at( head(), key, intrusive_key_comparator()); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template raw_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( head(), key, typename maker::template less_wrapper()); } /// Checks if the list is empty bool empty() const { return base_class::empty(); } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact does not mean that the list is empty. To check list emptyness use \p empty() method. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } /// Clears the list void clear() { base_class::clear(); } protected: //@cond bool insert_node( node_type * pNode ) { return insert_node_at( head(), pNode ); } bool insert_node_at( head_type& refHead, node_type * pNode ) { assert( pNode ); scoped_node_ptr p(pNode); if ( base_class::insert_at( refHead, *pNode )) { p.release(); return true; } return false; } template bool insert_at( head_type& refHead, Q&& val ) { return insert_node_at( refHead, alloc_node( std::forward( val ))); } template bool insert_at( head_type& refHead, Q&& key, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( key ))); if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { f( node_to_value(node)); } )) { pNode.release(); return true; } return false; } template bool emplace_at( head_type& refHead, Args&&... args ) { return insert_node_at( refHead, alloc_node( std::forward(args) ... 
)); } template bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) { return base_class::erase_at( refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); } template std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_at( refHead, *pNode, [&f, &key](bool bNew, node_type& node, node_type&){ f( bNew, node_to_value(node), key );}, bAllowInsert ); if ( ret.first && ret.second ) pNode.release(); return ret; } template node_type * extract_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::extract_at( refHead, key, cmp ); } template bool find_at( head_type& refHead, Q const& key, Compare cmp ) { return base_class::find_at( refHead, key, cmp, [](node_type&, Q const &) {} ); } template bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) { return base_class::find_at( refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); } template raw_ptr get_at( head_type& refHead, Q const& val, Compare cmp ) { return raw_ptr( base_class::get_at( refHead, val, cmp )); } static value_type& node_to_value( node_type& n ) { return n.m_Value; } static value_type const& node_to_value( node_type const& n ) { return n.m_Value; } template static node_type * alloc_node( Q&& v ) { return cxx_allocator().New( std::forward( v )); } template static node_type * alloc_node( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * pNode ) { cxx_allocator().Delete( pNode ); } head_type& head() { return base_class::m_pHead; } head_type& head() const { return const_cast(base_class::m_pHead); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H libcds-2.3.3/cds/container/michael_map.h000066400000000000000000001155421341244201700201300ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_MAP_H #define CDSLIB_CONTAINER_MICHAEL_MAP_H #include #include #include namespace cds { namespace container { /// Michael's hash map /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_MichaelHashMap_hp Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" Michael's hash table algorithm is based on lock-free ordered list and it is very simple. The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. However, each bucket may contain unbounded number of items. Template parameters are: - \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector" from the \p libcds library. Note the \p GC must be the same as the GC used for \p OrderedList - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, \p MichaelKVList, \p LazyKVList, \p IterableKVList. The ordered list implementation specifies the \p Key and \p Value types stored in the hash-map, the reclamation schema \p GC used by hash-map, the comparison functor for the type \p Key and other features specific for the ordered list. - \p Traits - map traits, default is \p michael_map::traits. 
Instead of defining a \p Traits struct you may use option-based syntax with the
\p michael_map::make_traits metafunction.

Many of the class functions take a key argument of type \p K that in general is not \p key_type.
\p key_type and an argument of template type \p K must meet the following requirements:
- \p key_type should be constructible from a value of type \p K;
- the hash functor should be able to calculate the correct hash value from argument \p key of type \p K:
  hash( key_type(key)) == hash( key )
- values of type \p key_type and \p K should be comparable

There are specializations:
- for \ref cds_urcu_desc "RCU" - declared in cds/container/michael_map_rcu.h,
  see \ref cds_nonintrusive_MichaelHashMap_rcu "MichaelHashMap".
- for \p cds::gc::nogc - declared in cds/container/michael_map_nogc.h,
  see \ref cds_nonintrusive_MichaelHashMap_nogc "MichaelHashMap".

\anchor cds_nonintrusive_MichaelHashMap_how_touse
How to use

Suppose you want to make an \p int to \p int map for the Hazard Pointer garbage collector.
You should choose a suitable ordered list class that will be used as a bucket for the map;
it may be \p MichaelKVList.
\code
#include <cds/container/michael_kvlist_hp.h>  // MichaelKVList for gc::HP
#include <cds/container/michael_map.h>        // MichaelHashMap

// List traits based on std::less predicate
struct list_traits: public cds::container::michael_list::traits
{
    typedef std::less<int> less;
};

// Ordered list
typedef cds::container::MichaelKVList< cds::gc::HP, int, int, list_traits> int2int_list;

// Map traits
struct map_traits: public cds::container::michael_map::traits
{
    struct hash {
        size_t operator()( int i ) const
        {
            return cds::opt::v::hash<int>()( i );
        }
    };
};

// Your map
typedef cds::container::MichaelHashMap< cds::gc::HP, int2int_list, map_traits > int2int_map;

// Now you can use int2int_map class

int main()
{
    int2int_map theMap( 100, 4 );  // up to 100 items expected, load factor 4
    theMap.insert( 100 );
    ...
} \endcode You may use option-based declaration: \code #include // MichaelKVList for gc::HP #include // MichaelHashMap // Ordered list typedef cds::container::MichaelKVList< cds::gc::HP, int, int, typename cds::container::michael_list::make_traits< cds::container::opt::less< std::less > // item comparator option >::type > int2int_list; // Map typedef cds::container::MichaelHashMap< cds::gc::HP, int2int_list, cds::container::michael_map::make_traits< cc::opt::hash< cds::opt::v::hash > > > int2int_map; \endcode */ template < class GC, class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_map::traits #else class Traits #endif > class MichaelHashMap { public: typedef GC gc; ///< Garbage collector typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket typedef Traits traits; ///< Map traits typedef typename ordered_list::key_type key_type; ///< key type typedef typename ordered_list::mapped_type mapped_type; ///< value type typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the map typedef typename traits::allocator allocator; ///< Bucket table allocator typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics /// Guarded pointer - a result of \p get() and \p extract() functions typedef typename ordered_list::guarded_ptr guarded_ptr; #endif /// Hash functor for \ref key_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type // GC and OrderedList::gc must be the same static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount; ///< Count of hazard pointer required //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type; typedef typename internal_bucket_type::guarded_ptr guarded_ptr; typedef typename std::allocator_traits< allocator >::template rebind_alloc bucket_table_allocator; typedef typename bucket_stat::stat stat; //@endcond protected: //@cond const size_t m_nHashBitmask; internal_bucket_type* m_Buckets; ///< bucket table hash m_HashFunctor; ///< Hash functor item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond protected: //@cond /// Forward iterator template class iterator_type: protected cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > { typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class; friend class MichaelHashMap; protected: typedef typename base_class::bucket_ptr bucket_ptr; typedef typename base_class::list_iterator list_iterator; public: /// Value pointer type (const for const_iterator) typedef typename cds::details::make_const_type::pointer value_ptr; /// Value reference type (const for const_iterator) typedef typename cds::details::make_const_type::reference value_ref; /// Key-value pair pointer type (const for const_iterator) typedef typename cds::details::make_const_type::pointer pair_ptr; /// Key-value pair reference type (const for const_iterator) typedef typename 
cds::details::make_const_type::reference pair_ref; protected: iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) : base_class( it, pFirst, pLast ) {} public: /// Default ctor iterator_type() : base_class() {} /// Copy ctor iterator_type( const iterator_type& src ) : base_class( src ) {} /// Dereference operator pair_ptr operator ->() const { assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator ->(); } /// Dereference operator pair_ref operator *() const { assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator *(); } /// Pre-increment iterator_type& operator ++() { base_class::operator++(); return *this; } /// Assignment operator iterator_type& operator = (const iterator_type& src) { base_class::operator =(src); return *this; } /// Returns current bucket (debug function) bucket_ptr bucket() const { return base_class::bucket(); } /// Equality operator template bool operator ==(iterator_type const& i ) const { return base_class::operator ==( i ); } /// Equality operator template bool operator !=(iterator_type const& i ) const { return !( *this == i ); } }; //@endcond public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** The forward iterator for Michael's map has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. Iterator thread safety depends on type of \p OrderedList: - for \p MichaelKVList and \p LazyKVList: iterator guarantees safety even if you delete the item that iterator points to because that item is guarded by hazard pointer. However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the map. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. Use this iterator on the concurrent container for debugging purpose only. - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode @note The iterator object returned by \p end(), \p cend() member functions points to \p nullptr and should not be dereferenced. */ typedef iterator_type< false > iterator; /// Const forward iterator typedef iterator_type< true > const_iterator; /// Returns a forward iterator addressing the first element in a map /** For empty map \code begin() == end() \endcode */ iterator begin() { return iterator( bucket_begin()->begin(), bucket_begin(), bucket_end()); } /// Returns an iterator that addresses the location succeeding the last element in a map /** Do not use the value returned by end function to access any item. 
The returned value can be used only to control reaching the end of the map. For empty map \code begin() == end() \endcode */ iterator end() { return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end()); } /// Returns a forward const iterator addressing the first element in a map const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a map const_iterator cbegin() const { return get_const_begin(); } /// Returns an const iterator that addresses the location succeeding the last element in a map const_iterator end() const { return get_const_end(); } /// Returns an const iterator that addresses the location succeeding the last element in a map const_iterator cend() const { return get_const_end(); } //@} public: /// Initializes the map /** @anchor cds_nonintrusive_MichaelHashMap_hp_ctor The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount when you create an object. \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. Note, that many popular STL hash map implementation uses load factor 1. The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. */ MichaelHashMap( size_t nMaxItemCount, ///< estimation of max item count in the hash map size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clears hash map and destroys it ~MichaelHashMap() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K&& key ) { const bool bRet = bucket( key ).insert( std::forward( key )); if ( bRet ) ++m_ItemCounter; return bRet; } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. Returns \p true if \p val is inserted into the map, \p false otherwise. 
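A short sketch, reusing the \p int2int_map typedef from the class-level example above:
\code
int2int_map theMap( 100, 4 );

bool bOk = theMap.insert( 5, 50 );  // inserts the pair (5, 50)
assert( bOk );

bOk = theMap.insert( 5, 500 );      // fails: the key 5 is already in the map
assert( !bOk );
\endcode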
*/ template bool insert( K&& key, V&& val ) { const bool bRet = bucket( key ).insert( std::forward( key ), std::forward( val )); if ( bRet ) ++m_ItemCounter; return bRet; } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. The user-defined functor is called only if inserting is successful. The \p key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template bool insert_with( K&& key, Func func ) { const bool bRet = bucket( key ).insert_with( std::forward( key ), func ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Updates data by \p key /** The operation performs inserting or replacing the element with lock-free manner. If the \p key not found in the map, then the new item created from \p key will be inserted into the map iff \p bAllowInsert is \p true. (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with item found. The functor \p func signature depends on \p OrderedList: for \p MichaelKVList, \p LazyKVList \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of the \p item.second that is \p mapped_type. for \p IterableKVList \code void func( value_type& val, value_type * old ); \endcode where - \p val - a new data constructed from \p key - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. The functor may change non-key fields of \p val; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. @return std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" and \ref cds_nonintrusive_IterableKVList_gc "IterableKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level synchronization. 
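A minimal sketch, reusing the \p int2int_map typedef from the class-level example above
(the bucket there is \p MichaelKVList):
\code
int2int_map theMap( 100, 4 );

std::pair<bool, bool> ret = theMap.update( 5,
    []( bool bNew, int2int_map::value_type& item ) {
        // item.first is the key, item.second is the mapped value
        item.second = bNew ? 1 : item.second + 1;
    });
assert( ret.first );   // the operation succeeded
assert( ret.second );  // a new item has been inserted
\endcode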
*/ template std::pair update( K&& key, Func func, bool bAllowInsert = true ) { std::pair bRet = bucket( key ).update( std::forward( key ), func, bAllowInsert ); if ( bRet.first && bRet.second ) ++m_ItemCounter; return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { std::pair bRet = bucket( key ).update( key, func, true ); if ( bRet.first && bRet.second ) ++m_ItemCounter; return bRet; } //@endcond /// Inserts or updates the node (only for \p IterableKVList) /** The operation performs inserting or changing data with lock-free manner. If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the current element is changed to \p val, the old element will be retired later. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key already in the map. */ template #ifdef CDS_DOXYGEN_INVOKED std::pair #else typename std::enable_if< std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, std::pair >::type #endif upsert( Q&& key, V&& val, bool bAllowInsert = true ) { std::pair bRet = bucket( val ).upsert( std::forward( key ), std::forward( val ), bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; } /// For key \p key inserts data of type \p mapped_type created from \p args /** \p key_type should be constructible from type \p K Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { const bool bRet = bucket( key ).emplace( std::forward(key), std::forward(args)... ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Deletes \p key from the map /** \anchor cds_nonintrusive_MichaelMap_erase_val Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( K const& key ) { const bool bRet = bucket( key ).erase( key ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelMap_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred ) { const bool bRet = bucket( key ).erase_with( key, pred ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes \p key from the map /** \anchor cds_nonintrusive_MichaelMap_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { const bool bRet = bucket( key ).erase( key, f ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelMap_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. 
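A brief sketch of erasing with a functor, reusing \p int2int_map from the class-level
example above (the \p Less-based overload differs only in the extra predicate argument):
\code
int2int_map theMap( 100, 4 );
theMap.insert( 5, 50 );

int nOldVal = 0;
theMap.erase( 5, [&nOldVal]( int2int_map::value_type& item ) {
    nOldVal = item.second;  // observe the value of the pair being deleted
});
assert( nOldVal == 50 );
\endcode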
*/ template bool erase_with( K const& key, Less pred, Func f ) { const bool bRet = bucket( key ).erase_with( key, pred, f ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item pointed by iterator \p iter (only for \p IterableList based map) /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. @note \p %erase_at() is supported only for \p %MichaelHashMap based on \p IterableList. */ #ifdef CDS_DOXYGEN_INVOKED bool erase_at( iterator const& iter ) #else template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type erase_at( Iterator const& iter ) #endif { assert( iter != end()); assert( iter.bucket() != nullptr ); if ( iter.bucket()->erase_at( iter.underlying_iterator())) { --m_ItemCounter; return true; } return false; } /// Extracts the item with specified \p key /** \anchor cds_nonintrusive_MichaelHashMap_hp_extract The function searches an item with key equal to \p key, unlinks it from the map, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p K that may be not the same as \p key_type. The extracted item is freed automatically when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::container::MichaelHashMap< your_template_args > michael_map; michael_map theMap; // ... { michael_map::guarded_ptr gp( theMap.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( K const& key ) { guarded_ptr gp( bucket( key ).extract( key )); if ( gp ) --m_ItemCounter; return gp; } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_nonintrusive_MichaelHashMap_hp_extract "extract(K const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. */ template guarded_ptr extract_with( K const& key, Less pred ) { guarded_ptr gp( bucket( key ).extract_with( key, pred )); if ( gp ) --m_ItemCounter; return gp; } /// Finds the key \p key /** \anchor cds_nonintrusive_MichaelMap_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the map's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( K const& key, Func f ) { return bucket( key ).find( key, f ); } /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) /** If \p key is not found the function returns \p end(). 
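A sketch, assuming \p iterable_map is a \p %MichaelHashMap built over \p IterableKVList
(the typedef is an assumption for illustration):
\code
iterable_map theMap( 100, 4 );
theMap.insert( 5, 50 );

iterable_map::iterator it = theMap.find( 5 );
if ( it != theMap.end()) {
    // it->first is the key, it->second is the mapped value
    assert( it->second == 50 );
}
\endcode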
@note This function is supported only for map based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find( K const& key ) { auto& b = bucket( key ); auto it = b.find( key ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelMap_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool find_with( K const& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) /** The function is an analog of \p find(K&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. If \p key is not found the function returns \p end(). @note This function is supported only for map based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find_with( K const& key, Less pred ) { auto& b = bucket( key ); auto it = b.find_with( key, pred ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( K const& key ) { return bucket( key ).contains( key ); } /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool contains( K const& key, Less pred ) { return bucket( key ).contains( key, pred ); } /// Finds \p key and return the item found /** \anchor cds_nonintrusive_MichaelHashMap_hp_get The function searches the item with key equal to \p key and returns the guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer, @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::container::MichaeHashMap< your_template_params > michael_map; michael_map theMap; // ... { michael_map::guarded_ptr gp( theMap.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for \p OrderedList template parameter should accept a parameter of type \p K that can be not the same as \p key_type. */ template guarded_ptr get( K const& key ) { return bucket( key ).get( key ); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_MichaelHashMap_hp_get "get( K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. 
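For example, to look up a \p std::string key by a C-string argument (a sketch;
\p str2int_map is an assumed \p %MichaelHashMap with \p std::string keys whose
hash functor yields the same value for equal \p std::string and \p char const* keys):
\code
struct less_str {
    bool operator()( std::string const& lhs, char const* rhs ) const { return lhs.compare( rhs ) < 0; }
    bool operator()( char const* lhs, std::string const& rhs ) const { return rhs.compare( lhs ) > 0; }
};

str2int_map theMap( 100, 4 );
// ...
str2int_map::guarded_ptr gp( theMap.get_with( "five", less_str()));
if ( gp ) {
    // gp->first is the key, gp->second is the value
}
\endcode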
*/ template guarded_ptr get_with( K const& key, Less pred ) { return bucket( key ).get_with( key, pred ); } /// Clears the map (not atomic) void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the map is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the map /** If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns the size of hash table /** Since \p %MichaelHashMap cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see \p MichaelHashMap::MichaelHashMap for explanation. */ size_t bucket_count() const { return m_nHashBitmask + 1; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( Q const& key ) { return m_Buckets[hash_value( key )]; } //@endcond private: //@cond internal_bucket_type* bucket_begin() const { return m_Buckets; } internal_bucket_type* bucket_end() const { return m_Buckets + bucket_count(); } const_iterator get_const_begin() const { return const_iterator( bucket_begin()->cbegin(), bucket_begin(), bucket_end()); } const_iterator get_const_end() const { return const_iterator( (bucket_end() - 1)->cend(), bucket_end() - 1, bucket_end()); } template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type( m_Stat ); } //@endcond }; }} // namespace cds::container #endif // ifndef CDSLIB_CONTAINER_MICHAEL_MAP_H libcds-2.3.3/cds/container/michael_map_nogc.h000066400000000000000000000533211341244201700211320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H #define CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H #include #include #include namespace cds { namespace container { /// Michael's hash map (template specialization for \p cds::gc::nogc) /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_MichaelHashMap_nogc This specialization is so-called append-only when no item reclamation may be performed. The class does not support deleting of map item. See @ref cds_nonintrusive_MichaelHashMap_hp "MichaelHashMap" for description of template parameters. 
*/ template < class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_map::traits #else class Traits #endif > class MichaelHashMap { public: typedef cds::gc::nogc gc; ///< No garbage collector typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation typedef Traits traits; ///< Map traits typedef typename ordered_list::key_type key_type; ///< key type typedef typename ordered_list::mapped_type mapped_type; ///< type of value to be stored in the map typedef typename ordered_list::value_type value_type; ///< Pair used as the some functor's argument typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor /// Hash functor for \ref key_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Bucket table allocator #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics #endif // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); protected: //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type; /// Bucket table allocator typedef typename std::allocator_traits< allocator >::template rebind_alloc bucket_table_allocator; typedef typename internal_bucket_type::iterator bucket_iterator; typedef typename internal_bucket_type::const_iterator bucket_const_iterator; //@endcond public: //@cond typedef typename bucket_stat::stat stat; //@endcond protected: //@cond const size_t m_nHashBitmask; hash m_HashFunctor; ///< Hash functor internal_bucket_type* m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond protected: //@cond template class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > { typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class; friend class MichaelHashMap; protected: typedef typename base_class::bucket_ptr bucket_ptr; typedef typename base_class::list_iterator list_iterator; public: /// Value pointer type (const for const_iterator) typedef typename cds::details::make_const_type::pointer value_ptr; /// Value reference type (const for const_iterator) typedef typename cds::details::make_const_type::reference value_ref; /// Key-value pair pointer type (const for const_iterator) typedef typename cds::details::make_const_type::pointer pair_ptr; /// Key-value pair reference type (const for const_iterator) typedef typename cds::details::make_const_type::reference pair_ref; protected: iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) : base_class( it, pFirst, pLast ) {} public: /// Default ctor iterator_type() : base_class() {} /// Copy ctor iterator_type( const iterator_type& src ) : base_class( src ) {} /// Dereference operator pair_ptr operator ->() const { assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator ->(); } /// Dereference operator pair_ref operator *() const { assert( base_class::m_pCurBucket != nullptr ); return 
base_class::m_itList.operator *(); } /// Pre-increment iterator_type& operator ++() { base_class::operator++(); return *this; } /// Assignment operator iterator_type& operator = (const iterator_type& src) { base_class::operator =(src); return *this; } /// Returns current bucket (debug function) bucket_ptr bucket() const { return base_class::bucket(); } /// Equality operator template bool operator ==(iterator_type const& i ) const { return base_class::operator ==( i ); } /// Equality operator template bool operator !=(iterator_type const& i ) const { return !( *this == i ); } }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for Michael's map is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef iterator_type< false > iterator; /// Const forward iterator typedef iterator_type< true > const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return get_const_begin(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return get_const_end(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return get_const_end(); } //@} public: /// Initialize the map /** The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount when you create an object. \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. Note, that many popular STL hash map implementation uses load factor 1. The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. 
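For example, with \p nMaxItemCount = 1000000 and \p nLoadFactor = 4 the bucket table
contains 1000000 / 4 = 250000 buckets rounded up to the nearest power of two, i.e. 262144
(a sketch; \p append_only_map is an assumed typedef for a \p %MichaelHashMap specialized
with \p cds::gc::nogc):
\code
append_only_map theMap( 1000000, 4 );
assert( theMap.bucket_count() == 262144 );  // 2^18, the nearest power of two >= 250000
\endcode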
*/ MichaelHashMap( size_t nMaxItemCount, ///< estimation of max item count in the hash set size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clears hash set and destroys it ~MichaelHashMap() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. Returns an iterator pointed to inserted value, or \p end() if inserting is failed */ template iterator insert( const K& key ) { internal_bucket_type& refBucket = bucket( key ); bucket_iterator it = refBucket.insert( key ); if ( it != refBucket.end()) { ++m_ItemCounter; return iterator( it, &refBucket, m_Buckets + bucket_count()); } return end(); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref mapped_type should be constructible from \p val of type \p V. Returns an iterator pointed to inserted value, or \p end() if inserting is failed */ template iterator insert( K const& key, V const& val ) { internal_bucket_type& refBucket = bucket( key ); bucket_iterator it = refBucket.insert( key, val ); if ( it != refBucket.end()) { ++m_ItemCounter; return iterator( it, &refBucket, m_Buckets + bucket_count()); } return end(); } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted. item.second is a reference to item's value that may be changed. The user-defined functor it is called only if the inserting is successful. The \p key_type should be constructible from value of type \p K. The function allows to split creating of new item into two part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p f functor This can be useful if complete initialization of object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. Returns an iterator pointed to inserted value, or \p end() if inserting is failed @warning For \ref cds_nonintrusive_MichaelKVList_nogc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_nogc "LazyKVList" provides exclusive access to inserted item and does not require any node-level synchronization. 
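A brief sketch (assuming \p append_only_map maps \p int to \p std::string):
\code
append_only_map theMap( 100, 4 );

// The functor runs only if the insertion succeeds
append_only_map::iterator it = theMap.insert_with( 5,
    []( append_only_map::value_type& item ) {
        item.second = "five";  // initialize the mapped value
    });
if ( it != theMap.end()) {
    // the new item has been inserted and initialized
}
\endcode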
*/
template <typename K, typename Func>
iterator insert_with( const K& key, Func func )
{
    internal_bucket_type& refBucket = bucket( key );
    bucket_iterator it = refBucket.insert_with( key, func );
    if ( it != refBucket.end()) {
        ++m_ItemCounter;
        return iterator( it, &refBucket, m_Buckets + bucket_count());
    }
    return end();
}
/// For key \p key inserts data of type \p mapped_type created from \p args
/** \p key_type should be constructible from type \p K
Returns an iterator pointing to the inserted value, or \p end() if the insertion failed
*/
template <typename K, typename... Args>
iterator emplace( K&& key, Args&&... args )
{
    internal_bucket_type& refBucket = bucket( key );
    bucket_iterator it = refBucket.emplace( std::forward<K>(key), std::forward<Args>(args)... );
    if ( it != refBucket.end()) {
        ++m_ItemCounter;
        return iterator( it, &refBucket, m_Buckets + bucket_count());
    }
    return end();
}
/// Updates the item
/** If \p key is not in the map and \p bAllowInsert is \p true, the function inserts a new item. Otherwise, the function returns an iterator pointing to the item found.
Returns std::pair<iterator, bool> where \p first is an iterator pointing to the item found or inserted (if inserting is not allowed and \p key is not found, the iterator will be \p end()), \p second is \p true if a new item has been added or \p false if the item is already in the map.
@warning For \ref cds_nonintrusive_MichaelKVList_nogc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_nogc "LazyKVList" provides exclusive access to inserted item and does not require any node-level synchronization.
*/
template <typename K>
std::pair<iterator, bool> update( const K& key, bool bAllowInsert = true )
{
    internal_bucket_type& refBucket = bucket( key );
    std::pair<bucket_iterator, bool> ret = refBucket.update( key, bAllowInsert );
    if ( ret.second )
        ++m_ItemCounter;
    else if ( ret.first == refBucket.end())
        return std::make_pair( end(), false );
    return std::make_pair( iterator( ret.first, &refBucket, m_Buckets + bucket_count()), ret.second );
}
//@cond
template <typename K>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<iterator, bool> ensure( K const& key )
{
    return update( key, true );
}
//@endcond
/// Checks whether the map contains \p key
/** The function searches the item with key equal to \p key and returns an iterator pointing to the item found, or \p end() otherwise
*/
template <typename K>
iterator contains( K const& key )
{
    internal_bucket_type& refBucket = bucket( key );
    bucket_iterator it = refBucket.contains( key );
    if ( it != refBucket.end())
        return iterator( it, &refBucket, m_Buckets + bucket_count());
    return end();
}
//@cond
template <typename K>
CDS_DEPRECATED("deprecated, use contains()")
iterator find( K const& key )
{
    return contains( key );
}
//@endcond
/// Checks whether the map contains \p key using \p pred predicate for searching
/** The function is an analog of contains( key ) but \p pred is used for key comparison. The \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. The hash functor specified in \p Traits should accept parameters of type \p K.
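For example, a heterogeneous \p Less functor for a map keyed by \p std::string might look as follows (a sketch; the ordering must match the comparator of the bucket list):
\code
struct string_less {
    bool operator()( std::string const& lhs, std::string const& rhs ) const
    { return lhs < rhs; }
    // comparisons with char const* avoid constructing a temporary std::string key
    bool operator()( std::string const& lhs, char const* rhs ) const
    { return lhs.compare( rhs ) < 0; }
    bool operator()( char const* lhs, std::string const& rhs ) const
    { return rhs.compare( lhs ) > 0; }
};
// theMap.contains( "some key", string_less());
\endcode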
*/ template iterator contains( K const& key, Less pred ) { internal_bucket_type& refBucket = bucket( key ); bucket_iterator it = refBucket.contains( key, pred ); if ( it != refBucket.end()) return iterator( it, &refBucket, m_Buckets + bucket_count()); return end(); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Clears the map (not atomic) void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks whether the map is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the map /** If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns the size of hash table /** Since \p %MichaelHashMap cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see \p MichaelHashMap::MichaelHashMap for explanation. */ size_t bucket_count() const { return m_nHashBitmask + 1; } protected: //@cond /// Calculates hash value of \p key template size_t hash_value( K const & key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( K const& key ) { return m_Buckets[hash_value( key )]; } //@endcond private: //@cond const_iterator get_const_begin() const { return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); } const_iterator get_const_end() const { return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type( m_Stat ); } //@endcond }; }} // namespace cds::container #endif // ifndef CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H libcds-2.3.3/cds/container/michael_map_rcu.h000066400000000000000000001026461341244201700210020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H #define CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H #include #include namespace cds { namespace container { /// Michael's hash map (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_MichaelHashMap_rcu Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" Michael's hash table algorithm is based on lock-free ordered list and it is very simple. The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. However, each bucket may contain unbounded number of items. 
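Conceptually, a lookup first selects a bucket by masking the key's hash value and then searches the short ordered list in that bucket; a sketch of the indexing scheme (not part of the API; the table size \p M is a power of two):
\code
size_t bucket_index = hash_functor( key ) & ( M - 1 );
// the item is then searched in the ordered list T[ bucket_index ]
\endcode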
Template parameters are: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, \p MichaelKVList. The ordered list implementation specifies the \p Key and \p Value types stored in the hash-map, the reclamation schema \p GC used by hash-map, the comparison functor for the type \p Key and other features specific for the ordered list. - \p Traits - map traits, default is \p michael_map::traits. Instead of defining \p Traits struct you may use option-based syntax with \p michael_map::make_traits metafunction Many of the class function take a key argument of type \p K that in general is not \p key_type. \p key_type and an argument of template type \p K must meet the following requirements: - \p key_type should be constructible from value of type \p K; - the hash functor should be able to calculate correct hash value from argument \p key of type \p K: hash( key_type(key)) == hash( key ) - values of type \p key_type and \p K should be comparable How to use The tips about how to use Michael's map see \ref cds_nonintrusive_MichaelHashMap_how_touse "MichaelHashMap". Remember, that you should include RCU-related header file (for example, cds/urcu/general_buffered.h) before including cds/container/michael_map_rcu.h. */ template < class RCU, class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_map::traits #else class Traits #endif > class MichaelHashMap< cds::urcu::gc< RCU >, OrderedList, Traits > { public: typedef cds::urcu::gc< RCU > gc; ///< RCU used as garbage collector typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation typedef Traits traits; ///< Map traits typedef typename ordered_list::key_type key_type; ///< key type typedef typename ordered_list::mapped_type mapped_type; ///< value type typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the list typedef typename ordered_list::key_comparator key_comparator;///< key comparison functor #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node /// Type of \p get() member function return value typedef typename ordered_list::raw_ptr raw_ptr; typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock #endif /// Hash functor for \ref key_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Bucket table allocator /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); protected: //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type; /// Bucket table allocator typedef typename std::allocator_traits< allocator >::template rebind_alloc< internal_bucket_type > bucket_table_allocator; //@endcond public: //@cond typedef typename bucket_stat::stat 
stat; typedef typename internal_bucket_type::exempt_ptr exempt_ptr; typedef typename internal_bucket_type::raw_ptr raw_ptr; typedef typename internal_bucket_type::rcu_lock rcu_lock; //@endcond protected: //@cond const size_t m_nHashBitmask; hash m_HashFunctor; ///< Hash functor internal_bucket_type* m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond protected: //@cond template class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > { typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class; friend class MichaelHashMap; protected: typedef typename base_class::bucket_ptr bucket_ptr; typedef typename base_class::list_iterator list_iterator; public: /// Value pointer type (const for const_iterator) typedef typename cds::details::make_const_type::pointer value_ptr; /// Value reference type (const for const_iterator) typedef typename cds::details::make_const_type::reference value_ref; /// Key-value pair pointer type (const for const_iterator) typedef typename cds::details::make_const_type::pointer pair_ptr; /// Key-value pair reference type (const for const_iterator) typedef typename cds::details::make_const_type::reference pair_ref; protected: iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) : base_class( it, pFirst, pLast ) {} public: /// Default ctor iterator_type() : base_class() {} /// Copy ctor iterator_type( const iterator_type& src ) : base_class( src ) {} /// Dereference operator pair_ptr operator ->() const { assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator ->(); } /// Dereference operator pair_ref operator *() const { assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator *(); } /// Pre-increment iterator_type& operator ++() { base_class::operator++(); return *this; } /// Assignment operator iterator_type& operator = (const iterator_type& src) { base_class::operator =(src); return *this; } /// Returns current bucket (debug function) bucket_ptr bucket() const { return base_class::bucket(); } /// Equality operator template bool operator ==(iterator_type const& i ) { return base_class::operator ==( i ); } /// Equality operator template bool operator !=(iterator_type const& i ) { return !( *this == i ); } }; //@endcond public: ///@name Forward iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator for Michael's map is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to. 
The iterator interface:
\code
class iterator {
public:
    // Default constructor
    iterator();
    // Copy constructor
    iterator( iterator const& src );
    // Dereference operator
    value_type * operator ->() const;
    // Dereference operator
    value_type& operator *() const;
    // Pre-increment operator
    iterator& operator ++();
    // Assignment operator
    iterator& operator = (iterator const& src);
    // Equality operators
    bool operator ==(iterator const& i ) const;
    bool operator !=(iterator const& i ) const;
};
\endcode
*/
typedef iterator_type< false > iterator;
/// Const forward iterator
typedef iterator_type< true > const_iterator;
/// Returns a forward iterator addressing the first element in a map
/** For empty map \code begin() == end() \endcode */
iterator begin() { return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); }
/// Returns an iterator that addresses the location succeeding the last element in a map
/** Do not use the value returned by \p end() to access any item. The returned value can be used only to check for reaching the end of the map. For empty map \code begin() == end() \endcode */
iterator end() { return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); }
/// Returns a forward const iterator addressing the first element in a map
const_iterator begin() const { return get_const_begin(); }
/// Returns a forward const iterator addressing the first element in a map
const_iterator cbegin() const { return get_const_begin(); }
/// Returns a const iterator that addresses the location succeeding the last element in a map
const_iterator end() const { return get_const_end(); }
/// Returns a const iterator that addresses the location succeeding the last element in a map
const_iterator cend() const { return get_const_end(); }
//@}
public:
/// Initializes the map
/** Michael's hash map is a non-expandable container. You should provide the expected maximum item count \p nMaxItemCount when you create an object. The \p nLoadFactor parameter defines the average count of items per bucket and should be a small number between 1 and 10. Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. Note that many popular STL hash map implementations use a load factor of 1. The ctor defines the hash table size as nMaxItemCount / nLoadFactor rounded up to the nearest power of two.
*/
MichaelHashMap(
    size_t nMaxItemCount,   ///< estimation of max item count in the hash map
    size_t nLoadFactor      ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
  , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
    for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
        construct_bucket( it );
}
/// Clears the hash map and destroys it
~MichaelHashMap()
{
    clear();
    for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
        it->~internal_bucket_type();
    bucket_table_allocator().deallocate( m_Buckets, bucket_count());
}
/// Inserts new node with key and default value
/** The function creates a node with \p key and default value, and then inserts the node created into the map.
Preconditions:
- The \p key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \ref key_type.
- The \p mapped_type should be default-constructible.
The function applies RCU lock internally.
Returns \p true if the insertion is successful, \p false otherwise.
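A usage sketch (the typedef \p rcu_map stands for any RCU-based \p %MichaelHashMap specialization and is an assumption):
\code
rcu_map theMap( 1000, 4 );
theMap.insert( 10 );            // key 10, default-constructed mapped value
theMap.insert( 11, someValue ); // key 11, mapped value copied from someValue
\endcode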
*/
template <typename K>
bool insert( const K& key )
{
    const bool bRet = bucket( key ).insert( key );
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}
/// Inserts new node
/** The function creates a node with a copy of \p val and then inserts the node created into the map.
Preconditions:
- The \p key_type should be constructible from \p key of type \p K.
- The \p mapped_type should be constructible from \p val of type \p V.
The function applies RCU lock internally.
Returns \p true if \p val is inserted into the map, \p false otherwise.
*/
template <typename K, typename V>
bool insert( K const& key, V const& val )
{
    const bool bRet = bucket( key ).insert( key, val );
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}
/// Inserts a new node and initializes it with a functor
/** This function inserts a new node with key \p key and, if the insertion is successful, calls the \p func functor with signature
\code
struct functor {
    void operator()( value_type& item );
};
\endcode
The argument \p item of the user-defined functor \p func is the reference to the map's item inserted:
- item.first is a const reference to the item's key that cannot be changed.
- item.second is a reference to the item's value that may be changed.
The user-defined functor is called only if the insertion is successful. The \p key_type should be constructible from value of type \p K.
The function allows splitting the creation of a new item into several steps:
- create the item from \p key;
- insert the new item into the map;
- if the insertion is successful, initialize the value of the item by calling the \p func functor
This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization is completed only if the insertion is successful.
The function applies RCU lock internally.
@warning For \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" provides exclusive access to inserted item and does not require any node-level synchronization.
*/
template <typename K, typename Func>
bool insert_with( const K& key, Func func )
{
    const bool bRet = bucket( key ).insert_with( key, func );
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}
/// Updates data by \p key
/** The operation performs inserting or replacing the element in a lock-free manner.
If \p key is not found in the map, then the new item created from \p key will be inserted into the map iff \p bAllowInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with the item found.
The functor \p Func signature is:
\code
struct my_functor {
    void operator()( bool bNew, value_type& item );
};
\endcode
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - the item found or inserted
The functor may change any fields of \p item.second, which is of type \p mapped_type.
The function applies RCU lock internally.
Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key already exists.
@warning For \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" provides exclusive access to inserted item and does not require any node-level synchronization.
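A usage sketch (the typedef \p rcu_map and the \p nCounter field of the mapped type are illustrative assumptions):
\code
rcu_map theMap( 1000, 4 );
// Insert the key if it is absent, otherwise increment the stored counter
std::pair<bool, bool> res = theMap.update( 5,
    []( bool bNew, rcu_map::value_type& item ) {
        if ( !bNew )
            ++item.second.nCounter;
    });
// res.first  - the operation succeeded
// res.second - true if key 5 was inserted, false if it was already present
\endcode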
*/ template std::pair update( K const& key, Func func, bool bAllowInsert = true ) { std::pair bRet = bucket( key ).update( key, func, bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// For key \p key inserts data of type \p mapped_type created from \p args /** \p key_type should be constructible from type \p K Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( K&& key, Args&&... args ) { const bool bRet = bucket( key ).emplace( std::forward(key), std::forward(args)... ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Deletes \p key from the map /** \anchor cds_nonintrusive_MichaelMap_rcu_erase_val RCU \p synchronize method can be called. RCU should not be locked. Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( const K& key ) { const bool bRet = bucket( key ).erase( key ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less predicate has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( const K& key, Less pred ) { const bool bRet = bucket( key ).erase_with( key, pred ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes \p key from the map /** \anchor cds_nonintrusive_MichaelMap_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( const K& key, Func f ) { const bool bRet = bucket( key ).erase( key, f ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( const K& key, Less pred, Func f ) { const bool bRet = bucket( key ).erase_with( key, pred, f ); if ( bRet ) --m_ItemCounter; return bRet; } /// Extracts an item from the map /** \anchor cds_nonintrusive_MichaelHashMap_rcu_extract The function searches an item with key equal to \p key, unlinks it from the map, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item is not found the function return an empty \p exempt_ptr. The function just excludes the key from the map and returns a pointer to item found. Depends on \p ordered_list you should or should not lock RCU before calling of this function: - for the set based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked - for the set based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked See ordered list implementation for details. 
\code
#include <cds/urcu/general_buffered.h>
#include <cds/container/michael_kvlist_rcu.h>
#include <cds/container/michael_map_rcu.h>

typedef cds::urcu::gc< general_buffered<> > rcu;
typedef cds::container::MichaelKVList< rcu, int, Foo > rcu_michael_list;
typedef cds::container::MichaelHashMap< rcu, rcu_michael_list, foo_traits > rcu_michael_map;

rcu_michael_map theMap;
// ...
rcu_michael_map::exempt_ptr p;

// For MichaelList we should not lock RCU
// Note that you must not delete the item found inside the RCU lock
p = theMap.extract( 10 );
if ( p ) {
    // do something with p
    ...
}

// We may safely release p here
// release() passes the pointer to RCU reclamation cycle
p.release();
\endcode
*/
template <typename K>
exempt_ptr extract( K const& key )
{
    exempt_ptr p = bucket( key ).extract( key );
    if ( p )
        --m_ItemCounter;
    return p;
}
/// Extracts an item from the map using \p pred predicate for searching
/** The function is an analog of \p extract(K const&) but \p pred is used for key comparison. The \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Less>
exempt_ptr extract_with( K const& key, Less pred )
{
    exempt_ptr p = bucket( key ).extract_with( key, pred );
    if ( p )
        --m_ItemCounter;
    return p;
}
/// Finds the key \p key
/** \anchor cds_nonintrusive_MichaelMap_rcu_find_cfunc
The function searches the item with key equal to \p key and calls the functor \p f for the item found.
The interface of \p Func functor is:
\code
struct functor {
    void operator()( value_type& item );
};
\endcode
where \p item is the item found. The functor may change \p item.second.
Note that it is only guaranteed that \p item cannot be disposed of while the functor is executing. The functor does not serialize simultaneous access to the map's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
The function applies RCU lock internally.
The function returns \p true if \p key is found, \p false otherwise.
*/
template <typename K, typename Func>
bool find( K const& key, Func f )
{
    return bucket( key ).find( key, f );
}
/// Finds the key \p key using \p pred predicate for searching
/** The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_find_cfunc "find(K const&, Func)" but \p pred is used for key comparison. The \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Less, typename Func>
bool find_with( K const& key, Less pred, Func f )
{
    return bucket( key ).find_with( key, pred, f );
}
/// Checks whether the map contains \p key
/** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise.
The function applies RCU lock internally.
*/
template <typename K>
bool contains( K const& key )
{
    return bucket( key ).contains( key );
}
//@cond
template <typename K>
CDS_DEPRECATED("deprecated, use contains()")
bool find( K const& key )
{
    return bucket( key ).contains( key );
}
//@endcond
/// Checks whether the map contains \p key using \p pred predicate for searching
/** The function is an analog of contains( key ) but \p pred is used for key comparison. The \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map.
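For example, if the key type wraps an integer id, a suitable \p Less functor could be (a sketch; \p key_type and its \p id field are assumptions):
\code
struct id_less {
    bool operator()( key_type const& lhs, key_type const& rhs ) const
    { return lhs.id < rhs.id; }
    bool operator()( key_type const& lhs, int rhs ) const
    { return lhs.id < rhs; }
    bool operator()( int lhs, key_type const& rhs ) const
    { return lhs < rhs.id; }
};
// bool found = theMap.contains( 42, id_less());
\endcode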
*/ template bool contains( K const& key, Less pred ) { return bucket( key ).contains( key, pred ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( K const& key, Less pred ) { return bucket( key ).contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_nonintrusive_MichaelHashMap_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. Note the type of returned value depends on underlying \p ordered_list. For details, see documentation of ordered list you use. Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::MichaelHashMap< your_template_parameters > hash_map; hash_map theMap; // ... typename hash_map::raw_ptr gp; { // Lock RCU hash_map::rcu_lock lock; gp = theMap.get( 5 ); if ( gp ) { // Deal with gp //... } // Unlock RCU by rcu_lock destructor // gp can be reclaimed at any time after RCU has been unlocked } \endcode */ template raw_ptr get( K const& key ) { return bucket( key ).get( key ); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_MichaelHashMap_rcu_get "get(K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. */ template raw_ptr get_with( K const& key, Less pred ) { return bucket( key ).get_with( key, pred ); } /// Clears the map (not atomic) /** The function erases all items from the map. The function is not atomic. It cleans up each bucket and then resets the item counter to zero. If there are a thread that performs insertion while \p clear is working the result is undefined in general case: empty() may return \p true but the map may contain item(s). Therefore, \p clear may be used only for debugging purposes. RCU \p synchronize method can be called. RCU should not be locked. */ void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the map is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the map /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns the size of hash table /** Since \p %MichaelHashMap cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see \p MichaelHashMap::MichaelHashMap for explanation. 
*/ size_t bucket_count() const { return m_nHashBitmask + 1; } protected: //@cond /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( Q const& key ) { return m_Buckets[hash_value( key )]; } template internal_bucket_type const& bucket( Q const& key ) const { return m_Buckets[hash_value( key )]; } //@endcond private: //@cond const_iterator get_const_begin() const { return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); } const_iterator get_const_end() const { return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) { new (bkt) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) { new (bkt) internal_bucket_type( m_Stat ); } //@endcond }; }} // namespace cds::container #endif // ifndef CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H libcds-2.3.3/cds/container/michael_set.h000066400000000000000000001132241341244201700201410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_SET_H #define CDSLIB_CONTAINER_MICHAEL_SET_H #include #include #include namespace cds { namespace container { /// Michael's hash set /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_MichaelHashSet_hp Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" Michael's hash table algorithm is based on lock-free ordered list and it is very simple. The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. However, each bucket may contain unbounded number of items. Template parameters are: - \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector" from the \p libcds library. Note the \p GC must be the same as the \p GC used for \p OrderedList - \p OrderedList - ordered list implementation used as bucket for hash set, possible implementations: \p MichaelList, \p LazyList, \p IterableList. The ordered list implementation specifies the type \p T to be stored in the hash-set, the comparing functor for the type \p T and other features specific for the ordered list. - \p Traits - set traits, default is \p michael_set::traits. Instead of defining \p Traits struct you may use option-based syntax with \p michael_set::make_traits metafunction. There are the specializations: - for \ref cds_urcu_desc "RCU" - declared in cd/container/michael_set_rcu.h, see \ref cds_nonintrusive_MichaelHashSet_rcu "MichaelHashSet". - for \ref cds::gc::nogc declared in cds/container/michael_set_nogc.h, see \ref cds_nonintrusive_MichaelHashSet_nogc "MichaelHashSet". \anchor cds_nonintrusive_MichaelHashSet_hash_functor Hash functor Some member functions of Michael's hash set accept the key parameter of type \p Q which differs from node type \p value_type. 
It is expected that type \p Q contains full key of node type \p value_type, and if keys of type \p Q and \p value_type are equal the hash values of these keys must be equal too. The hash functor \p Traits::hash should accept parameters of both type: \code // Our node type struct Foo { std::string key_; // key field // ... other fields }; // Hash functor struct fooHash { size_t operator()( const std::string& s ) const { return std::hash( s ); } size_t operator()( const Foo& f ) const { return (*this)( f.key_ ); } }; \endcode How to use Suppose, we have the following type \p Foo that we want to store in our \p %MichaelHashSet: \code struct Foo { int nKey; // key field int nVal; // value field }; \endcode To use \p %MichaelHashSet for \p Foo values, you should first choose suitable ordered list class that will be used as a bucket for the set. We will use \p gc::DHP reclamation schema and \p MichaelList as a bucket type. Also, for ordered list we should develop a comparator for our \p Foo struct. \code #include #include namespace cc = cds::container; // Foo comparator struct Foo_cmp { int operator ()(Foo const& v1, Foo const& v2 ) const { if ( std::less( v1.nKey, v2.nKey )) return -1; return std::less(v2.nKey, v1.nKey) ? 1 : 0; } }; // Our ordered list typedef cc::MichaelList< cds::gc::DHP, Foo, typename cc::michael_list::make_traits< cc::opt::compare< Foo_cmp > // item comparator option >::type > bucket_list; // Hash functor for Foo struct foo_hash { size_t operator ()( int i ) const { return std::hash( i ); } size_t operator()( Foo const& i ) const { return std::hash( i.nKey ); } }; // Declare set type. // Note that \p GC template parameter of ordered list must be equal \p GC for the set. typedef cc::MichaelHashSet< cds::gc::DHP, bucket_list, cc::michael_set::make_traits< cc::opt::hash< foo_hash > >::type > foo_set; // Set variable foo_set fooSet; \endcode */ template < class GC, class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_set::traits #else class Traits #endif > class MichaelHashSet { public: typedef GC gc; ///< Garbage collector typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation typedef Traits traits; ///< Set traits typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics #endif /// Hash functor for \ref value_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Bucket table allocator static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount; ///< Count of hazard pointer required // GC and OrderedList::gc must be the same static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type; /// Bucket table allocator typedef typename std::allocator_traits::template rebind_alloc< internal_bucket_type > bucket_table_allocator; typedef typename bucket_stat::stat stat; 
//@endcond
/// Guarded pointer - a result of \p get() and \p extract() functions
typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
protected:
//@cond
size_t const            m_nHashBitmask;
internal_bucket_type *  m_Buckets;      ///< bucket table
hash                    m_HashFunctor;  ///< Hash functor
item_counter            m_ItemCounter;  ///< Item counter
stat                    m_Stat;         ///< Internal statistics
//@endcond
public:
///@name Forward iterators
//@{
/// Forward iterator
/** The forward iterator for Michael's set has some features:
- it has no post-increment operator
- to protect the value, the iterator contains a GC-specific guard; another guard is required locally for the increment operator. For some GC (such as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded.
- the iterator cannot be moved across a thread boundary because it contains a thread-private GC guard.
Iterator thread safety depends on the type of \p OrderedList:
- for \p MichaelList and \p LazyList: the iterator guarantees safety even if you delete the item the iterator points to, because that item is guarded by a hazard pointer. However, in case of concurrent deleting operations there is no guarantee that you will iterate over all items in the set. Moreover, a crash is possible when you try to advance to the next element if it has been deleted by a concurrent thread. Use this iterator on a concurrent container for debugging purposes only.
- for \p IterableList: the iterator is thread-safe. You may use it freely in a concurrent environment.
The iterator interface:
\code
class iterator {
public:
    // Default constructor
    iterator();
    // Copy constructor
    iterator( iterator const& src );
    // Dereference operator
    value_type * operator ->() const;
    // Dereference operator
    value_type& operator *() const;
    // Pre-increment operator
    iterator& operator ++();
    // Assignment operator
    iterator& operator = (iterator const& src);
    // Equality operators
    bool operator ==(iterator const& i ) const;
    bool operator !=(iterator const& i ) const;
};
\endcode
*/
/// Forward iterator
typedef michael_set::details::iterator< internal_bucket_type, false > iterator;
/// Const forward iterator
typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator;
/// Returns a forward iterator addressing the first element in a set
/** For empty set \code begin() == end() \endcode */
iterator begin() { return iterator( bucket_begin()->begin(), bucket_begin(), bucket_end()); }
/// Returns an iterator that addresses the location succeeding the last element in a set
/** Do not use the value returned by \p end() to access any item. The returned value can be used only to check for reaching the end of the set. For empty set \code begin() == end() \endcode */
iterator end() { return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end()); }
/// Returns a forward const iterator addressing the first element in a set
const_iterator begin() const { return get_const_begin(); }
/// Returns a forward const iterator addressing the first element in a set
const_iterator cbegin() const { return get_const_begin(); }
/// Returns a const iterator that addresses the location succeeding the last element in a set
const_iterator end() const { return get_const_end(); }
/// Returns a const iterator that addresses the location succeeding the last element in a set
const_iterator cend() const { return get_const_end(); }
//@}
public:
/// Initializes the hash set
/** Michael's hash set is a non-expandable container.
You should provide the expected maximum item count \p nMaxItemCount when you create an object. The \p nLoadFactor parameter defines the average count of items per bucket and should be a small number between 1 and 10. Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)].
The ctor defines the hash table size as nMaxItemCount / nLoadFactor rounded up to the nearest power of two.
*/
MichaelHashSet(
    size_t nMaxItemCount,   ///< estimation of max item count in the hash set
    size_t nLoadFactor      ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
  , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
    for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
        construct_bucket( it );
}
/// Clears hash set and destroys it
~MichaelHashSet()
{
    clear();
    for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
        it->~internal_bucket_type();
    bucket_table_allocator().deallocate( m_Buckets, bucket_count());
}
/// Inserts new node
/** The function creates a node with a copy of \p val and then inserts the node created into the set.
The type \p Q should contain as minimum the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type.
Returns \p true if \p val is inserted into the set, \p false otherwise.
*/
template <typename Q>
bool insert( Q&& val )
{
    const bool bRet = bucket( val ).insert( std::forward<Q>( val ));
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}
/// Inserts new node
/** The function allows splitting the creation of a new item into several steps:
- create the item with key only
- insert the new item into the set
- if the insertion is successful, call the \p f functor to initialize the value-fields of \p val.
The functor signature is:
\code
void func( value_type& val );
\endcode
where \p val is the item inserted. The user-defined functor is called only if the insertion is successful.
@warning For \ref cds_nonintrusive_MichaelList_gc "MichaelList" and \ref cds_nonintrusive_IterableList_gc "IterableList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". @ref cds_nonintrusive_LazyList_gc "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization.
*/
template <typename Q, typename Func>
bool insert( Q&& val, Func f )
{
    const bool bRet = bucket( val ).insert( std::forward<Q>( val ), f );
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}
/// Updates the element
/** The operation performs inserting or changing data in a lock-free manner.
If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found.
The functor \p func signature depends on \p OrderedList:
for \p MichaelList, \p LazyList
\code
struct functor {
    void operator()( bool bNew, value_type& item, Q const& val );
};
\endcode
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - item of the set
- \p val - argument \p val passed into the \p %update() function
The functor may change non-key fields of the \p item.
for \p IterableList
\code
void func( value_type& val, value_type * old );
\endcode
where
- \p val - a new data constructed from \p key
- \p old - the old value that will be retired. If a new item has been inserted then \p old is \p nullptr.
@return std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the set. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" and \ref cds_nonintrusive_IterableList_gc "IterableList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template std::pair update( Q&& val, Func func, bool bAllowUpdate = true ) { std::pair bRet = bucket( val ).update( std::forward( val ), func, bAllowUpdate ); if ( bRet.second ) ++m_ItemCounter; return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const Q& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts or updates the node (only for \p IterableList) /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the current element is changed to \p val, the old element will be retired later. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key already in the set. */ template #ifdef CDS_DOXYGEN_INVOKED std::pair #else typename std::enable_if< std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, std::pair >::type #endif upsert( Q&& val, bool bAllowInsert = true ) { std::pair bRet = bucket( val ).upsert( std::forward( val ), bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; } /// Inserts data of type \p value_type constructed from \p args /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { bool bRet = bucket_emplace( std::forward(args)... ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Deletes \p key from the set /** Since the key of MichaelHashSet's item type \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The set item comparator should be able to compare the type \p value_type and the type \p Q. Return \p true if key is found and deleted, \p false otherwise. */ template bool erase( Q const& key ) { const bool bRet = bucket( key ).erase( key ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \p erase(Q const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred ) { const bool bRet = bucket( key ).erase_with( key, pred ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes \p key from the set /** The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item); }; \endcode where \p item - the item found. Since the key of %MichaelHashSet's \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. 
Returns \p true if key is found and deleted, \p false otherwise
*/
template <typename Q, typename Func>
bool erase( Q const& key, Func f )
{
    const bool bRet = bucket( key ).erase( key, f );
    if ( bRet )
        --m_ItemCounter;
    return bRet;
}
/// Deletes the item from the set using \p pred predicate for searching
/** The function is an analog of \p erase(Q const&, Func) but \p pred is used for key comparison. The \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less, typename Func>
bool erase_with( Q const& key, Less pred, Func f )
{
    const bool bRet = bucket( key ).erase_with( key, pred, f );
    if ( bRet )
        --m_ItemCounter;
    return bRet;
}
/// Deletes the item pointed by iterator \p iter (only for \p IterableList based set)
/** Returns \p true if the operation is successful, \p false otherwise.
The function can return \p false if the node the iterator points to has already been deleted by another thread.
The function does not invalidate the iterator; it remains valid and can be used for further traversing.
@note \p %erase_at() is supported only for \p %MichaelHashSet based on \p IterableList.
*/
#ifdef CDS_DOXYGEN_INVOKED
bool erase_at( iterator const& iter )
#else
template <typename Iterator>
typename std::enable_if< std::is_same<Iterator, iterator>::value && is_iterable_list< ordered_list >::value, bool >::type
erase_at( Iterator const& iter )
#endif
{
    assert( iter != end());
    assert( iter.bucket() != nullptr );
    if ( iter.bucket()->erase_at( iter.underlying_iterator())) {
        --m_ItemCounter;
        return true;
    }
    return false;
}
/// Extracts the item with specified \p key
/** \anchor cds_nonintrusive_MichaelHashSet_hp_extract
The function searches an item with key equal to \p key, unlinks it from the set, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer.
Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
The extracted item is freed automatically when the returned \p guarded_ptr object is destroyed or released.
@note Each \p guarded_ptr object uses a GC guard, which can be a limited resource.
Usage:
\code
typedef cds::container::MichaelHashSet< your_template_args > michael_set;
michael_set theSet;
// ...
{
    typename michael_set::guarded_ptr gp( theSet.extract( 5 ));
    if ( gp ) {
        // Deal with gp
        // ...
    }
    // Destructor of gp releases internal HP guard
}
\endcode
*/
template <typename Q>
guarded_ptr extract( Q const& key )
{
    guarded_ptr gp( bucket( key ).extract( key ));
    if ( gp )
        --m_ItemCounter;
    return gp;
}
/// Extracts the item using compare functor \p pred
/** The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparison.
The \p Less functor has the semantics like \p std::less but should take arguments of type \p value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
guarded_ptr extract_with( Q const& key, Less pred )
{
    guarded_ptr gp( bucket( key ).extract_with( key, pred ));
    if ( gp )
        --m_ItemCounter;
    return gp;
}
/// Finds the key \p key
/** The function searches the item with key equal to \p key and calls the functor \p f for the item found.
The interface of \p Func functor is:
\code
struct functor {
    void operator()( value_type& item, Q& key );
};
\endcode
where \p item is the item found, \p key is the find function argument.
The functor may change non-key fields of \p item. Note that it is only guaranteed that \p item cannot be disposed of while the functor is executing.
The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return bucket( key ).find( key, f ); } //@cond template bool find( Q const& key, Func f ) { return bucket( key ).find( key, f ); } //@endcond /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) /** If \p key is not found the function returns \p end(). @note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find( Q& key ) { internal_bucket_type& b = bucket( key ); typename internal_bucket_type::iterator it = b.find( key ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find( Q const& key ) { internal_bucket_type& b = bucket( key ); typename internal_bucket_type::iterator it = b.find( key ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@endcond /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. If \p key is not found the function returns \p end(). 
@note This function is supported only for the set based on \p IterableList
*/
template <typename Q, typename Less>
#ifdef CDS_DOXYGEN_INVOKED
iterator
#else
typename std::enable_if< std::is_same<Q, Q>::value && is_iterable_list< ordered_list >::value, iterator >::type
#endif
find_with( Q& key, Less pred )
{
    internal_bucket_type& b = bucket( key );
    typename internal_bucket_type::iterator it = b.find_with( key, pred );
    if ( it == b.end())
        return end();
    return iterator( it, &b, bucket_end());
}
//@cond
template <typename Q, typename Less>
typename std::enable_if< std::is_same<Q, Q>::value && is_iterable_list< ordered_list >::value, iterator >::type
find_with( Q const& key, Less pred )
{
    internal_bucket_type& b = bucket( key );
    typename internal_bucket_type::iterator it = b.find_with( key, pred );
    if ( it == b.end())
        return end();
    return iterator( it, &b, bucket_end());
}
//@endcond
/// Checks whether the set contains \p key
/** The function searches the item with key equal to \p key and returns \p true if the key is found, and \p false otherwise.
Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
template <typename Q>
bool contains( Q const& key )
{
    return bucket( key ).contains( key );
}
/// Checks whether the set contains \p key using \p pred predicate for searching
/** The function is an analog of contains( key ) but \p pred is used for key comparison. The \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
bool contains( Q const& key, Less pred )
{
    return bucket( key ).contains( key, pred );
}
/// Finds the key \p key and returns the item found
/** \anchor cds_nonintrusive_MichaelHashSet_hp_get
The function searches the item with key equal to \p key and returns the guarded pointer to the item found. If \p key is not found the function returns an empty guarded pointer.
@note Each \p guarded_ptr object uses one GC guard, which can be a limited resource.
Usage:
\code
typedef cds::container::MichaelHashSet< your_template_params > michael_set;
michael_set theSet;
// ...
{
    typename michael_set::guarded_ptr gp( theSet.get( 5 ));
    if ( gp ) {
        // Deal with gp
        //...
    }
    // Destructor of guarded_ptr releases internal HP guard
}
\endcode
Note the compare functor specified for \p OrderedList template parameter should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
template <typename Q>
guarded_ptr get( Q const& key )
{
    return bucket( key ).get( key );
}
/// Finds the key \p key and returns the item found
/** The function is an analog of \ref cds_nonintrusive_MichaelHashSet_hp_get "get( Q const&)" but \p pred is used for comparing the keys.
The \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
guarded_ptr get_with( Q const& key, Less pred )
{
    return bucket( key ).get_with( key, pred );
}
/// Clears the set (non-atomic)
/** The function erases all items from the set.
The function is not atomic. It cleans up each bucket and then resets the item counter to zero. If there is a thread performing insertion while \p clear is working, the result is undefined in the general case: empty() may return \p true but the set may contain item(s). Therefore, \p clear may be used only for debugging purposes.
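Because of this, a typical pattern is to call \p clear() only during quiescent periods, e.g. at shutdown (a sketch):
\code
// Assumption: all worker threads have been joined before this point,
// so no concurrent insertions can race with clear()
theSet.clear();
assert( theSet.empty());
\endcode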
*/ void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the set is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the set /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns the size of hash table /** Since MichaelHashSet cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see MichaelHashSet::MichaelHashSet for explanation. */ size_t bucket_count() const { return m_nHashBitmask + 1; } protected: //@cond /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( Q const& key ) { return m_Buckets[ hash_value( key ) ]; } template internal_bucket_type const& bucket( Q const& key ) const { return m_Buckets[hash_value( key )]; } //@endcond private: //@cond internal_bucket_type* bucket_begin() const { return m_Buckets; } internal_bucket_type* bucket_end() const { return m_Buckets + bucket_count(); } const_iterator get_const_begin() const { return const_iterator( bucket_begin()->cbegin(), bucket_begin(), bucket_end()); } const_iterator get_const_end() const { return const_iterator(( bucket_end() -1 )->cend(), bucket_end() - 1, bucket_end()); } template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type( m_Stat ); } template typename std::enable_if< !is_iterable_list::value, bool>::type bucket_emplace( Args&&... args ) { class list_accessor: public List { public: using List::alloc_node; using List::node_to_value; using List::insert_node; }; auto pNode = list_accessor::alloc_node( std::forward( args )... ); assert( pNode != nullptr ); return static_cast( bucket( list_accessor::node_to_value( *pNode ))).insert_node( pNode ); } template typename std::enable_if< is_iterable_list::value, bool>::type bucket_emplace( Args&&... args ) { class list_accessor: public List { public: using List::alloc_data; using List::insert_node; }; auto pData = list_accessor::alloc_data( std::forward( args )... ); assert( pData != nullptr ); return static_cast( bucket( *pData )).insert_node( pData ); } //@endcond }; }} // namespace cds::container #endif // ifndef CDSLIB_CONTAINER_MICHAEL_SET_H libcds-2.3.3/cds/container/michael_set_nogc.h000066400000000000000000000377561341244201700211660ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H #define CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H #include #include #include namespace cds { namespace container { /// Michael's hash set (template specialization for gc::nogc) /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_MichaelHashSet_nogc This specialization is so-called append-only when no item reclamation may be performed. The class does not support deleting of list item. See \ref cds_nonintrusive_MichaelHashSet_hp "MichaelHashSet" for description of template parameters. The template parameter \p OrderedList should be any \p gc::nogc -derived ordered list, for example, \ref cds_nonintrusive_MichaelList_nogc "append-only MichaelList". */ template < class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_set::traits #else class Traits #endif > class MichaelHashSet< cds::gc::nogc, OrderedList, Traits > { public: typedef cds::gc::nogc gc; ///< Garbage collector typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket implementation typedef Traits traits; ///< Set traits typedef typename ordered_list::value_type value_type; ///< type of value stored in the list typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics #endif /// Hash functor for \ref value_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Bucket table allocator // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); protected: //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type_; class internal_bucket_type: public internal_bucket_type_ { typedef internal_bucket_type_ base_class; public: using base_class::base_class; using typename base_class::node_type; using base_class::alloc_node; using base_class::insert_node; using base_class::node_to_value; }; /// Bucket table allocator typedef typename std::allocator_traits< allocator >::template rebind_alloc< internal_bucket_type > bucket_table_allocator; typedef typename internal_bucket_type::iterator bucket_iterator; typedef typename internal_bucket_type::const_iterator bucket_const_iterator; //@endcond public: //@cond typedef typename bucket_stat::stat stat; //@endcond protected: //@cond const size_t m_nHashBitmask; hash m_HashFunctor; ///< Hash functor internal_bucket_type* m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& 
operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef michael_set::details::iterator< internal_bucket_type, false > iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return get_const_begin(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return get_const_end(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return get_const_end(); } //@} public: /// Initialize hash set /** The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount when you create an object. \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. The ctor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. */ MichaelHashSet( size_t nMaxItemCount, ///< estimation of max item count in the hash set size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clears hash set and destroys it ~MichaelHashSet() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Return an iterator pointing to inserted item if success, otherwise \ref end() */ template iterator insert( const Q& val ) { internal_bucket_type& refBucket = bucket( val ); bucket_iterator it = refBucket.insert( val ); if ( it != refBucket.end()) { ++m_ItemCounter; return iterator( it, &refBucket, m_Buckets + bucket_count()); } return end(); } /// Inserts data of type \ref value_type constructed with std::forward(args)... 
/** Return an iterator pointing to inserted item if success \ref end() otherwise */ template iterator emplace( Args&&... args ) { typename internal_bucket_type::node_type * pNode = internal_bucket_type::alloc_node( std::forward( args )... ); internal_bucket_type& refBucket = bucket( internal_bucket_type::node_to_value( *pNode )); bucket_iterator it = refBucket.insert_node( pNode ); if ( it != refBucket.end()) { ++m_ItemCounter; return iterator( it, &refBucket, m_Buckets + bucket_count()); } return end(); } /// Updates the element /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Returns std::pair where \p first is an iterator pointing to item found or inserted, or \p end() if \p bAllowInsert is \p false, \p second is true if new item has been added or \p false if the item is already in the set. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template std::pair update( Q const& val, bool bAllowInsert = true ) { internal_bucket_type& refBucket = bucket( val ); std::pair ret = refBucket.update( val, bAllowInsert ); if ( ret.first != refBucket.end()) { if ( ret.second ) ++m_ItemCounter; return std::make_pair( iterator( ret.first, &refBucket, m_Buckets + bucket_count()), ret.second ); } return std::make_pair( end(), ret.second ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& val ) { return update( val, true ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointed to item found if the key is found, or \ref end() otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template iterator contains( Q const& key ) { internal_bucket_type& refBucket = bucket( key ); bucket_iterator it = refBucket.contains( key ); if ( it != refBucket.end()) return iterator( it, &refBucket, m_Buckets + bucket_count()); return end(); } //@cond template CDS_DEPRECATED("use contains()") iterator find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template iterator contains( Q const& key, Less pred ) { internal_bucket_type& refBucket = bucket( key ); bucket_iterator it = refBucket.contains( key, pred ); if ( it != refBucket.end()) return iterator( it, &refBucket, m_Buckets + bucket_count()); return end(); } //@cond template CDS_DEPRECATED("use contains()") iterator find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Clears the set (not atomic) void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the set is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. 
*/ bool empty() const { return size() == 0; } /// Returns item count in the set /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns the size of hash table /** Since \p %MichaelHashSet cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see MichaelHashSet::MichaelHashSet for explanation. */ size_t bucket_count() const { return m_nHashBitmask + 1; } protected: //@cond /// Calculates hash value of \p key template size_t hash_value( const Q& key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( const Q& key ) { return m_Buckets[hash_value( key )]; } //@endcond private: //@cond template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) { new (b) internal_bucket_type( m_Stat ); } const_iterator get_const_begin() const { return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); } const_iterator get_const_end() const { return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } //@endcond }; }} // cds::container #endif // ifndef CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H libcds-2.3.3/cds/container/michael_set_rcu.h000066400000000000000000000763311341244201700210210ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MICHAEL_SET_RCU_H #define CDSLIB_CONTAINER_MICHAEL_SET_RCU_H #include #include namespace cds { namespace container { /// Michael's hash set (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_MichaelHashSet_rcu Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" Michael's hash table algorithm is based on lock-free ordered list and it is very simple. The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. However, each bucket may contain unbounded number of items. Template parameters are: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p OrderedList - ordered list implementation used as the bucket for hash set, for example, \ref cds_nonintrusive_MichaelList_rcu "MichaelList". The ordered list implementation specifies the type \p T stored in the hash-set, the comparison functor for the type \p T and other features specific for the ordered list. - \p Traits - set traits, default is michael_set::traits. Instead of defining \p Traits struct you may use option-based syntax with michael_set::make_traits metafunction. About hash functor see \ref cds_nonintrusive_MichaelHashSet_hash_functor "MichaelSet hash functor". 
        <b>How to use</b>

        Suppose, we have the following type \p Foo that we want to store in a \p %MichaelHashSet:
        \code
        struct Foo {
            int     nKey;   // key field
            int     nVal;   // value field
        };
        \endcode

        To use \p %MichaelHashSet for \p Foo values, you should first choose a suitable ordered list class
        that will be used as a bucket for the set. We will use the \p cds::urcu::general_buffered<> RCU type
        and \p MichaelList as a bucket type.
        You should include the RCU-related header file (\p cds/urcu/general_buffered.h in this example)
        before including \p cds/container/michael_set_rcu.h.
        Also, for the ordered list we should develop a comparator for our \p Foo struct.
        \code
        #include <cds/urcu/general_buffered.h>
        #include <cds/container/michael_list_rcu.h>
        #include <cds/container/michael_set_rcu.h>

        namespace cc = cds::container;

        // Foo comparator
        struct Foo_cmp {
            int operator ()( Foo const& v1, Foo const& v2 ) const
            {
                if ( std::less<int>()( v1.nKey, v2.nKey ))
                    return -1;
                return std::less<int>()( v2.nKey, v1.nKey ) ? 1 : 0;
            }
        };

        // Ordered list
        typedef cc::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo,
            typename cc::michael_list::make_traits<
                cc::opt::compare< Foo_cmp >     // item comparator option
            >::type
        > bucket_list;

        // Hash functor for Foo
        struct foo_hash {
            size_t operator ()( int i ) const
            {
                return std::hash<int>()( i );
            }
            size_t operator ()( Foo const& i ) const
            {
                return std::hash<int>()( i.nKey );
            }
        };

        // Declare the set
        // Note that the \p RCU template parameter of the ordered list must be equal to the \p RCU for the set.
        typedef cc::MichaelHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, bucket_list,
            cc::michael_set::make_traits<
                cc::opt::hash< foo_hash >
            >::type
        > foo_set;

        foo_set fooSet;
        \endcode
    */
    template <
        class RCU,
        class OrderedList,
#ifdef CDS_DOXYGEN_INVOKED
        class Traits = michael_set::traits
#else
        class Traits
#endif
    >
    class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits >
    {
    public:
        typedef cds::urcu::gc< RCU > gc;            ///< RCU used as garbage collector
        typedef OrderedList          ordered_list;  ///< type of ordered list to be used as a bucket implementation
        typedef Traits               traits;        ///< Set traits

        typedef typename ordered_list::value_type     value_type;     ///< type of value to be stored in the list
        typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor

#ifdef CDS_DOXYGEN_INVOKED
        typedef typename ordered_list::stat       stat;       ///< Internal statistics
        typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
        typedef typename ordered_list::raw_ptr    raw_ptr;    ///< Return type of \p get() member function and its derivatives
#endif

        /// Hash functor for \ref value_type and all its derivatives that you use
        typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
        typedef typename traits::item_counter item_counter; ///< Item counter type
        typedef typename traits::allocator    allocator;    ///< Bucket table allocator

        typedef typename ordered_list::rcu_lock rcu_lock;   ///< RCU scoped lock

        /// Group of \p extract_xxx functions requires external locking if underlying ordered list requires that
        static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;

        // GC and OrderedList::gc must be the same
        static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");

        //@cond
        typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;

        typedef typename ordered_list::template rebind_traits<
            cds::opt::item_counter< cds::atomicity::empty_item_counter >
            , cds::opt::stat< typename bucket_stat::wrapped_stat >
        >::type internal_bucket_type_;

        class internal_bucket_type: public internal_bucket_type_
        {
            typedef internal_bucket_type_
base_class; public: using base_class::base_class; using typename base_class::node_type; using base_class::alloc_node; using base_class::insert_node; using base_class::node_to_value; }; typedef typename internal_bucket_type::exempt_ptr exempt_ptr; typedef typename internal_bucket_type::raw_ptr raw_ptr; typedef typename bucket_stat::stat stat; //@endcond protected: //@cond /// Bucket table allocator typedef typename std::allocator_traits< allocator >::template rebind_alloc< internal_bucket_type > bucket_table_allocator; const size_t m_nHashBitmask; hash m_HashFunctor; ///< Hash functor internal_bucket_type* m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond public: ///@name Forward iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef michael_set::details::iterator< internal_bucket_type, false > iterator; /// Const forward iterator typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return get_const_begin(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return get_const_end(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return get_const_end(); } //@} public: /// Initialize hash set /** The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount when you create an object. \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. 
The ctor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. */ MichaelHashSet( size_t nMaxItemCount, ///< estimation of max item count in the hash set size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clears hash set and destroys it ~MichaelHashSet() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain as minimum the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. The function applies RCU lock internally. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q&& val ) { const bool bRet = bucket( val ).insert( std::forward( val )); if ( bRet ) ++m_ItemCounter; return bRet; } /// Inserts new node /** The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the inserting is success. The function applies RCU lock internally. @warning For \ref cds_nonintrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template bool insert( Q&& val, Func f ) { const bool bRet = bucket( val ).insert( std::forward( val ), f ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Updates the element /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, Q const& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function The functor may change non-key fields of the \p item. The function applies RCU lock internally. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the set. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. 
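
            For example, a minimal usage sketch (illustrative only; \p Foo and \p foo_set are the
            types from the example in the class description):
            \code
            foo_set fooSet( 128, 4 );
            fooSet.update( Foo{ 5, 100 },
                []( bool bNew, Foo& item, Foo const& val ) {
                    if ( !bNew )
                        item.nVal = val.nVal;   // update the non-key field only
                });
            \endcode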
*/ template std::pair update( Q const& val, Func func, bool bAllowInsert = true ) { std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; }//@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const Q& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts data of type \p value_type created from \p args /** Returns \p true if inserting successful, \p false otherwise. The function applies RCU lock internally. */ template bool emplace( Args&&... args ) { typename internal_bucket_type::node_type * pNode = internal_bucket_type::alloc_node( std::forward( args )... ); bool bRet = bucket( internal_bucket_type::node_to_value( *pNode )).insert_node( pNode ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Deletes \p key from the set /** \anchor cds_nonintrusive_MichealSet_rcu_erase_val Since the key of MichaelHashSet's item type \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The set item comparator should be able to compare the type \p value_type and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { const bool bRet = bucket( key ).erase( key ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred ) { const bool bRet = bucket( key ).erase_with( key, pred ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes \p key from the set /** \anchor cds_nonintrusive_MichealSet_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Since the key of %MichaelHashSet's \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { const bool bRet = bucket( key ).erase( key, f ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. 
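
            A typical \p Less predicate for the \p Foo type from the class description might look like
            this (an illustrative sketch):
            \code
            struct less_by_key {
                bool operator()( Foo const& lhs, Foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
                bool operator()( Foo const& lhs, int key ) const        { return lhs.nKey < key; }
                bool operator()( int key, Foo const& rhs ) const        { return key < rhs.nKey; }
            };

            // ...
            fooSet.erase_with( 5, less_by_key(),
                []( Foo const& val ) { /* the last chance to use val before it is disposed */ });
            \endcode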
*/ template bool erase_with( Q const& key, Less pred, Func f ) { const bool bRet = bucket( key ).erase_with( key, pred, f ); if ( bRet ) --m_ItemCounter; return bRet; } /// Extracts an item from the set /** \anchor cds_nonintrusive_MichaelHashSet_rcu_extract The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with the key equal to \p key is not found the function return an empty \p exempt_ptr. The function just excludes the item from the set and returns a pointer to item found. Depends on \p ordered_list you should or should not lock RCU before calling of this function: - for the set based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked - for the set based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked See ordered list implementation for details. \code #include #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::MichaelList< rcu, Foo > rcu_michael_list; typedef cds::container::MichaelHashSet< rcu, rcu_michael_list, foo_traits > rcu_michael_set; rcu_michael_set theSet; // ... typename rcu_michael_set::exempt_ptr p; // For MichaelList we should not lock RCU // Note that you must not delete the item found inside the RCU lock p = theSet.extract( 10 ); if ( p ) { // do something with p ... } // We may safely release p here // release() passes the pointer to RCU reclamation cycle p.release(); \endcode */ template exempt_ptr extract( Q const& key ) { exempt_ptr p = bucket( key ).extract( key ); if ( p ) --m_ItemCounter; return p; } /// Extracts an item from the set using \p pred predicate for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template exempt_ptr extract_with( Q const& key, Less pred ) { exempt_ptr p = bucket( key ).extract_with( key, pred ); if ( p ) --m_ItemCounter; return p; } /// Finds the key \p key /** \anchor cds_nonintrusive_MichealSet_rcu_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may be not the same as \p value_type. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. 
*/ template bool find( Q& key, Func f ) { return bucket( key ).find( key, f ); } //@cond template bool find( Q const& key, Func f ) { return bucket( key ).find( key, f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if the key is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template bool contains( Q const& key ) { return bucket( key ).contains( key ); } //@cond template CDS_DEPRECATED("use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) { return bucket( key ).contains( key, pred ); } //@cond template CDS_DEPRECATED("use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and return the item found /** \anchor cds_nonintrusive_MichaelHashSet_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. Note the type of returned value depends on underlying \p ordered_list. For details, see documentation of ordered list you use. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::MichaelHashSet< your_template_parameters > hash_set; hash_set theSet; typename hash_set::raw_ptr gp; // ... { // Lock RCU hash_set::rcu_lock lock; gp = theSet.get( 5 ); if ( gp ) { // Deal with pVal //... } // Unlock RCU by rcu_lock destructor // gp can be reclaimed at any time after RCU has been unlocked } \endcode */ template raw_ptr get( Q const& key ) { return bucket( key ).get( key ); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_MichaelHashSet_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. 
*/ template raw_ptr get_with( Q const& key, Less pred ) { return bucket( key ).get_with( key, pred ); } /// Clears the set (not atomic) void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the set is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the set /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns the size of hash table /** Since \p %MichaelHashSet cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see MichaelHashSet::MichaelHashSet for explanation. */ size_t bucket_count() const { return m_nHashBitmask + 1; } protected: //@cond /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( Q const& key ) { return m_Buckets[hash_value( key )]; } template internal_bucket_type const& bucket( Q const& key ) const { return m_Buckets[hash_value( key )]; } template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) { new (bkt) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) { new (bkt) internal_bucket_type( m_Stat ); } const_iterator get_const_begin() const { return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); } const_iterator get_const_end() const { return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } //@endcond }; }} // namespace cds::container #endif // ifndef CDSLIB_CONTAINER_MICHAEL_SET_RCU_H libcds-2.3.3/cds/container/moir_queue.h000066400000000000000000000242651341244201700200440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MOIR_QUEUE_H #define CDSLIB_CONTAINER_MOIR_QUEUE_H #include #include #include namespace cds { namespace container { //@cond namespace details { template struct make_moir_queue: public cds::container::details::make_msqueue< GC, T, Traits > { typedef cds::container::details::make_msqueue< GC, T, Traits > base_class; typedef cds::intrusive::MoirQueue< GC, typename base_class::node_type, typename base_class::intrusive_traits > type; }; } //@endcond /// A variation of Michael & Scott's lock-free queue /** @ingroup cds_nonintrusive_queue It is non-intrusive version of \p cds::intrusive::MoirQueue. Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP - \p T - a type stored in the queue. - \p Traits - queue traits, default is \p msqueue::traits. 
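
        A minimal push/pop sketch appears below (illustrative only; like any HP-based container,
        it assumes \p cds::Initialize() has been called, a \p cds::gc::HP instance exists and
        the current thread is attached to the GC):
        \code
        cds::container::MoirQueue< cds::gc::HP, int > q;
        q.push( 42 );

        int x;
        if ( q.pop( x )) {
            // x == 42
        }
        \endcode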
You can use \p msqueue::make_traits metafunction to make your traits or just derive your traits from \p %msqueue::traits: \code struct myTraits: public cds::container::msqueue::traits { typedef cds::intrusive::msqueue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::MoirQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::container::MoirQueue< cds::gc::HP, Foo, typename cds::container::msqueue::make_traits< cds::opt::stat< cds::container::msqueue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template class MoirQueue: #ifdef CDS_DOXYGEN_INVOKED private intrusive::MoirQueue< GC, intrusive::msqueue::node< T >, Traits > #else private details::make_moir_queue< GC, T, Traits >::type #endif { //@cond typedef details::make_moir_queue< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: /// Rebind template arguments template struct rebind { typedef MoirQueue< GC2, T2, Traits2 > other ; ///< Rebinding result }; static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm public: typedef T value_type ; ///< Value type stored in the queue typedef typename base_class::gc gc; ///< Garbage collector typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename base_class::stat stat; ///< Internal statistics policy used typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option protected: //@cond typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::msqueue::node) typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; // deallocate node typedef typename base_class::node_traits node_traits; //@endcond protected: ///@cond static node_type * alloc_node() { return cxx_allocator().New(); } static node_type * alloc_node( const value_type& val ) { return cxx_allocator().New( val ); } template static node_type * alloc_node_move( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * p ) { node_deallocator()( p ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Initializes empty queue MoirQueue() {} /// Destructor clears the queue ~MoirQueue() {} /// Enqueues \p val value into the queue. /** The function makes queue node in dynamic memory calling copy constructor for \p val and then it calls \p intrusive::MoirQueue::enqueue. Returns \p true if success, \p false otherwise. */ bool enqueue( value_type const& val ) { scoped_node_ptr p( alloc_node(val)); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p val value into the queue, move semantics bool enqueue( value_type&& val ) { scoped_node_ptr p( alloc_node_move( std::move( val ))); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p data to queue using a functor /** \p Func is a functor calling to create a new node. 
            The functor should initialize the created node; it takes one argument - a reference
            to a new node of type \ref value_type :
            \code
            cds::container::MoirQueue< cds::gc::HP, Foo > myQueue;
            Bar bar;
            myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } );
            \endcode
        */
        template <typename Func>
        bool enqueue_with( Func f )
        {
            scoped_node_ptr p( alloc_node());
            f( p->m_value );
            if ( base_class::enqueue( *p )) {
                p.release();
                return true;
            }
            return false;
        }

        /// Enqueues data of type \ref value_type constructed with std::forward<Args>(args)...
        template <typename... Args>
        bool emplace( Args&&... args )
        {
            scoped_node_ptr p( alloc_node_move( std::forward<Args>( args )... ));
            if ( base_class::enqueue( *p )) {
                p.release();
                return true;
            }
            return false;
        }

        /// Synonym for \p enqueue() function
        bool push( value_type const& val )
        {
            return enqueue( val );
        }

        /// Synonym for \p enqueue() function, move semantics
        bool push( value_type&& val )
        {
            return enqueue( std::move( val ));
        }

        /// Synonym for \p enqueue_with() function
        template <typename Func>
        bool push_with( Func f )
        {
            return enqueue_with( f );
        }

        /// Dequeues a value from the queue
        /**
            If queue is not empty, the function returns \p true, \p dest contains a copy of the
            dequeued value. The assignment operator for type \ref value_type is invoked.
            If queue is empty, the function returns \p false, \p dest is unchanged.
        */
        bool dequeue( value_type& dest )
        {
            return dequeue_with( [&dest]( value_type& src ) {
                // TSan finds a race between this read of \p src and node_type constructor
                // I think, it is wrong
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                dest = std::move( src );
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
            });
        }

        /// Dequeues a value using a functor
        /**
            \p Func is a functor called to copy the dequeued value.
            The functor takes one argument - a reference to the removed node:
            \code
            cds::container::MoirQueue< cds::gc::HP, Foo > myQueue;
            Bar bar;
            myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );});
            \endcode
            The functor is called only if the queue is not empty.
        */
        template <typename Func>
        bool dequeue_with( Func f )
        {
            typename base_class::dequeue_result res;
            if ( base_class::do_dequeue( res )) {
                f( node_traits::to_value_ptr( *res.pNext )->m_value );
                base_class::dispose_result( res );
                return true;
            }
            return false;
        }

        /// Synonym for \p dequeue() function
        bool pop( value_type& dest )
        {
            return dequeue( dest );
        }

        /// Synonym for \p dequeue_with() function
        template <typename Func>
        bool pop_with( Func f )
        {
            return dequeue_with( f );
        }

        /// Clear the queue
        /**
            The function repeatedly calls \p dequeue() until it returns \p false.
            The disposer defined in template \p Traits is called for each item
            that can be safely disposed.
        */
        void clear()
        {
            base_class::clear();
        }

        /// Checks if the queue is empty
        bool empty() const
        {
            return base_class::empty();
        }

        /// Returns queue's item count (see \ref intrusive::MSQueue::size for explanation)
        size_t size() const
        {
            return base_class::size();
        }

        /// Returns reference to internal statistics
        const stat& statistics() const
        {
            return base_class::statistics();
        }
    };

}} // namespace cds::container

#endif  // #ifndef CDSLIB_CONTAINER_MOIR_QUEUE_H
libcds-2.3.3/cds/container/mspriority_queue.h000066400000000000000000000306061341244201700213130ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H #define CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H #include #include #include namespace cds { namespace container { /// MSPriorityQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace mspriority_queue { #ifdef CDS_DOXYGEN_INVOKED /// Synonym for \p cds::intrusive::mspriority_queue::stat typedef cds::intrusive::mspriority_queue::stat<> stat; /// Synonym for \p cds::intrusive::mspriority_queue::empty_stat typedef cds::intrusive::mspriority_queue::empty_stat empty_stat; #else using cds::intrusive::mspriority_queue::stat; using cds::intrusive::mspriority_queue::empty_stat; #endif /// MSPriorityQueue traits /** The traits for \p %cds::container::MSPriorityQueue is the same as for \p cds::intrusive::MSPriorityQueue (see \p cds::intrusive::mspriority_queue::traits) plus some additional properties. */ struct traits: public cds::intrusive::mspriority_queue::traits { /// The allocator use to allocate memory for values typedef CDS_DEFAULT_ALLOCATOR allocator; /// Move policy /** The move policy used in \p MSPriorityQueue::pop() function to move item's value. Default is \p opt::v::assignment_move_policy. */ typedef cds::opt::v::assignment_move_policy move_policy; }; /// Metafunction converting option list to traits /** \p Options are: - \p opt::buffer - the buffer type for heap array. Possible type are: \p opt::v::initiaized_static_buffer, \p opt::v::initialized_dynamic_buffer. Default is \p %opt::v::initialized_dynamic_buffer. You may specify any type of values for the buffer since at instantiation time the \p buffer::rebind member metafunction is called to change the type of values stored in the buffer. - \p opt::compare - priority compare functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for priority compare. Default is \p std::less. - \p opt::lock_type - lock type. Default is \p cds::sync::spin. - \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield - \p opt::allocator - allocator (like \p std::allocator) for the values of queue's items. Default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::move_policy - policy for moving item's value. Default is \p opt::v::assignment_move_policy. If the compiler supports move semantics it would be better to specify the move policy based on the move semantics for type \p T. - \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead) */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace mspriority_queue /// Michael & Scott array-based lock-based concurrent priority queue heap /** @ingroup cds_nonintrusive_priority_queue Source: - [1996] G.Hunt, M.Michael, S. Parthasarathy, M.Scott "An efficient algorithm for concurrent priority queue heaps" \p %MSPriorityQueue augments the standard array-based heap data structure with a mutual-exclusion lock on the heap's size and locks on each node in the heap. Each node also has a tag that indicates whether it is empty, valid, or in a transient state due to an update to the heap by an inserting thread. 
The algorithm allows concurrent insertions and deletions in opposite directions, without risking deadlock and without the need for special server threads. It also uses a "bit-reversal" technique to scatter accesses across the fringe of the tree to reduce contention. On large heaps the algorithm achieves significant performance improvements over serialized single-lock algorithm, for various insertion/deletion workloads. For small heaps it still performs well, but not as well as single-lock algorithm. Template parameters: - \p T - type to be stored in the list. The priority is a part of \p T type. - \p Traits - the traits. See \p mspriority_queue::traits for explanation. It is possible to declare option-based queue with \p mspriority_queue::make_traits metafunction instead of \p Traits template argument. */ template class MSPriorityQueue: protected cds::intrusive::MSPriorityQueue< T, Traits > { //@cond typedef cds::intrusive::MSPriorityQueue< T, Traits > base_class; //@endcond public: typedef T value_type ; ///< Value type stored in the queue typedef Traits traits ; ///< Traits template parameter typedef typename base_class::key_comparator key_comparator; ///< priority comparing functor based on opt::compare and opt::less option setter. typedef typename base_class::lock_type lock_type; ///< heap's size lock type typedef typename base_class::back_off back_off ; ///< Back-off strategy typedef typename traits::stat stat; ///< internal statistics type, see \p intrusive::mspriority_queue::traits::stat typedef typename base_class::item_counter item_counter;///< Item counter type typedef typename std::allocator_traits::template rebind_alloc allocator_type; ///< Value allocator typedef typename traits::move_policy move_policy; ///< Move policy for type \p T protected: //@cond typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; struct value_deleter { void operator()( value_type * p ) const { cxx_allocator().Delete( p ); } }; typedef std::unique_ptr scoped_ptr; //@endcond public: /// Constructs empty priority queue /** For \p cds::opt::v::initialized_static_buffer the \p nCapacity parameter is ignored. */ MSPriorityQueue( size_t nCapacity ) : base_class( nCapacity ) {} /// Clears priority queue and destructs the object ~MSPriorityQueue() { clear(); } /// Inserts an item into priority queue /** If the priority queue is full, the function returns \p false, no item has been added. Otherwise, the function inserts the copy of \p val into the heap and returns \p true. The function use copy constructor to create new heap item from \p val. */ bool push( value_type const& val ) { scoped_ptr pVal( cxx_allocator().New( val )); if ( base_class::push( *(pVal.get()))) { pVal.release(); return true; } return false; } /// Inserts an item into the queue using a functor /** \p Func is a functor called to create node. The functor \p f takes one argument - a reference to a new node of type \ref value_type : \code cds::container::MSPriorityQueue< Foo > myQueue; Bar bar; myQueue.push_with( [&bar]( Foo& dest ) { dest = bar; } ); \endcode */ template bool push_with( Func f ) { scoped_ptr pVal( cxx_allocator().New()); f( *pVal ); if ( base_class::push( *pVal )) { pVal.release(); return true; } return false; } /// Inserts a item into priority queue /** If the priority queue is full, the function returns \p false, no item has been added. Otherwise, the function inserts a new item created from \p args arguments into the heap and returns \p true. */ template bool emplace( Args&&... 
args ) { scoped_ptr pVal( cxx_allocator().MoveNew( std::forward(args)... )); if ( base_class::push( *(pVal.get()))) { pVal.release(); return true; } return false; } /// Extracts item with high priority /** If the priority queue is empty, the function returns \p false. Otherwise, it returns \p true and \p dest contains the copy of extracted item. The item is deleted from the heap. The function uses \ref move_policy to move extracted value from the heap's top to \p dest. The function is equivalent of such call: \code pop_with( dest, [&dest]( value_type& src ) { move_policy()(dest, src); } ); \endcode */ bool pop( value_type& dest ) { return pop_with( [&dest]( value_type& src ) { move_policy()(dest, std::move(src)); }); } /// Extracts an item with high priority /** If the priority queue is empty, the function returns \p false. Otherwise, it returns \p true and \p dest contains the copy of extracted item. The item is deleted from the heap. \p Func is a functor called to copy popped value. The functor takes one argument - a reference to removed node: \code cds:container::MSPriorityQueue< Foo > myQueue; Bar bar; myQueue.pop_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode */ template bool pop_with( Func f ) { value_type * pVal = base_class::pop(); if ( pVal ) { f( *pVal ); cxx_allocator().Delete( pVal ); return true; } return false; } /// Clears the queue (not atomic) /** This function is not atomic, but thread-safe */ void clear() { base_class::clear_with( []( value_type& src ) { value_deleter()(&src); } ); } /// Clears the queue (not atomic) /** This function is not atomic, but thread-safe. For each item removed the functor \p f is called. \p Func interface is: \code struct clear_functor { void operator()( value_type& item ); }; \endcode */ template void clear_with( Func f ) { base_class::clear_with( [&f]( value_type& val ) { f(val); value_deleter()( &val ); } ); } /// Checks is the priority queue is empty bool empty() const { return base_class::empty(); } /// Checks if the priority queue is full bool full() const { return base_class::full(); } /// Returns current size of priority queue size_t size() const { return base_class::size(); } /// Return capacity of the priority queue size_t capacity() const { return base_class::capacity(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H libcds-2.3.3/cds/container/msqueue.h000066400000000000000000000355331341244201700173560ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_MSQUEUE_H #define CDSLIB_CONTAINER_MSQUEUE_H #include #include #include namespace cds { namespace container { /// MSQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace msqueue { /// Internal statistics template ::counter_type > using stat = cds::intrusive::msqueue::stat< Counter >; /// Dummy internal statistics typedef cds::intrusive::msqueue::empty_stat empty_stat; /// MSQueue default type traits struct traits { /// Node allocator typedef CDS_DEFAULT_ALLOCATOR allocator; /// Back-off strategy typedef cds::backoff::empty back_off; /// Item counting feature; by default, disabled. 
            /// Use \p cds::atomicity::item_counter to enable item counting
            typedef atomicity::empty_item_counter item_counter;

            /// Internal statistics (by default, disabled)
            /**
                Possible option values are: \p msqueue::stat, \p msqueue::empty_stat (the default),
                user-provided class that supports \p %msqueue::stat interface.
            */
            typedef msqueue::empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding
            enum { padding = opt::cache_line_padding };
        };

        /// Metafunction converting option list to \p msqueue::traits
        /**
            Supported \p Options are:
            - \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty.
            - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled).
              To enable item counting use \p cds::atomicity::item_counter
            - \p opt::stat - the type to gather internal statistics.
              Possible statistics types are: \p msqueue::stat, \p msqueue::empty_stat,
              user-provided class that supports \p %msqueue::stat interface.
              Default is \p %msqueue::empty_stat.
            - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding
            - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
              or \p opt::v::sequential_consistent (sequentially consistent memory model).

            Example: declare \p %MSQueue with item counting and internal statistics
            \code
            typedef cds::container::MSQueue< cds::gc::HP, Foo,
                typename cds::container::msqueue::make_traits<
                    cds::opt::item_counter< cds::atomicity::item_counter >,
                    cds::opt::stat< cds::container::msqueue::stat<> >
                >::type
            > myQueue;
            \endcode
        */
        template <typename... Options>
        struct make_traits {
#   ifdef CDS_DOXYGEN_INVOKED
            typedef implementation_defined type;   ///< Metafunction result
#   else
            typedef typename cds::opt::make_options<
                typename cds::opt::find_type_traits< traits, Options... >::type
                , Options...
            >::type type;
#   endif
        };
    } // namespace msqueue

    //@cond
    namespace details {
        template <typename GC, typename T, typename Traits>
        struct make_msqueue
        {
            typedef GC gc;
            typedef T value_type;
            typedef Traits traits;

            struct node_type: public intrusive::msqueue::node< gc >
            {
                value_type m_value;

                node_type( value_type const& val )
                    : m_value( val )
                {}

                template <typename... Args>
                node_type( Args&&... args )
                    : m_value( std::forward<Args>( args )... )
                {}
            };

            typedef typename std::allocator_traits< typename traits::allocator >::template rebind_alloc< node_type > allocator_type;
            typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator;

            struct node_deallocator
            {
                void operator ()( node_type * pNode )
                {
                    cxx_allocator().Delete( pNode );
                }
            };

            struct intrusive_traits: public traits
            {
                typedef cds::intrusive::msqueue::base_hook< cds::opt::gc<gc>> hook;
                typedef node_deallocator disposer;
                static constexpr const cds::intrusive::opt::link_check_type link_checker = cds::intrusive::msqueue::traits::link_checker;
            };

            typedef intrusive::MSQueue< gc, node_type, intrusive_traits > type;
        };
    }
    //@endcond

    /// Michael & Scott lock-free queue
    /** @ingroup cds_nonintrusive_queue
        It is a non-intrusive version of Michael & Scott's queue algorithm based on the intrusive implementation
        \p cds::intrusive::MSQueue.
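
        A minimal usage sketch (illustrative only; like any HP-based container, it assumes
        \p cds::Initialize() has been called, a \p cds::gc::HP instance exists and the current
        thread is attached to the GC):
        \code
        #include <cds/container/msqueue.h>
        #include <cds/gc/hp.h>

        cds::container::MSQueue< cds::gc::HP, int > q;
        q.enqueue( 1 );

        int v;
        bool ok = q.dequeue( v );   // ok == true, v == 1
        \endcode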
Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP - \p T is a type stored in the queue. - \p Traits - queue traits, default is \p msqueue::traits. You can use \p msqueue::make_traits metafunction to make your traits or just derive your traits from \p %msqueue::traits: \code struct myTraits: public cds::container::msqueue::traits { typedef cds::intrusive::msqueue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::MSQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::container::MSQueue< cds::gc::HP, Foo, typename cds::container::msqueue::make_traits< cds::opt::stat< cds::container::msqueue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template class MSQueue: #ifdef CDS_DOXYGEN_INVOKED private intrusive::MSQueue< GC, cds::intrusive::msqueue::node< T >, Traits > #else private details::make_msqueue< GC, T, Traits >::type #endif { //@cond typedef details::make_msqueue< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: /// Rebind template arguments template struct rebind { typedef MSQueue< GC2, T2, Traits2> other ; ///< Rebinding result }; public: typedef T value_type; ///< Value type stored in the queue typedef Traits traits; ///< Queue traits typedef typename base_class::gc gc; ///< Garbage collector used typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename base_class::stat stat; ///< Internal statistics policy used typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm protected: //@cond typedef typename maker::node_type node_type; ///< queue node type (derived from \p intrusive::msqueue::node) typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; // deallocate node typedef typename base_class::node_traits node_traits; //@endcond protected: ///@cond static node_type * alloc_node() { return cxx_allocator().New(); } static node_type * alloc_node( value_type const& val ) { return cxx_allocator().New( val ); } template static node_type * alloc_node_move( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * p ) { node_deallocator()( p ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Initializes empty queue MSQueue() {} /// Destructor clears the queue ~MSQueue() {} /// Enqueues \p val value into the queue. /** The function makes queue node in dynamic memory calling copy constructor for \p val and then it calls \p intrusive::MSQueue::enqueue. Returns \p true if success, \p false otherwise. 
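A short sketch of the insertion variants (hypothetical \p Foo type):
\code
cds::container::MSQueue< cds::gc::HP, Foo > q;
Foo f;
q.enqueue( f );              // copies f into a new node
q.enqueue( std::move( f ));  // moves f into a new node
q.emplace( a, b );           // constructs Foo( a, b ) in place; a and b are hypothetical ctor arguments
\endcode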
*/ bool enqueue( value_type const& val ) { scoped_node_ptr p( alloc_node(val)); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p val in the queue, move semantics bool enqueue( value_type&& val ) { scoped_node_ptr p( alloc_node_move( std::move( val ))); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues data to the queue using a functor /** \p Func is a functor called to create node. The functor \p f takes one argument - a reference to a new node of type \ref value_type : \code cds::container::MSQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); \endcode */ template bool enqueue_with( Func f ) { scoped_node_ptr p( alloc_node()); f( p->m_value ); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues data of type \ref value_type constructed from std::forward(args)... template bool emplace( Args&&... args ) { scoped_node_ptr p( alloc_node_move( std::forward( args )... )); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Synonym for \p enqueue() function bool push( value_type const& val ) { return enqueue( val ); } /// Synonym for \p enqueue() function bool push( value_type&& val ) { return enqueue( std::move( val )); } /// Synonym for \p enqueue_with() function template bool push_with( Func f ) { return enqueue_with( f ); } /// Dequeues a value from the queue /** If queue is not empty, the function returns \p true, \p dest contains copy of dequeued value. The assignment operator for type \ref value_type is invoked. If queue is empty, the function returns \p false, \p dest is unchanged. */ bool dequeue( value_type& dest ) { return dequeue_with( [&dest]( value_type& src ) { // TSan finds a race between this read of \p src and node_type constructor // I think, it is wrong CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; dest = std::move( src ); CDS_TSAN_ANNOTATE_IGNORE_READS_END; }); } /// Dequeues a value using a functor /** \p Func is a functor called to copy dequeued value. The functor takes one argument - a reference to removed node: \code cds:container::MSQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode The functor is called only if the queue is not empty. */ template bool dequeue_with( Func f ) { typename base_class::dequeue_result res; if ( base_class::do_dequeue( res )) { f( node_traits::to_value_ptr( *res.pNext )->m_value ); base_class::dispose_result( res ); return true; } return false; } /// Synonym for \p dequeue() function bool pop( value_type& dest ) { return dequeue( dest ); } /// Synonym for \p dequeue_with() function template bool pop_with( Func f ) { return dequeue_with( f ); } /// Clear the queue /** The function repeatedly calls \ref dequeue until it returns \p nullptr. 
*/ void clear() { base_class::clear(); } /// Checks if the queue is empty bool empty() const { return base_class::empty(); } /// Returns queue's item count (see \ref intrusive::MSQueue::size for explanation) /** \copydetails cds::intrusive::MSQueue::size() */ size_t size() const { return base_class::size(); } /// Returns reference to internal statistics const stat& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_MSQUEUE_H libcds-2.3.3/cds/container/optimistic_queue.h000066400000000000000000000361021341244201700212530ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_OPTIMISTIC_QUEUE_H #define CDSLIB_CONTAINER_OPTIMISTIC_QUEUE_H #include #include #include namespace cds { namespace container { /// OptimisticQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace optimistic_queue { /// Internal statistics template ::counter_type > using stat = cds::intrusive::optimistic_queue::stat< Counter >; /// Dummy internal statistics typedef cds::intrusive::optimistic_queue::empty_stat empty_stat; /// MSQueue default type traits struct traits { /// Node allocator typedef CDS_DEFAULT_ALLOCATOR allocator; /// Back-off strategy typedef cds::backoff::empty back_off; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics (by default, disabled) /** Possible option value are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat (the default), user-provided class that supports \p %optimistic_queue::stat interface. */ typedef optimistic_queue::empty_stat stat; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; }; /// Metafunction converting option list to \p msqueue::traits /** Supported \p Options are: - \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) To enable item counting use \p cds::atomicity::item_counter - \p opt::stat - the type to gather internal statistics. Possible statistics types are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat, user-provided class that supports \p %optimistic_queue::stat interface. Default is \p %optimistic_queue::empty_stat. - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). 
Example: declare \p OptimisticQueue with item counting and internal statistics \code typedef cds::container::OptimisticQueue< cds::gc::HP, Foo, typename cds::container::optimistic_queue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::stat< cds::container::optimistic_queue::stat<> > >::type > myQueue; \endcode */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace optimistic_queue //@cond namespace details { template struct make_optimistic_queue { typedef GC gc; typedef T value_type; typedef Traits traits; struct node_type: public cds::intrusive::optimistic_queue::node< gc > { value_type m_value; node_type( value_type const& val ) : m_value( val ) {} template node_type( Args&&... args ) : m_value( std::forward(args)...) {} }; typedef typename std::allocator_traits< typename traits::allocator >::template rebind_alloc allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; struct intrusive_traits : public traits { typedef cds::intrusive::optimistic_queue::base_hook< opt::gc > hook; typedef node_deallocator disposer; static constexpr const opt::link_check_type link_checker = cds::intrusive::optimistic_queue::traits::link_checker; }; typedef intrusive::OptimisticQueue< gc, node_type, intrusive_traits > type; }; } // namespace details //@endcond /// Optimistic queue /** @ingroup cds_nonintrusive_queue Implementation of Ladan-Mozes & Shavit optimistic queue algorithm. - [2008] Edya Ladan-Mozes, Nir Shavit "An Optimistic Approach to Lock-Free FIFO Queues" Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP. - \p T - type of values to be stored in the queue - \p Traits - queue traits, default is \p optimistic_queue::traits. 
You can use \p optimistic_queue::make_traits metafunction to make your traits or just derive your traits from \p %optimistic_queue::traits: \code struct myTraits: public cds::container::optimistic_queue::traits { typedef cds::intrusive::optimistic_queue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::OptimisticQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::container::OptimisticQueue< cds::gc::HP, Foo, typename cds::container::optimistic_queue::make_traits< cds::opt::stat< cds::container::optimistic_queue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template class OptimisticQueue: #ifdef CDS_DOXYGEN_INVOKED private intrusive::OptimisticQueue< GC, cds::intrusive::optimistic_queue::node< T >, Traits > #else private details::make_optimistic_queue< GC, T, Traits >::type #endif { //@cond typedef details::make_optimistic_queue< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: /// Rebind template arguments template struct rebind { typedef OptimisticQueue< GC2, T2, Traits2 > other ; ///< Rebinding result }; public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< Value type to be stored in the queue typedef Traits traits; ///< Queue traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename base_class::stat stat; ///< Internal statistics policy used typedef typename base_class::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm protected: //@cond typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::optimistic_queue::node) typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; // deallocate node typedef typename base_class::node_traits node_traits; //@endcond protected: ///@cond static node_type * alloc_node() { return cxx_allocator().New(); } static node_type * alloc_node( const value_type& val ) { return cxx_allocator().New( val ); } template static node_type * alloc_node_move( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * p ) { node_deallocator()( p ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Initializes empty queue OptimisticQueue() {} /// Destructor clears the queue ~OptimisticQueue() {} /// Enqueues \p val value into the queue. /** The function makes queue node in dynamic memory calling copy constructor for \p val and then it calls \p intrusive::OptimisticQueue::enqueue. Returns \p true if success, \p false otherwise. 
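For example (a hedged sketch; assumes the \p gc::DHP garbage collector has been initialized):
\code
cds::container::OptimisticQueue< cds::gc::DHP, std::string > q;
q.enqueue( std::string( "hello" )); // the temporary is moved into the node
std::string s;
q.pop( s ); // pop() is a synonym for dequeue(); s == "hello"
\endcode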
*/ bool enqueue( const value_type& val ) { scoped_node_ptr p( alloc_node(val)); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p val value into the queue, move semntics bool enqueue( value_type&& val ) { scoped_node_ptr p( alloc_node_move( std::move( val ))); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues \p data to queue using a functor /** \p Func is a functor called to create node. The functor \p f takes one argument - a reference to a new node of type \ref value_type : \code cds::container::OptimisticQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); \endcode */ template bool enqueue_with( Func f ) { scoped_node_ptr p( alloc_node()); f( p->m_value ); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues data of type \ref value_type constructed with std::forward(args)... template bool emplace( Args&&... args ) { scoped_node_ptr p( alloc_node_move( std::forward(args)... )); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Synonym for \p enqueue( const value_type& ) function bool push( const value_type& val ) { return enqueue( val ); } /// Synonym for \p enqueue( value_type&& ) function bool push( value_type&& val ) { return enqueue( std::move( val )); } /// Synonym for \p enqueue_with() function template bool push_with( Func f ) { return enqueue_with( f ); } /// Dequeues a value from the queue /** If queue is not empty, the function returns \p true, \p dest contains copy of dequeued value. The assignment operator for type \p value_type is invoked. If queue is empty, the function returns \p false, \p dest is unchanged. */ bool dequeue( value_type& dest ) { return dequeue_with( [&dest]( value_type& src ) { dest = std::move( src ); }); } /// Dequeues a value using a functor /** \p Func is a functor called to copy dequeued value. The functor takes one argument - a reference to removed node: \code cds:container::OptimisticQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode The functor is called only if the queue is not empty. */ template bool dequeue_with( Func f ) { typename base_class::dequeue_result res; if ( base_class::do_dequeue( res )) { f( node_traits::to_value_ptr( *res.pNext )->m_value ); base_class::dispose_result( res ); return true; } return false; } /// Synonym for \ref dequeue() function bool pop( value_type& dest ) { return dequeue( dest ); } /// Synonym for template version of \p dequeue_with() function template bool pop_with( Func f ) { return dequeue_with( f ); } /// Checks if the queue is empty bool empty() const { return base_class::empty(); } /// Clear the queue /** The function repeatedly calls \ref dequeue until it returns \p nullptr. */ void clear() { base_class::clear(); } /// Returns queue's item count /** \copydetails cds::intrusive::OptimisticQueue::size() */ size_t size() const { return base_class::size(); } /// Returns reference to internal statistics const stat& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif //#ifndef CDSLIB_CONTAINER_OPTIMISTIC_QUEUE_H libcds-2.3.3/cds/container/rwqueue.h000066400000000000000000000315571341244201700173710ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_RWQUEUE_H #define CDSLIB_CONTAINER_RWQUEUE_H #include #include #include #include <mutex> // unique_lock #include
namespace cds { namespace container {
/// RWQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace rwqueue {
/// RWQueue default type traits struct traits { /// Lock policy typedef cds::sync::spin lock_type; /// Node allocator typedef CDS_DEFAULT_ALLOCATOR allocator; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef cds::atomicity::empty_item_counter item_counter; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; };
/// Metafunction converting option list to \p rwqueue::traits /** Supported \p Options are: - opt::lock_type - lock policy, default is \p cds::sync::spin. Any type satisfying the \p Mutex C++ concept may be used. - opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR - opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled). To enable item counting use \p cds::atomicity::item_counter. - \p opt::padding - padding for internal critical data. Default is \p opt::cache_line_padding Example: declare mutex-based \p %RWQueue with item counting \code typedef cds::container::RWQueue< Foo, typename cds::container::rwqueue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::lock_type< std::mutex > >::type > myQueue; \endcode */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type, Options... >::type type; # endif }; } // namespace rwqueue
/// Michael & Scott blocking queue with fine-grained synchronization scheme /** @ingroup cds_nonintrusive_queue The queue has two different locks: one for reading and one for writing. Therefore, one writer and one reader can access the queue simultaneously. The queue does not require any garbage collector. Source - [1998] Maged Michael, Michael Scott "Simple, fast, and practical non-blocking and blocking concurrent queue algorithms" Template arguments - \p T - value type to be stored in the queue - \p Traits - queue traits, default is \p rwqueue::traits.
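Since the head and the tail are guarded by separate locks, one enqueue and one dequeue can proceed in parallel. A minimal sketch (no garbage collector is needed):
\code
cds::container::RWQueue< int > q;
q.enqueue( 1 );   // takes only the tail lock
int v;
q.dequeue( v );   // takes only the head lock, so it does not block a concurrent enqueue()
\endcode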
You can use \p rwqueue::make_traits metafunction to make your traits or just derive your traits from \p %rwqueue::traits: \code struct myTraits: public cds::container::rwqueue::traits { typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::RWQueue< Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::container::RWQueue< Foo, typename cds::container::rwqueue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template class RWQueue { public: /// Rebind template arguments template struct rebind { typedef RWQueue< T2, Traits2 > other ; ///< Rebinding result }; public: typedef T value_type; ///< Type of value to be stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::lock_type lock_type; ///< Locking primitive typedef typename traits::item_counter item_counter; ///< Item counting policy used protected: //@cond /// Node type struct node_type { atomics::atomic< node_type *> m_pNext; ///< Pointer to the next node in the queue value_type m_value; ///< Value stored in the node node_type( value_type const& v ) : m_pNext( nullptr ) , m_value(v) {} node_type() : m_pNext( nullptr ) {} template node_type( Args&&... args ) : m_pNext( nullptr ) , m_value( std::forward(args)...) {} }; //@endcond public: /// Allocator type used for allocate/deallocate the queue nodes typedef typename std::allocator_traits< typename traits::allocator >::template rebind_alloc allocator_type; protected: //@cond typedef std::unique_lock scoped_lock; typedef cds::details::Allocator< node_type, allocator_type > node_allocator; struct head_type { mutable lock_type lock; node_type * ptr; }; head_type m_Head; typename opt::details::apply_padding< head_type, traits::padding >::padding_type pad_; head_type m_Tail; item_counter m_ItemCounter; //@endcond protected: //@cond static node_type * alloc_node() { return node_allocator().New(); } static node_type * alloc_node( T const& data ) { return node_allocator().New( data ); } template static node_type * alloc_node_move( Args&&... args ) { return node_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * pNode ) { node_allocator().Delete( pNode ); } bool enqueue_node( node_type * p ) { assert( p != nullptr ); { scoped_lock lock( m_Tail.lock ); m_Tail.ptr->m_pNext.store( p, atomics::memory_order_release ); m_Tail.ptr = p; } ++m_ItemCounter; return true; } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Makes empty queue RWQueue() { node_type * pNode = alloc_node(); m_Head.ptr = m_Tail.ptr = pNode; } /// Destructor clears queue ~RWQueue() { clear(); assert( m_Head.ptr == m_Tail.ptr ); free_node( m_Head.ptr ); } /// Enqueues \p data. Always return \a true bool enqueue( value_type const& data ) { scoped_node_ptr p( alloc_node( data )); if ( enqueue_node( p.get())) { p.release(); return true; } return false; } /// Enqueues \p data, move semantics bool enqueue( value_type&& data ) { scoped_node_ptr p( alloc_node_move( std::move( data ))); if ( enqueue_node( p.get())) { p.release(); return true; } return false; } /// Enqueues \p data to the queue using a functor /** \p Func is a functor called to create node. 
The functor \p f takes one argument - a reference to a new node of type \ref value_type : \code cds::container::RWQueue< Foo > myQueue; Bar bar; myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); \endcode */
template <typename Func> bool enqueue_with( Func f ) { scoped_node_ptr p( alloc_node()); f( p->m_value ); if ( enqueue_node( p.get())) { p.release(); return true; } return false; }
/// Enqueues data of type \ref value_type constructed with std::forward<Args>(args)... template <typename... Args> bool emplace( Args&&... args ) { scoped_node_ptr p( alloc_node_move( std::forward<Args>(args)... )); if ( enqueue_node( p.get())) { p.release(); return true; } return false; }
/// Synonym for \p enqueue( value_type const& ) function bool push( value_type const& val ) { return enqueue( val ); }
/// Synonym for \p enqueue( value_type&& ) function bool push( value_type&& val ) { return enqueue( std::move( val )); }
/// Synonym for \p enqueue_with() function template <typename Func> bool push_with( Func f ) { return enqueue_with( f ); }
/// Dequeues a value to \p dest. /** If the queue is empty the function returns \a false and \p dest is unchanged. If the queue is not empty it returns \a true and \p dest contains the value dequeued. */ bool dequeue( value_type& dest ) { return dequeue_with( [&dest]( value_type& src ) { dest = std::move( src ); }); }
/// Dequeues a value using a functor /** \p Func is a functor called to copy the dequeued value. The functor takes one argument - a reference to the removed node: \code cds::container::RWQueue< Foo > myQueue; Bar bar; myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode The functor is called only if the queue is not empty. */ template <typename Func> bool dequeue_with( Func f ) { node_type * pNode; { scoped_lock lock( m_Head.lock ); pNode = m_Head.ptr; node_type * pNewHead = pNode->m_pNext.load( atomics::memory_order_acquire ); if ( pNewHead == nullptr ) return false; f( pNewHead->m_value ); m_Head.ptr = pNewHead; } // unlock here --m_ItemCounter; free_node( pNode ); return true; }
/// Synonym for \p dequeue() function bool pop( value_type& dest ) { return dequeue( dest ); }
/// Synonym for \p dequeue_with() function template <typename Func> bool pop_with( Func f ) { return dequeue_with( f ); }
/// Checks if queue is empty bool empty() const { scoped_lock lock( m_Head.lock ); return m_Head.ptr->m_pNext.load( atomics::memory_order_relaxed ) == nullptr; }
/// Clears queue void clear() { scoped_lock lockR( m_Head.lock ); scoped_lock lockW( m_Tail.lock ); while ( m_Head.ptr->m_pNext.load( atomics::memory_order_relaxed ) != nullptr ) { node_type * pHead = m_Head.ptr; m_Head.ptr = m_Head.ptr->m_pNext.load( atomics::memory_order_relaxed ); free_node( pHead ); } m_ItemCounter.reset(); }
/// Returns queue's item count /** The value returned depends on \p rwqueue::traits::item_counter. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use a real item counter and it returns 0, this does not mean that the queue is empty. To check queue emptiness use the \p empty() method. */ size_t size() const { return m_ItemCounter.value(); }
//@cond /// The class has no internal statistics. For test consistency only std::nullptr_t statistics() const { return nullptr; } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_RWQUEUE_H libcds-2.3.3/cds/container/segmented_queue.h000066400000000000000000000415451341244201700210510ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SEGMENTED_QUEUE_H #define CDSLIB_CONTAINER_SEGMENTED_QUEUE_H #include #include // ref #include namespace cds { namespace container { /// SegmentedQueue -related declarations namespace segmented_queue { # ifdef CDS_DOXYGEN_INVOKED /// SegmentedQueue internal statistics typedef cds::intrusive::segmented_queue::stat stat; # else using cds::intrusive::segmented_queue::stat; # endif /// SegmentedQueue empty internal statistics (no overhead) typedef cds::intrusive::segmented_queue::empty_stat empty_stat; /// SegmentedQueue default type traits struct traits { /// Item allocator. Default is \ref CDS_DEFAULT_ALLOCATOR typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// Item counter, default is atomicity::item_counter /** The item counting is an essential part of segmented queue algorithm. The \p empty() member function is based on checking size() == 0. Therefore, dummy item counter like atomicity::empty_item_counter is not the proper counter. */ typedef atomicity::item_counter item_counter; /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) typedef segmented_queue::empty_stat stat; /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types typedef opt::v::relaxed_ordering memory_model; /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification enum { alignment = opt::cache_line_alignment }; /// Padding of segment data, default is no special padding /** The segment is just an array of atomic data pointers, so, the high load leads to false sharing and performance degradation. A padding of segment data can eliminate false sharing issue. On the other hand, the padding leads to increase segment size. */ enum { padding = cds::intrusive::segmented_queue::traits::padding }; /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR typedef CDS_DEFAULT_ALLOCATOR allocator; /// Lock type used to maintain an internal list of allocated segments typedef cds::sync::spin lock_type; /// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor) typedef cds::opt::v::random2_permutation permutation_generator; }; /// Metafunction converting option list to traits for SegmentedQueue /** The metafunction can be useful if a few fields in \p segmented_queue::traits should be changed. For example: \code typedef cds::container::segmented_queue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter > >::type my_segmented_queue_traits; \endcode This code creates \p %SegmentedQueue type traits with item counting feature, all other \p segmented_queue::traits members left unchanged. \p Options are: - \p opt::node_allocator - node allocator. - \p opt::stat - internal statistics, possible type: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default) - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counetr is not suitable for segmented queue. - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. See option description for the full list of possible models - \p opt::alignment - the alignment of critical data, see option description for explanation - \p opt::padding - the padding of segment data, default no special padding. See \p traits::padding for explanation. - \p opt::allocator - the allocator used to maintain segments. 
- \p opt::lock_type - a mutual exclusion lock type used to maintain an internal list of allocated segments. Default is \p cds::sync::spin, \p std::mutex is also suitable. - \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor), default is \p cds::opt::v::random2_permutation */
template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type, Options... >::type type; # endif }; } // namespace segmented_queue
//@cond namespace details { template <typename GC, typename T, typename Traits> struct make_segmented_queue { typedef GC gc; typedef T value_type; typedef Traits original_type_traits; typedef cds::details::Allocator< T, typename original_type_traits::node_allocator > cxx_node_allocator; struct node_disposer { void operator()( T * p ) { cxx_node_allocator().Delete( p ); } }; struct intrusive_type_traits: public original_type_traits { typedef node_disposer disposer; }; typedef cds::intrusive::SegmentedQueue< gc, value_type, intrusive_type_traits > type; }; } // namespace details //@endcond
/// Segmented queue /** @ingroup cds_nonintrusive_queue The queue is based on the work - [2010] Afek, Korland, Yanovsky "Quasi-Linearizability: relaxed consistency for improved concurrency" In this paper the authors offer a relaxed version of linearizability, so-called quasi-linearizability, that preserves some of the intuition, provides a flexible way to control the level of relaxation, and supports the implementation of more concurrent and scalable data structures. Intuitively, linearizability requires each run to be equivalent in some sense to a serial run of the algorithm. This equivalence to some serial run imposes strong synchronization requirements that in many cases result in limited scalability and a synchronization bottleneck.
The general idea is that the queue maintains a linked list of segments; each segment is an array of nodes whose size is the quasi factor, and each node has a deleted boolean marker which states whether it has been dequeued. Each producer iterates over the last segment in the linked list in some random permutation order. When it finds an empty cell it performs a CAS operation attempting to enqueue its new element. In case the entire segment has been scanned and no available cell is found (implying that the segment is full), it attempts to add a new segment to the list.
The dequeue operation is similar: the consumer iterates over the first segment in the linked list in some random permutation order. When it finds an item which has not yet been dequeued, it performs CAS on its deleted marker in order to "delete" it; if that succeeds, the item is considered dequeued. In case the entire segment was scanned and all the nodes have already been dequeued (implying that the segment is empty), it attempts to remove this segment from the linked list and starts the same process on the next segment. If there is no next segment, the queue is considered empty.
Based on the fact that most of the time threads do not add or remove segments, most of the work is done in parallel on different cells in the segments. This ensures controlled contention depending on the segment size, which is the quasi factor.
The segmented queue is an unfair queue since it violates the strong FIFO order, but by no more than the quasi factor. It means that the consumer dequeues any item from the current first segment.
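A sketch of how the quasi factor is supplied and what it means (the value 16 is a hypothetical choice):
\code
// Each segment holds 16 cells; FIFO order may be violated by no more than 16 positions
cds::container::SegmentedQueue< cds::gc::HP, int > q( 16 );
q.enqueue( 1 );
int v;
q.dequeue( v ); // v is some item of the first segment, not necessarily the oldest one
\endcode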
Template parameters: - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP - \p T - the type of values stored in the queue - \p Traits - queue type traits, default is \p segmented_queue::traits. \p segmented_queue::make_traits metafunction can be used to construct your type traits. */ template class SegmentedQueue: #ifdef CDS_DOXYGEN_INVOKED public cds::intrusive::SegmentedQueue< GC, T, Traits > #else public details::make_segmented_queue< GC, T, Traits >::type #endif { //@cond typedef details::make_segmented_queue< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of the value stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::node_allocator node_allocator; ///< Node allocator typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename base_class::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter typedef typename base_class::stat stat ; ///< Internal statistics policy typedef typename base_class::lock_type lock_type ; ///< Type of mutex for maintaining an internal list of allocated segments. typedef typename base_class::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor) static const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount ; ///< Count of hazard pointer required for the algorithm protected: //@cond typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; static value_type * alloc_node( value_type const& v ) { return cxx_node_allocator().New( v ); } static value_type * alloc_node() { return cxx_node_allocator().New(); } template static value_type * alloc_node_move( Args&&... args ) { return cxx_node_allocator().MoveNew( std::forward( args )... ); } //@endcond public: /// Initializes the empty queue SegmentedQueue( size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. ) : base_class( nQuasiFactor ) {} /// Clears the queue and deletes all internal data ~SegmentedQueue() {} /// Inserts a new element at last segment of the queue /** The function makes queue node in dynamic memory calling copy constructor for \p val and then it calls intrusive::SEgmentedQueue::enqueue. Returns \p true if success, \p false otherwise. */ bool enqueue( value_type const& val ) { scoped_node_ptr p( alloc_node(val)); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Inserts a new element at last segment of the queue, move semantics bool enqueue( value_type&& val ) { scoped_node_ptr p( alloc_node_move( std::move( val ))); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Enqueues data to the queue using a functor /** \p Func is a functor called to create node. 
The functor \p f takes one argument - a reference to a new node of type \ref value_type : \code cds::container::SegmentedQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); \endcode */ template bool enqueue_with( Func f ) { scoped_node_ptr p( alloc_node()); f( *p ); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Synonym for \p enqueue( value_type const& ) member function bool push( value_type const& val ) { return enqueue( val ); } /// Synonym for \p enqueue( value_type&& ) member function bool push( value_type&& val ) { return enqueue( std::move( val )); } /// Synonym for \p enqueue_with() member function template bool push_with( Func f ) { return enqueue_with( f ); } /// Enqueues data of type \ref value_type constructed with std::forward(args)... template bool emplace( Args&&... args ) { scoped_node_ptr p( alloc_node_move( std::forward(args)... )); if ( base_class::enqueue( *p )) { p.release(); return true; } return false; } /// Dequeues a value from the queue /** If queue is not empty, the function returns \p true, \p dest contains copy of dequeued value. The assignment operator for type \ref value_type is invoked. If queue is empty, the function returns \p false, \p dest is unchanged. */ bool dequeue( value_type& dest ) { return dequeue_with( [&dest]( value_type& src ) { dest = std::move( src );}); } /// Dequeues a value using a functor /** \p Func is a functor called to copy dequeued value. The functor takes one argument - a reference to removed node: \code cds:container::MSQueue< cds::gc::HP, Foo > myQueue; Bar bar; myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode The functor is called only if the queue is not empty. */ template bool dequeue_with( Func f ) { value_type * p = base_class::dequeue(); if ( p ) { f( *p ); gc::template retire< typename maker::node_disposer >( p ); return true; } return false; } /// Synonym for \p dequeue_with() function template bool pop_with( Func f ) { return dequeue_with( f ); } /// Synonym for \p dequeue() function bool pop( value_type& dest ) { return dequeue( dest ); } /// Checks if the queue is empty /** The original segmented queue algorithm does not allow to check emptiness accurately because \p empty() is unlinearizable. This function tests queue's emptiness checking size() == 0, so, the item counting feature is an essential part of queue's algorithm. */ bool empty() const { return base_class::empty(); } /// Clear the queue /** The function repeatedly calls \p dequeue() until it returns \p nullptr. The disposer specified in \p Traits template argument is called for each removed item. */ void clear() { base_class::clear(); } /// Returns queue's item count size_t size() const { return base_class::size(); } /// Returns reference to internal statistics /** The type of internal statistics is specified by \p Traits template argument. */ const stat& statistics() const { return base_class::statistics(); } /// Returns quasi factor, a power-of-two number size_t quasi_factor() const { return base_class::quasi_factor(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SEGMENTED_QUEUE_H libcds-2.3.3/cds/container/skip_list_map_dhp.h000066400000000000000000000010241341244201700213470ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H #define CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H libcds-2.3.3/cds/container/skip_list_map_hp.h000066400000000000000000000010201341244201700211770ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_HP_H #define CDSLIB_CONTAINER_SKIP_LIST_MAP_HP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_HP_H libcds-2.3.3/cds/container/skip_list_map_nogc.h000066400000000000000000000327011341244201700215300ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_NOGC_H #define CDSLIB_CONTAINER_SKIP_LIST_MAP_NOGC_H #include
namespace cds { namespace container {
//@cond namespace skip_list { namespace details { struct map_key_accessor { template <typename NodeType> typename NodeType::stored_value_type::first_type const& operator()( NodeType const& node ) const { return node.m_Value.first; } }; }} // namespace skip_list::details //@endcond
/// Lock-free skip-list map (template specialization for gc::nogc) /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_SkipListMap_nogc This specialization is intended for so-called persistent usage when no item reclamation may be performed. The class does not support deletion of map items. See \ref cds_nonintrusive_SkipListMap_hp "SkipListMap" for detailed description. Template arguments: - \p K - type of a key to be stored in the map. - \p T - type of a value to be stored in the map. - \p Traits - map traits, default is \p skip_list::traits It is possible to declare an option-based map with \p cds::container::skip_list::make_traits metafunction instead of \p Traits template argument.
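A brief usage sketch (the map is only populated; items are never erased):
\code
typedef cds::container::SkipListMap< cds::gc::nogc, int, std::string > map_type;
map_type m;
m.insert( 1, "one" );
map_type::iterator it = m.contains( 1 ); // in this specialization contains() returns an iterator
if ( it != m.end()) {
    // it->second == "one"
}
\endcode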
*/ template < typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = skip_list::traits #else typename Traits #endif > class SkipListMap< cds::gc::nogc, Key, T, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected SkipListSet< cds::gc::nogc, std::pair< Key const, T >, Traits > #else protected SkipListSet< cds::gc::nogc ,std::pair< Key const, T > ,typename cds::opt::replace_key_accessor< Traits, skip_list::details::map_key_accessor >::type > #endif { //@cond typedef SkipListSet< cds::gc::nogc ,std::pair< Key const, T > ,typename cds::opt::replace_key_accessor< Traits, skip_list::details::map_key_accessor >::type > base_class; //@endcond public: typedef cds::gc::nogc gc; ///< Garbage collector typedef Key key_type; ///< Key type typedef T mapped_type; ///< Mapped type typedef std::pair< key_type const, mapped_type> value_type; ///< Key-value pair stored in the map typedef Traits traits; ///< Options specified typedef typename base_class::back_off back_off; ///< Back-off strategy typedef typename base_class::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy typedef typename base_class::key_comparator key_comparator; ///< key compare functor typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option typedef typename base_class::stat stat; ///< internal statistics type typedef typename base_class::random_level_generator random_level_generator; ///< random level generator protected: //@cond typedef typename base_class::node_type node_type; typedef typename base_class::node_allocator node_allocator; //@endcond public: /// Default constructor SkipListMap() : base_class() {} /// Destructor clears the map ~SkipListMap() {} public: ///@name Forward ordered iterators //@{ /// Forward iterator /** The forward iterator for a split-list has some features: - it has no post-increment operator - it depends on iterator of underlying \p OrderedList */ typedef typename base_class::iterator iterator; /// Const forward iterator typedef typename base_class::const_iterator const_iterator; /// Returns a forward iterator addressing the first element in a map /** For empty set \code begin() == end() \endcode */ iterator begin() { return base_class::begin(); } /// Returns an iterator that addresses the location succeeding the last element in a map /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return base_class::end(); } /// Returns a forward const iterator addressing the first element in a map const_iterator begin() const { return base_class::begin(); } /// Returns a forward const iterator addressing the first element in a map const_iterator cbegin() const { return base_class::cbegin(); } /// Returns an const iterator that addresses the location succeeding the last element in a map const_iterator end() const { return base_class::end(); } /// Returns an const iterator that addresses the location succeeding the last element in a map const_iterator cend() const { return base_class::cend(); } //@} public: /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from value of type \p K. 
In the trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. Returns an iterator pointing to the inserted value, or \p end() if insertion failed */ template <typename K> iterator insert( K const& key ) { //TODO: pass arguments by reference (make_pair makes copy) return base_class::insert( std::make_pair( key_type( key ), mapped_type())); }
/// Inserts new node /** The function creates a node with a copy of \p val value and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref mapped_type should be constructible from \p val of type \p V. Returns an iterator pointing to the inserted value, or \p end() if insertion failed */ template <typename K, typename V> iterator insert( K const& key, V const& val ) { //TODO: pass arguments by reference (make_pair makes copy) return base_class::insert( std::make_pair( key_type( key ), mapped_type( val ))); }
/// Inserts a new node and initializes it with a functor /** This function inserts a new node with key \p key and, if inserting is successful, calls the \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of the user-defined functor \p func is the reference to the map's item inserted. item.second is a reference to the item's value that may be changed. The user-defined functor \p func should guarantee that while the item's value is being changed no other changes can be made on this map's item by concurrent threads. The key_type should be constructible from a value of type \p K. The function allows splitting the creation of a new item into three parts: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of the item by calling the \p func functor This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. Returns an iterator pointing to the inserted value, or \p end() if insertion failed */ template <typename K, typename Func> iterator insert_with( K const& key, Func func ) { iterator it = insert( key ); if ( it != end()) func( (*it)); return it; }
/// For key \p key inserts data of type \p mapped_type created in-place from \p args /** \p key_type should be constructible from type \p K. Returns an iterator pointing to the inserted value, or \p end() if insertion failed. */ template <typename K, typename... Args> iterator emplace( K&& key, Args&&... args ) { return base_class::emplace( key_type( std::forward<K>( key )), mapped_type( std::forward<Args>(args)... )); }
/// Updates data by \p key /** The operation inserts a new item if \p key is not found in the map and \p bInsert is \p true. Otherwise, if \p key is found, the function returns an iterator that points to the item found. Returns std::pair<iterator, bool> where \p first is an iterator pointing to the item found or inserted, or \p end() if \p key is not found and insertion is not allowed (\p bInsert is \p false), \p second is \p true if a new item has been added or \p false if the item already exists.
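For example (a hypothetical sketch):
\code
cds::container::SkipListMap< cds::gc::nogc, int, int > m;
auto res = m.update( 5 );   // key 5 is absent - a new item (5, int()) is inserted
assert( res.second );       // new item has been added
res = m.update( 5 );        // the item already exists
assert( !res.second );
res.first->second = 42;     // res.first is an iterator pointing to the item
\endcode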
*/ template std::pair update( K const& key, bool bInsert = true ) { //TODO: pass arguments by reference (make_pair makes copy) return base_class::update( std::make_pair( key_type( key ), mapped_type()), bInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key ) { return update( key, true ); } //@endcond /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointed to item found if the key is found, and \ref end() otherwise */ template iterator contains( K const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template iterator contains( K const& key, Less pred ) const { return base_class::contains( key, pred ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find_with( K const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Gets minimum key from the map /** If the map is empty the function returns \p nullptr */ value_type * get_min() const { return base_class::get_min(); } /// Gets maximum key from the map /** The function returns \p nullptr if the map is empty */ value_type * get_max() const { return base_class::get_max(); } /// Clears the map (not atomic) /** Finding and/or inserting is prohibited while clearing. Otherwise an unpredictable result may be encountered. Thus, \p clear() may be used only for debugging purposes. */ void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if item count is zero then the map is empty. Thus, the correct item counting feature is an important part of Michael's map implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. static constexpr unsigned int max_height() noexcept { return base_class::max_height(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_NOGC_H libcds-2.3.3/cds/container/skip_list_map_rcu.h000066400000000000000000000654631341244201700214060ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_RCU_H #define CDSLIB_CONTAINER_SKIP_LIST_MAP_RCU_H #include #include #include
namespace cds { namespace container {
/// Lock-free skip-list map (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_SkipListMap_rcu The implementation of the well-known probabilistic data structure called skip-list, invented by W.Pugh in his papers: - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees - [1990] W.Pugh A Skip List Cookbook
A skip-list is a probabilistic data structure that provides expected logarithmic time search without the need for rebalancing. The skip-list is a collection of sorted linked lists. Nodes are ordered by key. Each node is linked into a subset of the lists. Each list has a level, ranging from 0 to 32. The bottom-level list contains all the nodes, and each higher-level list is a sublist of the lower-level lists. Each node is created with a random top level (with a random height), and belongs to all lists up to that level. The probability that a node has the height 1 is 1/2. The probability that a node has the height N is 1/2 ** N (more precisely, the distribution depends on the random generator provided, but our generators have this property).
The lock-free variant of skip-list is implemented according to the book - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", chapter 14.4 "A Lock-Free Concurrent Skiplist"
Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type". - \p K - type of a key to be stored in the list. - \p T - type of a value to be stored in the list. - \p Traits - map traits, default is \p skip_list::traits. It is possible to declare option-based list with \p cds::container::skip_list::make_traits metafunction instead of \p Traits template argument.
Like the STL map class, \p %SkipListMap stores its key-value pair as std::pair< K const, T>.
@note Before including this header you should include the appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
Iterators: The class supports a forward iterator (\ref iterator and \ref const_iterator). The iteration is ordered. You may iterate over skip-list map items only under RCU lock. Only in this case is the iterator thread-safe, since while RCU is locked no map item can be reclaimed. The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) is not possible.
@warning The iterator object cannot be passed between threads.
The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
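For example, a declaration sketch based on the general-purpose buffered RCU shipped with libcds:
\code
#include <cds/urcu/general_buffered.h>
#include <cds/container/skip_list_map_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
typedef cds::container::SkipListMap< rcu_type, int, std::string > map_type;
\endcode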
*/ template < typename RCU, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = skip_list::traits #else typename Traits #endif > class SkipListMap< cds::urcu::gc< RCU >, Key, T, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SkipListSet< cds::urcu::gc< RCU >, std::pair, Traits > #else protected details::make_skip_list_map< cds::urcu::gc< RCU >, Key, T, Traits >::type #endif { //@cond typedef details::make_skip_list_map< cds::urcu::gc< RCU >, Key, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< Garbage collector used typedef Key key_type; ///< Key type typedef T mapped_type; ///< Mapped type # ifdef CDS_DOXYGEN_INVOKED typedef std::pair< K const, T> value_type; ///< Value type stored in the map # else typedef typename maker::value_type value_type; # endif typedef Traits traits; ///< Map traits typedef typename base_class::back_off back_off; ///< Back-off strategy used typedef typename traits::allocator allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes typedef typename base_class::item_counter item_counter; ///< Item counting policy used typedef typename maker::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::random_level_generator random_level_generator; ///< random level generator typedef typename traits::stat stat; ///< internal statistics type protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::node_allocator node_allocator; typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; //@endcond public: typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock /// Group of \p extract_xxx functions do not require external locking static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_type_traits::disposer >; private: //@cond struct raw_ptr_converter { value_type * operator()( node_type * p ) const { return p ? &p->m_Value : nullptr; } value_type& operator()( node_type& n ) const { return n.m_Value; } value_type const& operator()( node_type const& n ) const { return n.m_Value; } }; //@endcond public: /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; protected: //@cond unsigned int random_level() { return base_class::random_level(); } //@endcond public: /// Default ctor SkipListMap() : base_class() {} /// Destructor destroys the set object ~SkipListMap() {} public: ///@name Forward ordered iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator has some features: - it has no post-increment operator - it depends on iterator of underlying \p OrderedList You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to. 
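A typical iteration sketch (assuming a \p map_type instance \p m as declared in the example above):
\code
{
    map_type::rcu_lock lock; // enter RCU critical section
    for ( map_type::iterator it = m.begin(); it != m.end(); ++it ) {
        // it->first and it->second may be read here;
        // do not call erase() while the RCU lock is held
    }
}   // the RCU lock is released
\endcode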
*/ typedef skip_list::details::iterator< typename base_class::iterator > iterator; /// Const iterator type typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; /// Returns a forward iterator addressing the first element in a map iterator begin() { return iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a map const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a map const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a forward iterator that addresses the location succeeding the last element in a map. iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a map. const_iterator end() const { return cend(); } /// Returns a forward const iterator that addresses the location succeeding the last element in a map. const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Inserts new node with key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. In trivial case, \p K is equal to \p key_type. - The \p mapped_type should be default-constructible. RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if inserting successful, \p false otherwise. */ template bool insert( K const& key ) { return insert_with( key, [](value_type&){} ); } /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p value_type should be constructible from \p val of type \p V. RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( K const& key, V const& val ) { scoped_node_ptr pNode( node_allocator().New( random_level(), key, val )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Inserts new node and initialize it by a functor /** This function inserts new node with key \p key and if inserting is successful then it calls \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to item's key that cannot be changed. - item.second is a reference to item's value that may be changed. The function allows to split creating of new item into three part: - create item from \p key; - insert new item into the map; - if inserting is successful, initialize the value of item by calling \p func functor This can be useful if complete initialization of object of \p value_type is heavyweight and it is preferable that the initialization should be completed only if inserting is successful. RCU \p synchronize method can be called. RCU should not be locked. 
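For example (a hedged sketch: \p map_type and \p theMap are assumed to be the \p int to \p std::string map from the iteration example above):
\code
bool ok = theMap.insert_with( 10,
    []( map_type::value_type& item ) {
        // Called only if the insertion has succeeded;
        // here item.first == 10 and item.second is default-constructed
        item.second = "ten";
    });
\endcode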
*/ template bool insert_with( const K& key, Func func ) { scoped_node_ptr pNode( node_allocator().New( random_level(), key )); if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_Value ); } )) { pNode.release(); return true; } return false; } /// For key \p key inserts data of type \p value_type created in-place from \p args /** Returns \p true if inserting successful, \p false otherwise. RCU \p synchronize() method can be called. RCU should not be locked. */ template bool emplace( K&& key, Args&&... args ) { scoped_node_ptr pNode( node_allocator().New( random_level(), std::forward(key), std::forward(args)... )); if ( base_class::insert( *pNode )) { pNode.release(); return true; } return false; } /// Updates data by \p key /** The operation performs inserting or changing data with lock-free manner. If the \p key not found in the map, then the new item created from \p key is inserted into the map iff \p bInsert is \p true. Otherwise, if \p key found, the functor \p func is called with item found. The functor \p Func interface is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode where: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the map The functor may change any fields of \p item.second. RCU \p synchronize() method can be called. RCU should not be locked. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( K const& key, Func func, bool bInsert = true ) { scoped_node_ptr pNode( node_allocator().New( random_level(), key )); std::pair res = base_class::update( *pNode, [&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_Value );}, bInsert ); if ( res.first && res.second ) pNode.release(); return res; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Delete \p key from the map /**\anchor cds_nonintrusive_SkipListMap_rcu_erase_val RCU \p synchronize method can be called. RCU should not be locked. Return \p true if \p key is found and deleted, \p false otherwise */ template bool erase( K const& key ) { return base_class::erase(key); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); } /// Delete \p key from the map /** \anchor cds_nonintrusive_SkipListMap_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. 
Return \p true if key is found and deleted, \p false otherwise */ template bool erase( K const& key, Func f ) { return base_class::erase( key, [&f]( node_type& node) { f( node.m_Value ); } ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), [&f]( node_type& node) { f( node.m_Value ); } ); } /// Extracts the item from the map with specified \p key /** \anchor cds_nonintrusive_SkipListMap_rcu_extract The function searches an item with key equal to \p key in the map, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item is not found the function returns an empty \p exempt_ptr Note the compare functor from \p Traits class' template argument should accept a parameter of type \p K that can be not the same as \p key_type. RCU \p synchronize() method can be called. RCU should NOT be locked. The function does not free the item found. The item will be implicitly freed when the returned object is destroyed or when its \p release() member function is called. */ template exempt_ptr extract( K const& key ) { return exempt_ptr( base_class::do_extract( key )); } /// Extracts the item from the map with comparing functor \p pred /** The function is an analog of \p extract(K const&) but \p pred predicate is used for key comparing. \p Less has the semantics like \p std::less. \p pred must imply the same element order as the comparator used for building the map. */ template exempt_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( base_class::do_extract_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >())); } /// Extracts an item with minimal key from the map /** The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. If the skip-list is empty the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item found. The item will be implicitly freed when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return exempt_ptr( base_class::do_extract_min()); } /// Extracts an item with maximal key from the map /** The function searches an item with maximal key, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. If the skip-list is empty the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item found. The item will be implicitly freed when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_max() { return exempt_ptr( base_class::do_extract_max()); } /// Find the key \p key /** \anchor cds_nonintrusive_SkipListMap_rcu_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for item found. 
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( K const& key, Func f ) { return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_Value );}); } /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), [&f](node_type& item, K const& ) { f( item.m_Value );}); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( K const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and return the item found /** \anchor cds_nonintrusive_SkipListMap_rcu_get The function searches the item with key equal to \p key and returns a \p raw_ptr object pointing to an item found. If \p key is not found it returns empty \p raw_ptr. Note the compare functor in \p Traits class' template argument should accept a parameter of type \p K that can be not the same as \p key_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::SkipListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > skip_list; skip_list theList; // ... typename skip_list::raw_ptr pVal; { // Lock RCU skip_list::rcu_lock lock; pVal = theList.get( 5 ); if ( pVal ) { // Deal with pVal //... } } // You can manually release pVal after RCU-locked section pVal.release(); \endcode */ template raw_ptr get( K const& key ) { return raw_ptr( base_class::get( key )); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_get "get(K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. 
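A brief sketch of a predicate-based lookup (the \p less_int functor and \p map_type are illustrative assumptions; the predicate must order keys exactly as the map's comparator does):
\code
struct less_int {
    bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
};

{
    map_type::rcu_lock lock;
    map_type::raw_ptr p = theMap.get_with( 5, less_int());
    if ( p ) {
        // the item pointed to by p may be used only while RCU is locked
    }
}
\endcode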
*/ template raw_ptr get_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return raw_ptr( base_class::get_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >())); } /// Clears the map (not atomic) void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if item count is zero then the map is empty. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_RCU_H libcds-2.3.3/cds/container/skip_list_set_dhp.h000066400000000000000000000010241341244201700213650ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_DHP_H #define CDSLIB_CONTAINER_SKIP_LIST_MAP_DHP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_DHP_H libcds-2.3.3/cds/container/skip_list_set_hp.h000066400000000000000000000010201341244201700212150ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_HP_H #define CDSLIB_CONTAINER_SKIP_LIST_SET_HP_H #include #include #include #include #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_HP_H libcds-2.3.3/cds/container/skip_list_set_nogc.h000066400000000000000000000411211341244201700215420ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_NOGC_H #define CDSLIB_CONTAINER_SKIP_LIST_SET_NOGC_H #include #include #include #include #include namespace cds { namespace container { //@cond namespace skip_list { namespace details { struct set_key_accessor { template typename NodeType::stored_value_type const& operator()( NodeType const& node ) const { return node.m_Value; } }; }} // namespace skip_list::details namespace details { template struct make_skip_list_set_nogc { typedef cds::gc::nogc gc; typedef T value_type; typedef Traits traits; typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; struct node_type: public intrusive_node_type { typedef intrusive_node_type base_class; typedef typename base_class::atomic_ptr atomic_ptr; typedef atomic_ptr tower_item_type; typedef value_type stored_value_type; value_type m_Value; //atomic_ptr m_arrTower[] ; // allocated together with node_type in single memory block template node_type( unsigned int nHeight, atomic_ptr * pTower, Q const& v ) : m_Value(v) { if ( nHeight > 1 ) { new (pTower) atomic_ptr[ nHeight - 1 ]; base_class::make_tower( nHeight, pTower ); } } template node_type( unsigned int nHeight, atomic_ptr * pTower, Q&& q, Args&&... args ) : m_Value( std::forward(q), std::forward(args)... 
) { if ( nHeight > 1 ) { new (pTower) atomic_ptr[ nHeight - 1 ]; base_class::make_tower( nHeight, pTower ); } } node_type() = delete; // no default ctor }; typedef skip_list::details::node_allocator< node_type, traits> node_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { node_allocator().Delete( pNode ); } }; typedef skip_list::details::dummy_node_builder dummy_node_builder; typedef typename traits::key_accessor key_accessor; typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; /* template using less_wrapper = compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_accessor >; */ typedef typename cds::intrusive::skip_list::make_traits< cds::opt::type_traits< traits > ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > ,cds::intrusive::opt::disposer< node_deallocator > ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, key_accessor > > >::type intrusive_type_traits; typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; }; } // namespace details //@endcond /// Lock-free skip-list set (template specialization for gc::nogc) /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_SkipListSet_nogc This specialization is intended for so-called persistent usage when no item reclamation may be performed. The class does not support deleting of list item. See \ref cds_nonintrusive_SkipListSet_hp "SkipListSet" for detailed description. Template arguments: - \p T - type to be stored in the list. - \p Traits - type traits. See skip_list::traits for explanation. It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template argument. \p Options template arguments of cds::container::skip_list::make_traits metafunction are: - opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the opt::less is used. - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consisnent memory model). - skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo or user-provided one. See skip_list::random_level_generator option description for explanation. Default is \p skip_list::turbo32. - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. - opt::stat - internal statistics. 
Available types: skip_list::stat, skip_list::empty_stat (the default) */ template < typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = skip_list::traits #else class Traits #endif > class SkipListSet< gc::nogc, T, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SkipListSet< cds::gc::nogc, T, Traits > #else protected details::make_skip_list_set_nogc< T, typename cds::opt::replace_key_accessor< Traits, skip_list::details::set_key_accessor >::type >::type #endif { //@cond typedef details::make_skip_list_set_nogc< T, typename cds::opt::replace_key_accessor< Traits, skip_list::details::set_key_accessor >::type > maker; typedef typename maker::type base_class; //@endcond public: typedef typename base_class::gc gc ; ///< Garbage collector used typedef T value_type ; ///< Value type stored in the set typedef Traits options ; ///< Options specified typedef typename base_class::back_off back_off ; ///< Back-off strategy used typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes typedef typename base_class::item_counter item_counter ; ///< Item counting policy used typedef typename maker::key_comparator key_comparator ; ///< key compare functor typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option typedef typename options::stat stat ; ///< internal statistics type typedef typename base_class::random_level_generator random_level_generator ; ///< random level generator protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::node_allocator node_allocator; typedef typename std::conditional< std::is_same< typename options::key_accessor, opt::none >::value, skip_list::details::set_key_accessor, typename options::key_accessor >::type key_accessor; typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; //@endcond public: ///@name Forward iterators //@{ /// Forward ordered iterator /** The forward iterator for a split-list has some features: - it has no post-increment operator - it depends on iterator of underlying \p OrderedList */ typedef skip_list::details::iterator< typename base_class::iterator > iterator; /// Const iterator type typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; /// Returns a forward iterator addressing the first element in a set iterator begin() { return iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return const_iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a forward iterator that addresses the location succeeding the last element in a set. iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator end() const { return const_iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. 
const_iterator cend() const
        {
            return const_iterator( base_class::cend());
        }
    //@}

    protected:
        //@cond
        static iterator node_to_iterator( node_type * pNode )
        {
            assert( pNode );
            return iterator( base_class::iterator::from_node( pNode ));
        }
        //@endcond

    public:
        /// Default ctor
        SkipListSet()
            : base_class()
        {}

        /// Destructor destroys the set object
        ~SkipListSet()
        {}

        /// Inserts new node
        /**
            The function inserts \p val in the set if it does not contain an item with key equal to \p val.

            Returns an iterator pointing to the inserted item on success, otherwise \ref end()
        */
        template <typename Q>
        iterator insert( const Q& val )
        {
            scoped_node_ptr sp( node_allocator().New( base_class::random_level(), val ));
            if ( base_class::insert( *sp.get())) {
                return node_to_iterator( sp.release());
            }
            return end();
        }

        /// Inserts data of type \ref value_type constructed with std::forward<Args>(args)...
        /**
            Returns an iterator pointing to the inserted item on success, \ref end() otherwise
        */
        template <typename... Args>
        iterator emplace( Args&&... args )
        {
            scoped_node_ptr sp( node_allocator().New( base_class::random_level(), std::forward<Args>(args)... ));
            if ( base_class::insert( *sp.get())) {
                return node_to_iterator( sp.release());
            }
            return end();
        }

        /// Updates the item
        /**
            The operation inserts a new item if \p val is not found in the set and \p bInsert is \p true.
            Otherwise, if that key exists, the function returns an iterator pointing to the item found.

            Returns std::pair<iterator, bool> where \p first is an iterator pointing to the item found or inserted,
            or \p end() if \p val is not found and \p bInsert is \p false;
            \p second is \p true if a new item has been added or \p false if the item is already in the set.
        */
        template <typename Q>
        std::pair<iterator, bool> update( const Q& val, bool bInsert = true )
        {
            scoped_node_ptr sp( node_allocator().New( base_class::random_level(), val ));
            node_type * pNode;
            std::pair<bool, bool> bRes = base_class::update( *sp,
                [&pNode](bool, node_type& item, node_type&) { pNode = &item; },
                bInsert );
            if ( bRes.first && bRes.second )
                sp.release();
            else if ( !bRes.first )
                return std::make_pair( end(), false );
            assert( pNode );
            return std::make_pair( node_to_iterator( pNode ), bRes.second );
        }
        //@cond
        template <typename Q>
        CDS_DEPRECATED("ensure() is deprecated, use update()")
        std::pair<iterator, bool> ensure( const Q& val )
        {
            return update( val, true );
        }
        //@endcond

        /// Checks whether the set contains \p key
        /**
            The function searches the item with key equal to \p key
            and returns an iterator to the item found or \p end() if the key is not found
        */
        template <typename Q>
        iterator contains( Q const& key ) const
        {
            node_type * pNode = base_class::contains( key );
            if ( pNode )
                return node_to_iterator( pNode );
            return base_class::nonconst_end();
        }
        //@cond
        template <typename Q>
        CDS_DEPRECATED("deprecated, use contains()")
        iterator find( Q const& key ) const
        {
            return contains( key );
        }
        //@endcond

        /// Checks whether the set contains \p key using \p pred predicate for searching
        /**
            The function is similar to contains( key ) but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the set.
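For example (a short sketch for this persistent specialization; the \p set_type typedef and the \p less_int functor are illustrative assumptions):
\code
typedef cds::container::SkipListSet< cds::gc::nogc, int > set_type;

struct less_int {
    bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
};

set_type theSet;
set_type::iterator it = theSet.insert( 10 );   // returns an iterator, end() on failure
if ( theSet.contains( 10, less_int()) != theSet.end()) {
    // the item is in the set; with gc::nogc it is never reclaimed
}
\endcode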
*/ template iterator contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); node_type * pNode = base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, key_accessor>()); if ( pNode ) return node_to_iterator( pNode ); return base_class::nonconst_end(); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Gets minimum key from the set /** If the set is empty the function returns \p nullptr */ value_type * get_min() const { node_type * pNode = base_class::get_min(); return pNode ? &pNode->m_Value : nullptr; } /// Gets maximum key from the set /** The function returns \p nullptr if the set is empty */ value_type * get_max() const { node_type * pNode = base_class::get_max(); return pNode ? &pNode->m_Value : nullptr; } /// Clears the set (non-atomic) /** The function is not atomic. Finding and/or inserting is prohibited while clearing. Otherwise an unpredictable result may be encountered. Thus, \p clear() may be used only for debugging purposes. */ void clear() { base_class::clear(); } /// Checks if the set is empty bool empty() const { return base_class::empty(); } /// Returns item count in the set /** The value returned depends on item counter type provided by \p Traits template parameter. If it is atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the set emptiness, use \ref empty member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. static constexpr unsigned int max_height() noexcept { return base_class::max_height(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // cds::container #endif // ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_NOGC_H libcds-2.3.3/cds/container/skip_list_set_rcu.h000066400000000000000000000752411341244201700214170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_RCU_H #define CDSLIB_CONTAINER_SKIP_LIST_SET_RCU_H #include #include namespace cds { namespace container { /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_SkipListSet_rcu The implementation of well-known probabilistic data structure called skip-list invented by W.Pugh in his papers: - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees - [1990] W.Pugh A Skip List Cookbook A skip-list is a probabilistic data structure that provides expected logarithmic time search without the need of rebalance. The skip-list is a collection of sorted linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. Each list has a level, ranging from 0 to 32. The bottom-level list contains all the nodes, and each higher-level list is a sublist of the lower-level lists. Each node is created with a random top level (with a random height), and belongs to all lists up to that level. The probability that a node has the height 1 is 1/2. 
The probability that a node has the height N is 1/2 ** N (more precisely, the distribution depends on the random generator provided, but our generators have this property).

        The lock-free variant of skip-list is implemented according to the book
        - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", chapter 14.4 "A Lock-Free Concurrent Skiplist"

        Template arguments:
        - \p RCU - one of \ref cds_urcu_gc "RCU type".
        - \p T - type to be stored in the list.
        - \p Traits - set traits, default is \p skip_list::traits.

        It is possible to declare an option-based list with the cds::container::skip_list::make_traits metafunction
        instead of the \p Traits template argument.
        Template argument list \p Options of cds::container::skip_list::make_traits metafunction are:
        - opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the opt::less is used.
        - opt::less - specifies binary predicate used for key comparison. Default is \p std::less.
        - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting.
        - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default)
          or opt::v::sequential_consistent (sequentially consistent memory model).
        - skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo
          or user-provided one. See \p skip_list::random_level_generator option description for explanation.
          Default is \p skip_list::turbo32.
        - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR.
        - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used.
        - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default)
        - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock

        @note Before including this header you should include the appropriate RCU header file,
        see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.

        Iterators

        The class supports a forward iterator (\ref iterator and \ref const_iterator).
        The iteration is ordered.
        You may iterate over skip-list set items only under RCU lock.
        Only in this case is the iterator thread-safe, since while RCU is locked no set item can be reclaimed.
        The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) is not possible.

        @warning The iterator object cannot be passed between threads.

        Example of how to use skip-list set iterators:
        \code
        // First, you should include the header for the RCU type you have chosen
        #include <cds/urcu/general_buffered.h>
        #include <cds/container/skip_list_set_rcu.h>

        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

        struct Foo {
            // ...
        };

        // Traits for your skip-list.
        // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
        struct my_traits: public cds::container::skip_list::traits
        {
            // ...
        };
        typedef cds::container::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;

        my_skiplist_set theSet;

        // ...

        // Begin iteration
        {
            // Apply RCU locking manually
            typename rcu_type::scoped_lock sl;

            for ( auto it = theSet.begin(); it != theSet.end(); ++it ) {
                // ...
} // rcu_type::scoped_lock destructor releases RCU lock implicitly } \endcode \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate all elements in the set: any concurrent deletion can exclude the element pointed by the iterator from the set, and your iteration can be terminated before end of the set. Therefore, such iteration is more suitable for debugging purposes The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. */ template < typename RCU, typename T, #ifdef CDS_DOXYGEN_INVOKED typename Traits = skip_list::traits #else typename Traits #endif > class SkipListSet< cds::urcu::gc< RCU >, T, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SkipListSet< cds::urcu::gc< RCU >, T, Traits > #else protected details::make_skip_list_set< cds::urcu::gc< RCU >, T, Traits >::type #endif { //@cond typedef details::make_skip_list_set< cds::urcu::gc< RCU >, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: typedef typename base_class::gc gc ; ///< Garbage collector used typedef T value_type ; ///< Value type stored in the set typedef Traits traits ; ///< Options specified typedef typename base_class::back_off back_off ; ///< Back-off strategy used typedef typename traits::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes typedef typename base_class::item_counter item_counter ; ///< Item counting policy used typedef typename maker::key_comparator key_comparator ; ///< key compare functor typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::random_level_generator random_level_generator ; ///< random level generator typedef typename traits::stat stat ; ///< internal statistics type typedef typename traits::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy protected: //@cond typedef typename maker::node_type node_type; typedef typename maker::node_allocator node_allocator; typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; //@endcond public: typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock /// Group of \p extract_xxx functions do not require external locking static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; private: //@cond struct raw_ptr_converter { value_type * operator()( node_type * p ) const { return p ? 
&p->m_Value : nullptr; } value_type& operator()( node_type& n ) const { return n.m_Value; } value_type const& operator()( node_type const& n ) const { return n.m_Value; } }; //@endcond public: /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; protected: //@cond unsigned int random_level() { return base_class::random_level(); } //@endcond public: /// Default ctor SkipListSet() : base_class() {} /// Destructor destroys the set object ~SkipListSet() {} public: ///@name Forward ordered iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator has some features: - it has no post-increment operator - it depends on iterator of underlying \p OrderedList You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to. */ typedef skip_list::details::iterator< typename base_class::iterator > iterator; /// Const iterator type typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; /// Returns a forward iterator addressing the first element in a set iterator begin() { return iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return const_iterator( base_class::begin()); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a forward iterator that addresses the location succeeding the last element in a set. iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator end() const { return const_iterator( base_class::end()); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain as minimum the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \ref value_type. RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q const& val ) { scoped_node_ptr sp( node_allocator().New( random_level(), val )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Inserts new node /** The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-fields of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this set's item by concurrent threads. The user-defined functor is called only if the inserting is success. RCU \p synchronize method can be called. RCU should not be locked. 
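For example (a hedged sketch; the \p foo value type and a set ordered by \p foo::key are assumptions for illustration):
\code
struct foo {
    int key;
    int payload;
    foo( int k ): key( k ), payload( 0 ) {}
};

// theSet is assumed to be a SkipListSet< rcu_type, foo, ... > ordered by foo::key
bool ok = theSet.insert( foo( 5 ), []( foo& v ) {
    v.payload = 42;   // called only if the insertion has succeeded
});
\endcode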
*/ template bool insert( Q const& val, Func f ) { scoped_node_ptr sp( node_allocator().New( random_level(), val )); if ( base_class::insert( *sp.get(), [&f]( node_type& v ) { f( v.m_Value ); } )) { sp.release(); return true; } return false; } /// Updates the item /** The operation performs inserting or changing data with lock-free manner. If \p val not found in the set, then the new item created from \p val is inserted into the set iff \p bInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor \p Func signature: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode where: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - an item of the set - \p val - argument \p val passed into the \p %update() function The functor may change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. RCU \p synchronize method can be called. RCU should not be locked. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already exists. */ template std::pair update( const Q& val, Func func, bool bInsert = true ) { scoped_node_ptr sp( node_allocator().New( random_level(), val )); std::pair bRes = base_class::update( *sp, [&func, &val](bool bNew, node_type& node, node_type&){ func( bNew, node.m_Value, val );}, bInsert ); if ( bRes.first && bRes.second ) sp.release(); return bRes; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const Q& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts data of type \ref value_type constructed with std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. RCU \p synchronize method can be called. RCU should not be locked. */ template bool emplace( Args&&... args ) { scoped_node_ptr sp( node_allocator().New( random_level(), std::forward(args)... )); if ( base_class::insert( *sp.get())) { sp.release(); return true; } return false; } /// Delete \p key from the set /** \anchor cds_nonintrusive_SkipListSet_rcu_erase_val The item comparator should be able to compare the type \p value_type and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return base_class::erase( key ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); } /// Delete \p key from the set /** \anchor cds_nonintrusive_SkipListSet_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. 
The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Since the key of MichaelHashSet's \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the type \p T of list item and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise See also: \ref erase */ template bool erase( Q const& key, Func f ) { return base_class::erase( key, [&f]( node_type const& node) { f( node.m_Value ); } ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), [&f]( node_type const& node) { f( node.m_Value ); } ); } /// Extracts the item from the set with specified \p key /** \anchor cds_nonintrusive_SkipListSet_rcu_extract The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item is not found the function returns an empty \p exempt_ptr Note the compare functor from \p Traits class' template argument should accept a parameter of type \p Q that can be not the same as \p value_type. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item found. The item will be implicitly freed when the returned object is destroyed or when its \p release() member function is called. */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( base_class::do_extract( key )); } /// Extracts the item from the set with comparing functor \p pred /** The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. \p Less has the semantics like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( base_class::do_extract_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >())); } /// Extracts an item with minimal key from the set /** The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. If the skip-list is empty the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not free the item found. The item will be implicitly freed when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return exempt_ptr( base_class::do_extract_min()); } /// Extracts an item with maximal key from the set /** The function searches an item with maximal key, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. If the skip-list is empty the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. 
The function does not free the item found. The item will be implicitly freed when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_max() { return exempt_ptr( base_class::do_extract_max()); } /// Find the key \p val /** @anchor cds_nonintrusive_SkipListSet_rcu_find_func The function searches the item with key equal to \p val and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor may change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may be not the same as \p value_type. The function applies RCU lock internally. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& val, Func f ) { return base_class::find( val, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); }); } //@cond template bool find( Q const& val, Func f ) { return base_class::find( val, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); } //@endcond /// Finds the key \p val using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& val, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); } //@cond template bool find_with( Q const& val, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), [&f]( node_type& node, Q const& v ) { f( node.m_Value, v ); } ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( Q const & key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const & key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. 
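For example (a hedged sketch continuing the \p foo example above; \p less_by_key must order elements exactly as the set's comparator does):
\code
struct less_by_key {
    bool operator()( foo const& v, int k ) const { return v.key < k; }
    bool operator()( int k, foo const& v ) const { return k < v.key; }
    bool operator()( foo const& l, foo const& r ) const { return l.key < r.key; }
};

bool found = theSet.contains( 5, less_by_key());
\endcode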
*/ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_nonintrusive_SkipListSet_rcu_get The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. If \p key is not found it returns empty \p raw_ptr. Note the compare functor in \p Traits class' template argument should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::container::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; skip_list theList; // ... typename skip_list::raw_ptr pVal; { // Lock RCU skip_list::rcu_lock lock; pVal = theList.get( 5 ); if ( pVal ) { // Deal with pVal //... } } // You can manually release pVal after RCU-locked section pVal.release(); \endcode */ template raw_ptr get( Q const& key ) { return raw_ptr( base_class::get( key )); } /// Finds the key \p val and return the item found /** The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template raw_ptr get_with( Q const& val, Less pred ) { CDS_UNUSED( pred ); return raw_ptr( base_class::get_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >())); } /// Clears the set (non-atomic). /** The function deletes all items from the set. The function is not atomic, thus, in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each item the \ref disposer provided by \p Traits template parameter will be called. */ void clear() { base_class::clear(); } /// Checks if the set is empty bool empty() const { return base_class::empty(); } /// Returns item count in the set /** The value returned depends on item counter type provided by \p Traits template parameter. If it is atomicity::empty_item_counter this function always returns 0. Therefore, the function is not suitable for checking the set emptiness, use \ref empty member function for this purpose. */ size_t size() const { return base_class::size(); } /// Returns const reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_RCU_H libcds-2.3.3/cds/container/split_list_map.h000066400000000000000000001005721341244201700207110ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_H #define CDSLIB_CONTAINER_SPLIT_LIST_MAP_H #include #include namespace cds { namespace container { /// Split-ordered list map /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_SplitListMap_hp Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" - [2008] Nir Shavit "The Art of Multiprocessor Programming" See intrusive::SplitListSet for a brief description of the split-list algorithm. Template parameters: - \p GC - Garbage collector used like \p cds::gc::HP or \p cds::gc::DHP - \p Key - key type of an item stored in the map. It should be copy-constructible - \p Value - value type stored in the map - \p Traits - map traits, default is \p split_list::traits. Instead of declaring \p %split_list::traits -based struct you may apply option-based notation with \p split_list::make_traits metafunction. There are the specializations: - for \ref cds_urcu_desc "RCU" - declared in cd/container/split_list_map_rcu.h, see \ref cds_nonintrusive_SplitListMap_rcu "SplitListMap". - for \ref cds::gc::nogc declared in cds/container/split_list_map_nogc.h, see \ref cds_nonintrusive_SplitListMap_nogc "SplitListMap". \par Usage You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list is original data structure based on an ordered list. Suppose, you want construct split-list map based on \p gc::HP GC and \p MichaelList as ordered list implementation. Your map should map \p int key to \p std::string value. So, you beginning your code with the following: \code #include #include namespace cc = cds::container; \endcode The inclusion order is important: first, include file for ordered-list implementation (for this example, cds/container/michael_list_hp.h), then the header for split-list map cds/container/split_list_map.h. Now, you should declare traits for split-list map. The main parts of traits are a hash functor and a comparing functor for the ordered list. We use std::hash as hash functor and std::less predicate as comparing functor. The second attention: instead of using \p %MichaelList in \p %SplitListMap traits we use a tag \p cds::contaner::michael_list_tag for the Michael's list. The split-list requires significant support from underlying ordered list class and it is not good idea to dive you into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. 
\code // SplitListMap traits struct foo_set_traits: public cc::split_list::traits { typedef cc::michael_list_tag ordered_list ; // what type of ordered list we want to use typedef std::hash hash ; // hash functor for the key stored in split-list map // Type traits for our MichaelList class struct ordered_list_traits: public cc::michael_list::traits { typedef std::less less ; // use our std::less predicate as comparator to order list nodes }; }; \endcode Now you are ready to declare our map class based on \p %SplitListMap: \code typedef cc::SplitListMap< cds::gc::DHP, int, std::string, foo_set_traits > int_string_map; \endcode You may use the modern option-based declaration instead of classic type-traits-based one: \code typedef cc::SplitListMap< cs::gc::DHP // GC used ,int // key type ,std::string // value type ,cc::split_list::make_traits< // metafunction to build split-list traits cc::split_list::ordered_list // tag for underlying ordered list implementation ,cc::opt::hash< std::hash > // hash functor ,cc::split_list::ordered_list_traits< // ordered list traits desired cc::michael_list::make_traits< // metafunction to build lazy list traits cc::opt::less< std::less > // less-based compare functor >::type > >::type > int_string_map; \endcode In case of option-based declaration with \p split_list::make_traits metafunction the struct \p foo_set_traits is not required. Now, the map of type \p int_string_map is ready to use in your program. Note that in this example we show only mandatory \p traits parts, optional ones is the default and they are inherited from \p container::split_list::traits. There are many other options for deep tuning of the split-list and ordered-list containers. */ template < class GC, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListMap: protected container::SplitListSet< GC, std::pair, split_list::details::wrap_map_traits > { //@cond typedef container::SplitListSet< GC, std::pair, split_list::details::wrap_map_traits > base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef Key key_type; ///< key type typedef Value mapped_type; ///< type of value to be stored in the map typedef Traits traits; ///< Map traits typedef std::pair value_type ; ///< key-value pair type typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class typedef typename base_class::key_comparator key_comparator; ///< key compare functor typedef typename base_class::hash hash; ///< Hash functor for \ref key_type typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename base_class::stat stat; ///< Internal statistics /// Count of hazard pointer required static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; protected: //@cond typedef typename base_class::maker::traits::key_accessor key_accessor; typedef typename base_class::node_type node_type; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** The forward iterator for a split-list has the following features: - it has no post-increment operator - it depends on underlying ordered list iterator - The iterator object cannot be moved across thread boundary because it contains GC's guard that is thread-private GC data. 
- Iterator ensures thread-safety even if you delete the item that the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate all items in the split-list. Moreover, a crash is possible when you try to iterate to the next element that has been deleted by a concurrent thread. @warning Use this iterator on a concurrent container for debugging purposes only. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef typename base_class::iterator iterator; /// Const forward iterator typedef typename base_class::const_iterator const_iterator; /// Returns a forward iterator addressing the first element in a map /** For an empty map \code begin() == end() \endcode */ iterator begin() { return base_class::begin(); } /// Returns an iterator that addresses the location succeeding the last element in a map /** Do not use the value returned by the end function to access any item. The returned value can be used only to control reaching the end of the map. For an empty map \code begin() == end() \endcode */ iterator end() { return base_class::end(); } /// Returns a forward const iterator addressing the first element in a map const_iterator begin() const { return base_class::begin(); } /// Returns a forward const iterator addressing the first element in a map const_iterator cbegin() const { return base_class::cbegin(); } /// Returns a const iterator that addresses the location succeeding the last element in a map const_iterator end() const { return base_class::end(); } /// Returns a const iterator that addresses the location succeeding the last element in a map const_iterator cend() const { return base_class::cend(); } //@} public: /// Initializes a split-ordered map of default capacity /** The default capacity is defined in the bucket table constructor. See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table which is selected by \p intrusive::split_list::traits::dynamic_bucket_table. */ SplitListMap() : base_class() {} /// Initializes a split-ordered map SplitListMap( size_t nItemCount ///< estimated average item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. ) : base_class( nItemCount, nLoadFactor ) {} public: /// Inserts a new node with \p key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from a value of type \p K. In the trivial case, \p K is equal to \ref key_type. - The \ref mapped_type should be default-constructible. Returns \p true if inserting is successful, \p false otherwise. */ template <typename K> bool insert( K&& key ) { return base_class::emplace( key_type( std::forward<K>( key )), mapped_type()); } /// Inserts a new node /** The function creates a node with a copy of \p val value and then inserts the node created into the map. Preconditions: - The \ref key_type should be constructible from \p key of type \p K. - The \ref mapped_type should be constructible from \p val of type \p V.
Returns \p true if \p val is inserted into the map, \p false otherwise. */ template <typename K, typename V> bool insert( K&& key, V&& val ) { return base_class::emplace( key_type( std::forward<K>( key )), mapped_type( std::forward<V>( val ))); } /// Inserts a new node and initializes it by a functor /** This function inserts a new node with key \p key and if inserting is successful then it calls the \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of the user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to the item's key that cannot be changed. - item.second is a reference to the item's value that may be changed. It should be kept in mind that concurrent modifications of \p item.second may be possible. The key_type should be constructible from a value of type \p K. The function allows splitting the creation of a new item into several parts: - create an item from \p key; - insert the new item into the map; - if inserting is successful, initialize the value of the item by calling the \p func functor. This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization be completed only if inserting is successful. @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to the inserted item and does not require any node-level synchronization. */ template <typename K, typename Func> bool insert_with( K&& key, Func func ) { //TODO: pass arguments by reference (make_pair makes a copy) return base_class::insert( std::make_pair( key_type( std::forward<K>( key )), mapped_type()), func ); } /// For key \p key inserts data of type \p mapped_type created from \p args /** \p key_type should be constructible from type \p K Returns \p true if inserting is successful, \p false otherwise. */ template <typename K, typename... Args> bool emplace( K&& key, Args&&... args ) { return base_class::emplace( key_type( std::forward<K>(key)), mapped_type( std::forward<Args>(args)...)); } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor \p func signature depends on the ordered list: for \p MichaelKVList, \p LazyKVList \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of \p item.second that is \p mapped_type. for \p IterableKVList \code void func( value_type& val, value_type * old ); \endcode where - \p val - a new data constructed from \p key - \p old - the old value that will be retired. If a new item has been inserted then \p old is \p nullptr. Returns std::pair<bool, bool> where \p first is true if the operation is successful, \p second is true if a new item has been added or \p false if the item with \p key is already in the map. @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" and \ref cds_nonintrusive_IterableKVList_gc "IterableKVList" as the ordered list see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to the inserted item and does not require any node-level synchronization.
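A minimal usage sketch (assuming the \p int_string_map type declared above, built over \p MichaelKVList or \p LazyKVList so the two-argument functor form applies): \code int_string_map theMap( 1024, 1 ); theMap.insert( 2, std::string( "two" )); // insert key 2 with value "two" // Insert-or-update: the functor sees bNew == true for a freshly inserted item theMap.update( 2, []( bool bNew, int_string_map::value_type& item ) { if ( !bNew ) item.second = "TWO"; // the item already existed - change its value }); \endcode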
*/ template <typename K, typename Func> #ifdef CDS_DOXYGEN_INVOKED std::pair<bool, bool> #else typename std::enable_if< std::is_same<K, K>::value && !is_iterable_list< ordered_list >::value, std::pair<bool, bool> >::type #endif update( K&& key, Func func, bool bAllowInsert = true ) { typedef decltype( std::make_pair( key_type( std::forward<K>( key )), mapped_type())) arg_pair_type; return base_class::update( std::make_pair( key_type( key ), mapped_type()), [&func]( bool bNew, value_type& item, arg_pair_type const& /*val*/ ) { func( bNew, item ); }, bAllowInsert ); } //@cond template <typename K, typename Func> #ifdef CDS_DOXYGEN_INVOKED std::pair<bool, bool> #else typename std::enable_if< std::is_same<K, K>::value && is_iterable_list< ordered_list >::value, std::pair<bool, bool> >::type #endif update( K&& key, Func func, bool bAllowInsert = true ) { return base_class::update( std::make_pair( key_type( std::forward<K>( key )), mapped_type()), func, bAllowInsert ); } //@endcond //@cond template <typename K, typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Inserts or updates the node (only for \p IterableKVList) /** The operation performs inserting or changing data in a lock-free manner. If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the current element is changed to \p val, and the old element will be retired later. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key is already in the map. */ template <typename Q, typename V> #ifdef CDS_DOXYGEN_INVOKED std::pair<bool, bool> #else typename std::enable_if< std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, std::pair<bool, bool> >::type #endif upsert( Q&& key, V&& val, bool bAllowInsert = true ) { return base_class::upsert( std::make_pair( key_type( std::forward<Q>( key )), mapped_type( std::forward<V>( val ))), bAllowInsert ); } /// Deletes \p key from the map /** \anchor cds_nonintrusive_SplitListMap_erase_val Returns \p true if \p key is found and deleted, \p false otherwise */ template <typename K> bool erase( K const& key ) { return base_class::erase( key ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListMap_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Deletes \p key from the map /** \anchor cds_nonintrusive_SplitListMap_erase_func The function searches an item with key \p key, calls the \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface is: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode Returns \p true if the key is found and deleted, \p false otherwise */ template <typename K, typename Func> bool erase( K const& key, Func f ) { return base_class::erase( key, f ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListMap_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map.
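For example, a sketch of deleting with a functor and with a custom predicate (assuming \p int_string_map from above; \p int_less is a hypothetical less-comparator introduced here only for illustration): \code struct int_less { bool operator()( int lhs, int rhs ) const { return lhs < rhs; } }; // ... int_string_map theMap( 1024, 1 ); theMap.insert( 5, std::string( "five" )); // Erase and observe the doomed item in the functor theMap.erase( 5, []( int_string_map::value_type& item ) { // last chance to read item.second before the item is retired }); // Erase using an explicit predicate instead of the map's comparator theMap.erase_with( 5, int_less()); \endcode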
*/ template <typename K, typename Less, typename Func> bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>(), f ); } /// Deletes the item pointed to by iterator \p iter (only for \p IterableList based map) /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by another thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. @note \p %erase_at() is supported only for \p %SplitListMap based on \p IterableList. */ #ifdef CDS_DOXYGEN_INVOKED bool erase_at( iterator const& iter ) #else template <typename Iterator> typename std::enable_if< std::is_same<Iterator, iterator>::value && is_iterable_list< ordered_list >::value, bool >::type erase_at( Iterator const& iter ) #endif { return base_class::erase_at( iter ); } /// Extracts the item with the specified \p key /** \anchor cds_nonintrusive_SplitListMap_hp_extract The function searches an item with key equal to \p key, unlinks it from the map, and returns it as a \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p K that may not be the same as \p value_type. The extracted item is freed automatically when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::container::SplitListMap< your_template_args > splitlist_map; splitlist_map theMap; // ... { splitlist_map::guarded_ptr gp(theMap.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template <typename K> guarded_ptr extract( K const& key ) { return base_class::extract_( key ); } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_nonintrusive_SplitListMap_hp_extract "extract(K const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> guarded_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::extract_with_( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Finds the key \p key /** \anchor cds_nonintrusive_SplitListMap_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of the \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the map's \p item. If such access is possible you must provide your own synchronization schema on the item level to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template <typename K, typename Func> bool find( K const& key, Func f ) { return base_class::find( key, [&f](value_type& pair, K const&){ f( pair ); } ); } /// Finds \p key and returns an iterator pointing to the item found (only for \p IterableList) /** If \p key is not found the function returns \p end().
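A sketch of the iterator form (assuming a split-list map built over \p IterableKVList, here called \p iterable_map - a hypothetical typedef used only for illustration): \code iterable_map theMap( 1024, 1 ); theMap.insert( 42, std::string( "answer" )); auto it = theMap.find( 42 ); // iterator form, IterableList-based map only if ( it != theMap.end()) it->second = "the answer"; // you must ensure exclusive access yourself \endcode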
@note This function is supported only for a map based on \p IterableList */ template <typename K> #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same<K, K>::value && is_iterable_list<ordered_list>::value, iterator >::type #endif find( K const& key ) { return base_class::find( key ); } /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListMap_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less, typename Func> bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>(), [&f](value_type& pair, K const&){ f( pair ); } ); } /// Finds \p key using \p pred predicate and returns an iterator pointing to the item found (only for \p IterableList) /** The function is an analog of \p find(K&) but \p pred is used for key comparing. \p Less functor has an interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. If \p key is not found the function returns \p end(). @note This function is supported only for a map based on \p IterableList */ template <typename K, typename Less> #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same<K, K>::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for the \p Traits template parameter should accept a parameter of type \p K that may not be the same as \p value_type. Otherwise, you may use the \p contains( Q const&, Less pred ) function with an explicit predicate for key comparing. */ template <typename K> bool contains( K const& key ) { return base_class::contains( key ); } /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Finds \p key and returns the item found /** \anchor cds_nonintrusive_SplitListMap_hp_get The function searches the item with key equal to \p key and returns the item found as a guarded pointer. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one GC guard which can be a limited resource. Usage: \code typedef cds::container::SplitListMap< your_template_params > splitlist_map; splitlist_map theMap; // ... { splitlist_map::guarded_ptr gp(theMap.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for the split-list map should accept a parameter of type \p K that may not be the same as \p value_type.
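One more sketch of the guard semantics (assuming the \p splitlist_map type from the usage above): a held \p guarded_ptr keeps the item alive even if it is deleted from the map concurrently: \code splitlist_map::guarded_ptr gp( theMap.get( 5 )); theMap.erase( 5 ); // unlinks the item from the map if ( gp ) { // gp still points to valid memory: the HP guard // prevents the GC from reclaiming the item std::string copy = gp->second; } \endcode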
*/ template <typename K> guarded_ptr get( K const& key ) { return base_class::get_( key ); } /// Finds \p key and returns the item found /** The function is an analog of \ref cds_nonintrusive_SplitListMap_hp_get "get( K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> guarded_ptr get_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::get_with_( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Clears the map (not atomic) void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if the item count is zero then the map is empty. Thus, correct item counting is an important part of the map implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns internal statistics for \p ordered_list typename ordered_list::stat const& list_statistics() const { return base_class::list_statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_H libcds-2.3.3/cds/container/split_list_map_nogc.h000066400000000000000000000322621341244201700217170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_NOGC_H #define CDSLIB_CONTAINER_SPLIT_LIST_MAP_NOGC_H #include <cds/container/split_list_set_nogc.h> #include <cds/details/binary_functor_wrapper.h> namespace cds { namespace container { /// Split-ordered list map (template specialization for gc::nogc) /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_SplitListMap_nogc This specialization is so-called append-only: the map does not support removal of list items. See \ref cds_nonintrusive_SplitListMap_hp "SplitListMap" for a description of template parameters. @warning Many member functions return an iterator pointing to an item. The iterator can be used to set up a field of the item, but you should provide exclusive access to it, see \ref cds_intrusive_item_creating "insert item troubleshooting".
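For instance, a declaration sketch (reusing the \p cc namespace alias and the traits layout from the HP example above; the map_traits name is chosen here only for illustration): \code // Headers parallel the HP example: the ordered-list header first, // then cds/container/split_list_map_nogc.h struct map_traits: public cc::split_list::traits { typedef cc::michael_list_tag ordered_list; typedef std::hash<int> hash; struct ordered_list_traits: public cc::michael_list::traits { typedef std::less<int> less; }; }; typedef cc::SplitListMap< cds::gc::nogc, int, std::string, map_traits > append_only_map; \endcode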
*/ template < typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListMap< cds::gc::nogc, Key, Value, Traits >: protected container::SplitListSet< cds::gc::nogc, std::pair<Key const, Value>, split_list::details::wrap_map_traits<Key, Value, Traits> > { //@cond typedef container::SplitListSet< cds::gc::nogc, std::pair<Key const, Value>, split_list::details::wrap_map_traits<Key, Value, Traits> > base_class; //@endcond public: typedef cds::gc::nogc gc; ///< Garbage collector typedef Key key_type; ///< key type typedef Value mapped_type; ///< type of value stored in the map typedef std::pair<key_type const, mapped_type> value_type ; ///< Pair type typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class typedef typename base_class::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::hash hash; ///< Hash functor for \ref key_type typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename base_class::stat stat; ///< Internal statistics protected: //@cond typedef typename base_class::traits::key_accessor key_accessor; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for split-list is based on the \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef typename base_class::iterator iterator; /// Const forward iterator typedef typename base_class::const_iterator const_iterator; /// Returns a forward iterator addressing the first element in a map /** For an empty map \code begin() == end() \endcode */ iterator begin() { return base_class::begin(); } /// Returns an iterator that addresses the location succeeding the last element in a map /** Do not use the value returned by the end function to access any item. The returned value can be used only to control reaching the end of the map. For an empty map \code begin() == end() \endcode */ iterator end() { return base_class::end(); } /// Returns a forward const iterator addressing the first element in a map const_iterator begin() const { return base_class::begin(); } /// Returns a forward const iterator addressing the first element in a map const_iterator cbegin() const { return base_class::cbegin(); } /// Returns a const iterator that addresses the location succeeding the last element in a map const_iterator end() const { return base_class::end(); } /// Returns a const iterator that addresses the location succeeding the last element in a map const_iterator cend() const { return base_class::cend(); } //@} public: /// Initializes a split-ordered map of default capacity /** The default capacity is defined in the bucket table constructor. See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table which is selected by \p intrusive::split_list::traits::dynamic_bucket_table. */ SplitListMap() : base_class() {} /// Initializes a split-ordered map SplitListMap( size_t nItemCount ///< estimated average item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket.
Small integer up to 10, default is 1. ) : base_class( nItemCount, nLoadFactor ) {} public: /// Inserts a new node with \p key and default value /** The function creates a node with \p key and default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. In the trivial case, \p K is equal to \ref key_type. - The \p mapped_type should be default-constructible. Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename K> iterator insert( K const& key ) { //TODO: pass arguments by reference (make_pair makes a copy) return base_class::emplace( key_type( key ), mapped_type()); } /// Inserts a new node /** The function creates a node with a copy of \p val value and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename K, typename V> iterator insert( K const& key, V const& val ) { return base_class::emplace( key_type( key ), mapped_type( val )); } /// Inserts a new node and initializes it by a functor /** This function inserts a new node with key \p key and if inserting is successful then it calls the \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of the user-defined functor \p func is the reference to the map's item inserted. \p item.second is a reference to the item's value that may be changed. The user-defined functor \p func should guarantee that while the item's value is being changed no other changes could be made on this map's item by concurrent threads. The user-defined functor is called only if the inserting is successful. The \p key_type should be constructible from a value of type \p K. The function allows splitting the creation of a new item into several parts: - create an item from \p key; - insert the new item into the map; - if inserting is successful, initialize the value of the item by calling the \p func functor. This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization be completed only if inserting is successful. Returns an iterator pointing to the inserted value, or \p end() if inserting failed */ template <typename K, typename Func> iterator insert_with( const K& key, Func func ) { iterator it = insert( key ); if ( it != end()) func( (*it)); return it; } /// For key \p key inserts data of type \p mapped_type created in-place from \p args /** \p key_type should be constructible from type \p K Returns an iterator pointing to the inserted value, or \p end() if inserting failed. */ template <typename K, typename... Args> iterator emplace( K&& key, Args&&... args ) { return base_class::emplace( key_type( std::forward<K>( key )), mapped_type( std::forward<Args>( args )...)); } /// Updates the item /** If \p key is not in the map and \p bAllowInsert is \p true, the function inserts a new item. Otherwise, the function returns an iterator pointing to the item found. Returns std::pair<iterator, bool> where \p first is an iterator pointing to the item found or inserted (if inserting is not allowed and \p key is not found, the iterator will be \p end()), \p second is true if a new item has been added or \p false if the item is already in the map.
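A short sketch (assuming the \p append_only_map typedef sketched earlier): \code append_only_map theMap( 1024, 1 ); std::pair<append_only_map::iterator, bool> res = theMap.update( 10 ); if ( res.second ) { // new item: initialize the value; exclusive access is up to you res.first->second = "ten"; } \endcode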
*/ template <typename K> std::pair<iterator, bool> update( K const& key, bool bAllowInsert = true ) { //TODO: pass arguments by reference (make_pair makes a copy) return base_class::update( std::make_pair( key_type( key ), mapped_type()), bAllowInsert ); } //@cond template <typename K> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<iterator, bool> ensure( K const& key ) { return update( key, true ); } //@endcond /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointing to the item found, or \ref end() otherwise */ template <typename K> iterator contains( K const& key ) { return base_class::contains( key ); } //@cond template <typename K> CDS_DEPRECATED("deprecated, use contains()") iterator find( K const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> iterator contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } //@cond template <typename K, typename Less> CDS_DEPRECATED("deprecated, use contains()") iterator find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Clears the map (not atomic, for debugging purposes only) void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if the item count is zero then the map is empty. Thus, the correct item counting feature is an important part of the map implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns internal statistics for \p ordered_list typename ordered_list::stat const& list_statistics() const { return base_class::list_statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_NOGC_H libcds-2.3.3/cds/container/split_list_map_rcu.h000066400000000000000000000712061341244201700215630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_RCU_H #define CDSLIB_CONTAINER_SPLIT_LIST_MAP_RCU_H #include <cds/container/split_list_set_rcu.h> #include <cds/details/binary_functor_wrapper.h> namespace cds { namespace container { /// Split-ordered list map (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_map \anchor cds_nonintrusive_SplitListMap_rcu Hash table implementation based on the split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" - [2008] Nir Shavit "The Art of Multiprocessor Programming" See intrusive::SplitListSet for a brief description of the split-list algorithm. Template parameters: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type to be stored in the map - \p Value - value type to be stored in the map - \p Traits - type traits, default is \p split_list::traits. Instead of declaring a \p %split_list::traits -based struct you may apply option-based notation with the \p split_list::make_traits metafunction.
\par Iterators The class supports a forward unordered iterator (\ref iterator and \ref const_iterator). You may iterate over split-list map items only under RCU lock. Only in this case is the iterator thread-safe, since while RCU is locked no map item can be reclaimed. The requirement of holding the RCU lock during iterating means that deletion of the elements is not possible. @warning The iterator object cannot be passed between threads. Due to the concurrent nature of the split-list map it is not guaranteed that you can iterate all elements in the map: any concurrent deletion can exclude the element pointed to by the iterator from the map, and your iteration can be terminated before the end of the map. Therefore, such iteration is more suitable for debugging purposes. The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note, the iterator object returned by the \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. \par Usage You should decide what garbage collector you want and what ordered list you want to use. The split-ordered list is an original data structure based on an ordered list. Suppose you want to construct a split-list map based on \p cds::urcu::general_buffered<> GC with \p MichaelList as the ordered list implementation. Your map should map an \p int key to a \p std::string value. So you begin your program with the following includes: \code #include <cds/urcu/general_buffered.h> #include <cds/container/michael_list_rcu.h> #include <cds/container/split_list_map_rcu.h> namespace cc = cds::container; \endcode The inclusion order is important: - first, include one of the \ref cds_urcu_gc "RCU implementations" (cds/urcu/general_buffered.h in our case) - second, include the header of the ordered-list implementation (for this example, cds/container/michael_list_rcu.h), - then, the header for the RCU-based split-list map cds/container/split_list_map_rcu.h. Now, you should declare traits for the split-list map. The main parts of the traits are a hash functor for the map key and a comparing functor for the ordered list. We use \p std::hash<int> and \p std::less<int>. Another important point: instead of using \p %MichaelList in the \p %SplitListMap traits we use the tag \p cds::container::michael_list_tag for Michael's list. The split-list requires significant support from the underlying ordered list class, and it is not a good idea to plunge you into the deep implementation details of split-list and ordered-list interrelations. The tag paradigm simplifies the split-list interface.
\code // SplitListMap traits struct foo_set_traits: public cc::split_list::traits { typedef cc::michael_list_tag ordered_list ; // what type of ordered list we want to use typedef std::hash<int> hash ; // hash functor for the key stored in split-list map // Type traits for our MichaelList class struct ordered_list_traits: public cc::michael_list::traits { typedef std::less<int> less ; // use our std::less<int> predicate as comparator to order list nodes }; }; \endcode Now you are ready to declare your map class based on \p %SplitListMap: \code typedef cc::SplitListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, std::string, foo_set_traits > int_string_map; \endcode You may use the modern option-based declaration instead of the classic traits-based one: \code typedef cc::SplitListMap< cds::urcu::gc< cds::urcu::general_buffered<> > // RCU type ,int // key type ,std::string // value type ,cc::split_list::make_traits< // metafunction to build split-list traits cc::split_list::ordered_list< cc::michael_list_tag > // tag for underlying ordered list implementation ,cc::opt::hash< std::hash<int> > // hash functor ,cc::split_list::ordered_list_traits< // ordered list traits desired cc::michael_list::make_traits< // metafunction to build Michael's list traits cc::opt::less< std::less<int> > // less-based compare functor >::type > >::type > int_string_map; \endcode In case of the option-based declaration using the \p split_list::make_traits metafunction the struct \p foo_set_traits is not required. Now, the map of type \p int_string_map is ready to use in your program. Note that in this example we show only the mandatory \p traits parts; the optional ones are defaulted and inherited from cds::container::split_list::traits. There are many other useful options for deep tuning of the split-list and ordered-list containers. */ template < class RCU, typename Key, typename Value, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListMap< cds::urcu::gc< RCU >, Key, Value, Traits >: protected container::SplitListSet< cds::urcu::gc< RCU >, std::pair<Key const, Value>, split_list::details::wrap_map_traits<Key, Value, Traits> > { //@cond typedef container::SplitListSet< cds::urcu::gc< RCU >, std::pair<Key const, Value>, split_list::details::wrap_map_traits<Key, Value, Traits> > base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< Garbage collector typedef Key key_type; ///< key type typedef Value mapped_type; ///< type of value to be stored in the map typedef Traits traits; ///< Map traits typedef std::pair<key_type const, mapped_type> value_type; ///< key-value pair type typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class typedef typename base_class::key_comparator key_comparator; ///< key comparison functor typedef typename base_class::hash hash; ///< Hash functor for \ref key_type typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename base_class::stat stat; ///< Internal statistics typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock typedef typename base_class::exempt_ptr exempt_ptr; ///< pointer to extracted node /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; typedef typename base_class::raw_ptr raw_ptr; ///< type of \p get() return value protected: //@cond typedef typename base_class::maker::traits::key_accessor key_accessor; //@endcond public: /// Forward iterator typedef typename base_class::iterator iterator; /// Const forward iterator typedef typename base_class::const_iterator const_iterator; /// Returns a forward
iterator addressing the first element in a map /** For an empty map \code begin() == end() \endcode */ iterator begin() { return base_class::begin(); } /// Returns an iterator that addresses the location succeeding the last element in a map /** Do not use the value returned by the end function to access any item. The returned value can be used only to control reaching the end of the map. For an empty map \code begin() == end() \endcode */ iterator end() { return base_class::end(); } /// Returns a forward const iterator addressing the first element in a map //@{ const_iterator begin() const { return base_class::begin(); } const_iterator cbegin() const { return base_class::cbegin(); } //@} /// Returns a const iterator that addresses the location succeeding the last element in a map //@{ const_iterator end() const { return base_class::end(); } const_iterator cend() const { return base_class::cend(); } //@} public: /// Initializes a split-ordered map of default capacity /** The default capacity is defined in the bucket table constructor. See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table which is selected by the \p split_list::dynamic_bucket_table option. */ SplitListMap() : base_class() {} /// Initializes a split-ordered map SplitListMap( size_t nItemCount ///< estimated average item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. ) : base_class( nItemCount, nLoadFactor ) {} public: /// Inserts a new node with \p key and default value /** The function creates a node with \p key and the default value, and then inserts the node created into the map. Preconditions: - The \p key_type should be constructible from a value of type \p K. - The \p mapped_type should be default-constructible. The function applies RCU lock internally. Returns \p true if inserting is successful, \p false otherwise. */ template <typename K> bool insert( K const& key ) { return base_class::emplace( key_type( key ), mapped_type()); } /// Inserts a new node /** The function creates a node with a copy of \p val value and then inserts the node into the map. Preconditions: - The \p key_type should be constructible from \p key of type \p K. - The \p mapped_type should be constructible from \p val of type \p V. The function applies RCU lock internally. Returns \p true if \p val is inserted into the map, \p false otherwise. */ template <typename K, typename V> bool insert( K const& key, V const& val ) { //TODO: pass arguments by reference (make_pair makes a copy) return base_class::emplace( key_type( key ), mapped_type( val )); } /// Inserts a new node and initializes it by a functor /** This function inserts a new node with key \p key and if inserting is successful then it calls the \p func functor with signature \code struct functor { void operator()( value_type& item ); }; \endcode The argument \p item of the user-defined functor \p func is the reference to the map's item inserted: - item.first is a const reference to the item's key that cannot be changed. - item.second is a reference to the item's value that may be changed. Keep in mind that concurrent modifications of \p item.second in the \p func body must be done with care: you should guarantee that while the item's value is being changed in \p func no other changes could be made on this \p item by concurrent threads. \p func is called only if inserting is successful.
The function allows splitting the creation of a new item into several parts: - create an item from \p key; - insert the new item into the map; - if inserting is successful, initialize the value of the item by calling the \p func functor. This can be useful if complete initialization of an object of \p mapped_type is heavyweight and it is preferable that the initialization be completed only if inserting is successful. The function applies RCU lock internally. */ template <typename K, typename Func> bool insert_with( K const& key, Func func ) { //TODO: pass arguments by reference (make_pair makes a copy) return base_class::insert( std::make_pair( key_type( key ), mapped_type()), func ); } /// For key \p key inserts data of type \p mapped_type created in-place from \p args /** \p key_type should be constructible from type \p K The function applies RCU lock internally. Returns \p true if inserting is successful, \p false otherwise. */ template <typename K, typename... Args> bool emplace( K&& key, Args&&... args ) { return base_class::emplace( key_type( std::forward<K>( key )), mapped_type( std::forward<Args>(args)... )); } /// Updates data by \p key /** The operation performs inserting or replacing the element in a lock-free manner. If the \p key is not found in the map, then the new item created from \p key will be inserted into the map iff \p bAllowInsert is \p true (note that in this case the \ref key_type should be constructible from type \p K). Otherwise, if \p key is found, the functor \p func is called with the item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - the item found or inserted The functor may change any fields of \p item.second that is \p mapped_type. The function applies RCU lock internally. Returns std::pair<bool, bool> where \p first is true if the operation is successful, \p second is true if a new item has been added or \p false if the item with \p key already exists. @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the ordered list see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to the inserted item and does not require any node-level synchronization. */ template <typename K, typename Func> std::pair<bool, bool> update( K const& key, Func func, bool bAllowInsert = true ) { //TODO: pass arguments by reference (make_pair makes a copy) typedef decltype( std::make_pair( key_type( key ), mapped_type())) arg_pair_type; return base_class::update( std::make_pair( key_type( key ), mapped_type()), [&func]( bool bNew, value_type& item, arg_pair_type const& /*val*/ ) { func( bNew, item ); }, bAllowInsert ); } //@cond template <typename K, typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( K const& key, Func func ) { return update( key, func, true ); } //@endcond /// Deletes \p key from the map /** \anchor cds_nonintrusive_SplitListMap_rcu_erase_val RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if \p key is found and deleted, \p false otherwise */ template <typename K> bool erase( K const& key ) { return base_class::erase( key ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_erase_val "erase(K const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map.
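A combined sketch of inserting, updating and erasing (assuming the \p int_string_map type declared above): \code int_string_map theMap( 1024, 1 ); theMap.insert( 7, std::string( "seven" )); // update() applies the RCU lock internally - do not call it under rcu_lock theMap.update( 7, []( bool bNew, int_string_map::value_type& item ) { if ( !bNew ) item.second = "SEVEN"; }); // erase() may call RCU synchronize - RCU must not be locked here theMap.erase( 7 ); \endcode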
*/ template <typename K, typename Less> bool erase_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Deletes \p key from the map /** \anchor cds_nonintrusive_SplitListMap_rcu_erase_func The function searches an item with key \p key, calls the \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface is: \code struct extractor { void operator()(value_type& item) { ... } }; \endcode RCU \p synchronize method can be called. RCU should not be locked. Returns \p true if the key is found and deleted, \p false otherwise */ template <typename K, typename Func> bool erase( K const& key, Func f ) { return base_class::erase( key, f ); } /// Deletes the item from the map using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_erase_func "erase(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less, typename Func> bool erase_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>(), f ); } /// Extracts an item from the map /** \anchor cds_nonintrusive_SplitListMap_rcu_extract The function searches an item with key equal to \p key in the map, unlinks it from the map, and returns an \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. Depending on the ordered list you should or should not lock RCU before calling this function: - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked See the ordered list implementation for details. \code typedef cds::urcu::gc< general_buffered<> > rcu; // Split-list set based on MichaelList by default typedef cds::container::SplitListMap< rcu, int, Foo > splitlist_map; splitlist_map theMap; // ... typename splitlist_map::exempt_ptr p; // For MichaelList we should not lock RCU // Now, you can apply the extract function p = theMap.extract( 10 ); if ( p ) { // do something with p ... } // We may safely release p here // release() passes the pointer to the RCU reclamation cycle p.release(); \endcode */ template <typename K> exempt_ptr extract( K const& key ) { return base_class::extract( key ); } /// Extracts an item from the map using \p pred predicate for searching /** The function is an analog of \p extract(K const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> exempt_ptr extract_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::extract_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Finds the key \p key /** \anchor cds_nonintrusive_SplitListMap_rcu_find_cfunc The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of the \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change \p item.second. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the map's \p item.
If such access is possible you must provide your own synchronization schema on the item level to exclude unsafe item modifications. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template <typename K, typename Func> bool find( K const& key, Func f ) { return base_class::find( key, [&f](value_type& pair, K const&){ f( pair ); } ); } /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_find_cfunc "find(K const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less, typename Func> bool find_with( K const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>(), [&f](value_type& pair, K const&){ f( pair ); } ); } /// Checks whether the map contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template <typename K> bool contains( K const& key ) { return base_class::contains( key ); } //@cond template <typename K> CDS_DEPRECATED("deprecated, use contains()") bool find( K const& key ) { return base_class::find( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template <typename K, typename Less> bool contains( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } //@cond template <typename K, typename Less> CDS_DEPRECATED("deprecated, use contains()") bool find_with( K const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and returns the item found /** \anchor cds_intrusive_SplitListMap_rcu_get The function searches the item with key equal to \p key and returns the pointer to the item found. If \p key is not found it returns an empty \p raw_ptr. Note the compare functor should accept a parameter of type \p K that may not be the same as \p value_type. RCU should be locked before calling this function. The returned item is valid only while RCU is locked: \code typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::SplitListMap< rcu, int, Foo > splitlist_map; splitlist_map theMap; // ... { // Lock RCU typename splitlist_map::rcu_lock lock; typename splitlist_map::raw_ptr pVal = theMap.get( 5 ); if ( pVal ) { // Deal with pVal //... } // Unlock RCU by rcu_lock destructor // pVal can be retired by disposer at any time after RCU has been unlocked } \endcode */ template <typename K> raw_ptr get( K const& key ) { return base_class::get( key ); } /// Finds \p key with the predicate specified and returns the item found /** The function is an analog of \ref cds_intrusive_SplitListMap_rcu_get "get(K const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K in any order. \p pred must imply the same element order as the comparator used for building the map.
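By contrast with \p get() / \p get_with(), \p find() takes the RCU lock internally; a sketch (assuming the \p int_string_map type declared above): \code int_string_map theMap( 1024, 1 ); theMap.insert( 3, std::string( "three" )); // No explicit rcu_lock needed: find() locks RCU internally, // and item access is valid only inside the functor bool found = theMap.find( 3, []( int_string_map::value_type& item ) { item.second += "!"; }); \endcode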
*/ template <typename K, typename Less> raw_ptr get_with( K const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::get_with( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>()); } /// Clears the map (not atomic) void clear() { base_class::clear(); } /// Checks if the map is empty /** Emptiness is checked by item counting: if the item count is zero then the map is empty. Thus, correct item counting is an important part of the map implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the map size_t size() const { return base_class::size(); } /// Returns internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns internal statistics for \p ordered_list typename ordered_list::stat const& list_statistics() const { return base_class::list_statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_RCU_H libcds-2.3.3/cds/container/split_list_set.h000066400000000000000000001135641341244201700207330ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_H #define CDSLIB_CONTAINER_SPLIT_LIST_SET_H #include <cds/intrusive/split_list.h> #include <cds/container/details/make_split_list_set.h> #include <cds/container/details/guarded_ptr_cast.h> namespace cds { namespace container { /// Split-ordered list set /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_SplitListSet_hp Hash table implementation based on the split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" - [2008] Nir Shavit "The Art of Multiprocessor Programming" See \p intrusive::SplitListSet for a brief description of the split-list algorithm. Template parameters: - \p GC - Garbage collector used - \p T - type to be stored in the split-list. - \p Traits - type traits, default is \p split_list::traits. Instead of declaring a \p split_list::traits -based struct you may apply option-based notation with the \p split_list::make_traits metafunction. There are these specializations: - for \ref cds_urcu_desc "RCU" - declared in cds/container/split_list_set_rcu.h, see \ref cds_nonintrusive_SplitListSet_rcu "SplitListSet". - for \ref cds::gc::nogc declared in cds/container/split_list_set_nogc.h, see \ref cds_nonintrusive_SplitListSet_nogc "SplitListSet". \par Usage You should decide what garbage collector you want and what ordered list you want to use as a base. The split-ordered list is an original data structure based on an ordered list. Suppose you want to construct a split-list set based on \p gc::DHP GC with \p LazyList as the ordered list implementation. So you begin your program with the following includes: \code #include <cds/container/lazy_list_dhp.h> #include <cds/container/split_list_set.h> namespace cc = cds::container; // The data belonging to the split-ordered list struct foo { int nKey; // key field std::string strValue ; // value field }; \endcode The inclusion order is important: first, include the header for the ordered-list implementation (for this example, cds/container/lazy_list_dhp.h), then the header for the split-list set cds/container/split_list_set.h. Now, you should declare traits for the split-list set. The main parts of the traits are a hash functor for the set and a comparing functor for the ordered list. Note that we define several functions in the foo_hash and foo_less functors for different argument types since we want to call our \p %SplitListSet object with the key of type int and with the value of type foo.
Another important point: instead of using \p %LazyList in the \p %SplitListSet traits we use the tag \p cds::container::lazy_list_tag for the lazy list. The split-list requires significant support from the underlying ordered list class, and it is not a good idea to plunge you into the deep implementation details of split-list and ordered-list interrelations. The tag paradigm simplifies the split-list interface. \code // foo hash functor struct foo_hash { size_t operator()( int key ) const { return std::hash<int>()( key ) ; } size_t operator()( foo const& item ) const { return std::hash<int>()( item.nKey ) ; } }; // foo comparator struct foo_less { bool operator()(int i, foo const& f ) const { return i < f.nKey ; } bool operator()(foo const& f, int i ) const { return f.nKey < i ; } bool operator()(foo const& f1, foo const& f2) const { return f1.nKey < f2.nKey; } }; // SplitListSet traits struct foo_set_traits: public cc::split_list::traits { typedef cc::lazy_list_tag ordered_list; // what type of ordered list we want to use typedef foo_hash hash; // hash functor for our data stored in split-list set // Type traits for our LazyList class struct ordered_list_traits: public cc::lazy_list::traits { typedef foo_less less ; // use our foo_less as comparator to order list nodes }; }; \endcode Now you are ready to declare our set class based on \p %SplitListSet: \code typedef cc::SplitListSet< cds::gc::DHP, foo, foo_set_traits > foo_set; \endcode You may use the modern option-based declaration instead of the classic traits-based one: \code typedef cc::SplitListSet< cds::gc::DHP // GC used ,foo // type of data stored ,cc::split_list::make_traits< // metafunction to build split-list traits cc::split_list::ordered_list< cc::lazy_list_tag > // tag for underlying ordered list implementation ,cc::opt::hash< foo_hash > // hash functor ,cc::split_list::ordered_list_traits< // ordered list traits desired cc::lazy_list::make_traits< // metafunction to build lazy list traits cc::opt::less< foo_less > // less-based compare functor >::type > >::type > foo_set; \endcode In case of the option-based declaration using the split_list::make_traits metafunction the struct \p foo_set_traits is not required. Now, the set of type \p foo_set is ready to use in your program. Note that in this example we show only the mandatory \p traits parts; the optional ones are defaulted and inherited from \p cds::container::split_list::traits. There are many other options for deep tuning of the split-list and ordered-list containers.
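A quick usage sketch of the declared \p foo_set (field values chosen for illustration only): \code foo_set theSet; foo item; item.nKey = 10; item.strValue = "ten"; theSet.insert( item ); // copies item into the set // The foo_hash/foo_less overloads let us search by the int key alone bool found = theSet.contains( 10 ); \endcode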
*/ template < class GC, class T, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListSet: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SplitListSet< GC, typename Traits::ordered_list > #else protected details::make_split_list_set< GC, T, typename Traits::ordered_list, split_list::details::wrap_set_traits<T, Traits> >::type #endif { protected: //@cond typedef details::make_split_list_set< GC, T, typename Traits::ordered_list, split_list::details::wrap_set_traits<T, Traits> > maker; typedef typename maker::type base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< Type of value to be stored in split-list typedef Traits traits; ///< \p Traits template argument typedef typename maker::ordered_list ordered_list; ///< Underlying ordered list class typedef typename base_class::key_comparator key_comparator; ///< key compare functor /// Hash functor for \p %value_type and all its derivatives that you use typedef typename base_class::hash hash; typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename base_class::stat stat; ///< Internal statistics /// Count of hazard pointers required static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; protected: //@cond typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef typename maker::node_type node_type; //@endcond public: /// Guarded pointer typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set<node_type, value_type> > guarded_ptr; protected: //@cond template <bool IsConst> class iterator_type: protected base_class::template iterator_type<IsConst> { typedef typename base_class::template iterator_type<IsConst> iterator_base_class; friend class SplitListSet; public: /// Value pointer type (const for const iterator) typedef typename cds::details::make_const_type<value_type, IsConst>::pointer value_ptr; /// Value reference type (const for const iterator) typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref; public: /// Default ctor iterator_type() {} /// Copy ctor iterator_type( iterator_type const& src ) : iterator_base_class( src ) {} protected: explicit iterator_type( iterator_base_class const& src ) : iterator_base_class( src ) {} public: /// Dereference operator value_ptr operator ->() const { return &(iterator_base_class::operator->()->m_Value); } /// Dereference operator value_ref operator *() const { return iterator_base_class::operator*().m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base_class::operator++(); return *this; } /// Assignment operator iterator_type& operator = (iterator_type const& src) { iterator_base_class::operator=(src); return *this; } /// Equality operator template <bool C> bool operator ==(iterator_type<C> const& i ) const { return iterator_base_class::operator==(i); } /// Equality operator template <bool C> bool operator !=(iterator_type<C> const& i ) const { return iterator_base_class::operator!=(i); } }; //@endcond public: /// Initializes split-ordered list of default capacity /** The default capacity is defined in the bucket table constructor. See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table which is selected by the \p split_list::dynamic_bucket_table option. */ SplitListSet() : base_class() {} /// Initializes split-ordered list SplitListSet( size_t nItemCount ///< estimated average of item count , size_t nLoadFactor = 1 ///< the load factor - average item count per bucket. Small integer up to 8, default is 1.
) : base_class( nItemCount, nLoadFactor ) {} public: ///@name Forward iterators (only for debugging purposes) //@{ /// Forward iterator /** The forward iterator for a split-list has the following features: - it has no post-increment operator - it depends on the underlying ordered list iterator - The iterator object cannot be moved across a thread boundary because it contains a GC guard that is thread-private GC data. - The iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate over all items in the split-list. Moreover, a crash is possible when you try to iterate to the next element that has been deleted by a concurrent thread. @warning Use this iterator on the concurrent container for debugging purposes only. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( base_class::begin()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by the end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return cend(); } /// Returns a const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Inserts new node /** The function creates a node with a copy of \p val value and then inserts the node created into the set. The type \p Q should contain as a minimum the complete key for the node. The object of \ref value_type should be constructible from a value of type \p Q. In the trivial case, \p Q is equal to \ref value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q&& val ) { return insert_node( alloc_node( std::forward( val ))); } /// Inserts new node /** The function allows you to split the creation of a new item into two parts: - create an item with the key only - insert the new item into the set - if the insertion succeeds, call the \p f functor to initialize the value field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the insertion succeeds.
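For example, a possible two-step insertion might look like this (a sketch; it assumes \p value_type is constructible from an \p int key and has a non-key field \p strValue as in the class-level example):

\code
// Create the item from the key 10; the payload is filled only if the insertion succeeds
bool bOk = theSet.insert( 10, []( value_type& v ) { v.strValue = "ten"; } );
\endcode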
@warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to the inserted item and does not require any node-level synchronization. */ template bool insert( Q&& val, Func f ) { scoped_node_ptr pNode( alloc_node( std::forward( val ))); if ( base_class::insert( *pNode, [&f](node_type& node) { f( node.m_Value ) ; } )) { pNode.release(); return true; } return false; } /// Inserts data of type \p value_type created from \p args /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { return insert_node( alloc_node( std::forward(args)...)); } /// Inserts or updates the node (only for \p IterableList -based set) /** The operation performs inserting or changing data in a lock-free manner. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the current element is changed to \p val, and the old element will be retired later. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key is already in the set. */ template #ifdef CDS_DOXYGEN_INVOKED std::pair #else typename std::enable_if< std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, std::pair >::type #endif upsert( Q&& val, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( std::forward( val ))); auto bRet = base_class::upsert( *pNode, bAllowInsert ); if ( bRet.first ) pNode.release(); return bRet; } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If \p key is not found in the set, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The signature of the functor \p func depends on the ordered list: for \p MichaelList, \p LazyList \code struct functor { void operator()( bool bNew, value_type& item, Q const& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function The functor may change non-key fields of the \p item. for \p IterableList \code void func( value_type& val, value_type * old ); \endcode where - \p val - new data constructed from \p key - \p old - the old value that will be retired. If a new item has been inserted then \p old is \p nullptr. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key is already in the set. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" and \ref cds_nonintrusive_IterableList_gc "IterableList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to the inserted item and does not require any node-level synchronization.
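A possible \p %update() call for a \p MichaelList / \p LazyList -based set might look like this (a sketch; the set \p theSet, the \p int key and the non-key field \p nCounter are assumptions for illustration):

\code
// Insert the item with key 42 if it is absent, otherwise bump its counter
std::pair<bool, bool> res = theSet.update( 42,
    []( bool bNew, value_type& item, int const& /*key*/ ) {
        if ( !bNew )
            ++item.nCounter;   // modify only non-key fields
    });
// res.first  - the operation succeeded
// res.second - a new item has been inserted
\endcode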
*/ template #ifdef CDS_DOXYGEN_INVOKED std::pair #else typename std::enable_if< std::is_same::value && !is_iterable_list::value, std::pair >::type #endif update( Q&& val, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( std::forward( val ))); auto bRet = base_class::update( *pNode, [&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) { func( bNew, item.m_Value, val ); }, bAllowInsert ); if ( bRet.first && bRet.second ) pNode.release(); return bRet; } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list::value, std::pair >::type update( Q&& val, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( std::forward( val ))); auto bRet = base_class::update( *pNode, [&func]( node_type& item, node_type* old ) { func( item.m_Value, old ? &old->m_Value : nullptr ); }, bAllowInsert ); if ( bRet.first ) pNode.release(); return bRet; } //@endcond //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& val, Func func ) { return update( val, func, true ); } //@endcond /// Deletes \p key from the set /** \anchor cds_nonintrusive_SplitListSet_erase_val The item comparator should be able to compare the values of type \p value_type and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return base_class::erase( key ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListSet_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, typename maker::template predicate_wrapper()); } /// Deletes \p key from the set /** \anchor cds_nonintrusive_SplitListSet_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Since the key of split-list \p value_type is not explicitly specified, template parameter \p Q defines the key type searching in the list. The list item comparator should be able to compare the values of the type \p value_type and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { return base_class::erase( key, [&f](node_type& node) { f( node.m_Value ); } ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, typename maker::template predicate_wrapper(), [&f](node_type& node) { f( node.m_Value ); } ); } /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. 
The function does not invalidate the iterator, it remains valid and can be used for further traversal. @note \p %erase_at() is supported only for \p %SplitListSet based on \p IterableList. */ #ifdef CDS_DOXYGEN_INVOKED bool erase_at( iterator const& iter ) #else template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type erase_at( Iterator const& iter ) #endif { return base_class::erase_at( static_cast( iter )); } /// Extracts the item with specified \p key /** \anchor cds_nonintrusive_SplitListSet_hp_extract The function searches an item with key equal to \p key, unlinks it from the set, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that may not be the same as \p value_type. The extracted item is freed automatically when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses a GC guard, which can be a limited resource. Usage: \code typedef cds::container::SplitListSet< your_template_args > splitlist_set; splitlist_set theSet; // ... { splitlist_set::guarded_ptr gp(theSet.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_( key ); } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_nonintrusive_SplitListSet_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr extract_with( Q const& key, Less pred ) { return extract_with_( key, pred ); } /// Finds the key \p key /** \anchor cds_nonintrusive_SplitListSet_find_func The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the only guarantee for the functor is that \p item cannot be disposed of while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible, you must provide your own synchronization scheme at the item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as the \p f functor's destination, i.e., the functor may modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may not be the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_( key, f ); } //@cond template bool find( Q const& key, Func f ) { return find_( key, f ); } //@endcond /// Finds \p key and returns an iterator pointing to the item found (only for \p IterableList -based set) /** If \p key is not found the function returns \p end().
@note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find( Q& key ) { return find_iterator_( key ); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find( Q const& key ) { return find_iterator_( key ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListSet_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { return find_with_( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { return find_with_( key, pred, f ); } //@endcond /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList -based set) /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. If \p key is not found the function returns \p end(). @note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find_with( Q& key, Less pred ) { return find_iterator_with_( key, pred ); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find_with( Q const& key, Less pred ) { return find_iterator_with_( key, pred ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. */ template bool contains( Q const& key ) { return base_class::contains( key ); } /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, typename maker::template predicate_wrapper()); } /// Finds the key \p key and return the item found /** \anchor cds_nonintrusive_SplitListSet_hp_get The function searches the item with key equal to \p key and returns the item found as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::container::SplitListSet< your_template_params > splitlist_set; splitlist_set theSet; // ... { splitlist_set::guarded_ptr gp(theSet.get( 5 )); if ( gp ) { // Deal with gp //... 
} // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for split-list set should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) { return get_( key ); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_nonintrusive_SplitListSet_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr get_with( Q const& key, Less pred ) { return get_with_( key, pred ); } /// Clears the set (not atomic) void clear() { base_class::clear(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then assume that the set is empty. Thus, the correct item counting feature is an important part of split-list set implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the set size_t size() const { return base_class::size(); } /// Returns internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns internal statistics for \p ordered_list typename ordered_list::stat const& list_statistics() const { return base_class::list_statistics(); } protected: //@cond using base_class::extract_; using base_class::get_; template static node_type * alloc_node( Args&&... args ) { return cxx_node_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } template bool find_( Q& val, Func f ) { return base_class::find( val, [&f]( node_type& item, Q& v ) { f( item.m_Value, v ); } ); } template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator>::type find_iterator_( Q& val ) { return iterator( base_class::find( val )); } template bool find_with_( Q& val, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( val, typename maker::template predicate_wrapper(), [&f]( node_type& item, Q& v ) { f( item.m_Value, v ); } ); } template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator>::type find_iterator_with_( Q& val, Less pred ) { CDS_UNUSED( pred ); return iterator( base_class::find_with( val, typename maker::template predicate_wrapper())); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; bool insert_node( node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert( *pNode )) { p.release(); return true; } return false; } template guarded_ptr extract_with_( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::extract_with_( key, typename maker::template predicate_wrapper()); } template guarded_ptr get_with_( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::get_with_( key, typename maker::template predicate_wrapper()); } //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_H libcds-2.3.3/cds/container/split_list_set_nogc.h000066400000000000000000000351451341244201700217400ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_NOGC_H #define CDSLIB_CONTAINER_SPLIT_LIST_SET_NOGC_H #include #include #include #include namespace cds { namespace container { /// Split-ordered list set (template specialization for \p gc::nogc) /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_SplitListSet_nogc This specialization is a so-called append-only container in which no item reclamation is performed. The class does not support deletion of list items. See \ref cds_nonintrusive_SplitListSet_hp "SplitListSet" for a description of the template parameters. @warning Many member functions return an iterator pointing to an item. The iterator can be used to set up a field of the item, but you should provide exclusive access to it, see \ref cds_intrusive_item_creating "insert item troubleshooting". */ template < class T, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListSet< cds::gc::nogc, T, Traits> #ifdef CDS_DOXYGEN_INVOKED :protected intrusive::SplitListSet #else :protected details::make_split_list_set< cds::gc::nogc, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type #endif { protected: //@cond typedef details::make_split_list_set< cds::gc::nogc, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::gc::nogc gc; ///< Garbage collector typedef T value_type; ///< type of value to be stored in the list typedef Traits traits; ///< List traits typedef typename maker::ordered_list ordered_list; ///< Underlying ordered list class typedef typename base_class::key_comparator key_comparator; ///< key comparison functor /// Hash functor for \ref value_type and all its derivatives that you use typedef typename base_class::hash hash; typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename base_class::stat stat; ///< Internal statistics protected: //@cond typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef typename maker::node_type node_type; template static node_type * alloc_node(Q const& v ) { return cxx_node_allocator().New( v ); } template static node_type * alloc_node( Args&&... args ) { return cxx_node_allocator().MoveNew( std::forward(args)...); } static void free_node( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Initializes split-ordered list of default capacity /** The default capacity is defined in the bucket table constructor. See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table which are selected by the \p split_list::dynamic_bucket_table option. */ SplitListSet() : base_class() {} /// Initializes split-ordered list SplitListSet( size_t nItemCount ///< estimated average of item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1.
) : base_class( nItemCount, nLoadFactor ) {} protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base_class; friend class SplitListSet; public: /// Value pointer type (const for const iterator) typedef typename cds::details::make_const_type::pointer value_ptr; /// Value reference type (const for const iterator) typedef typename cds::details::make_const_type::reference value_ref; public: /// Default ctor iterator_type() {} /// Copy ctor iterator_type( iterator_type const& src ) : iterator_base_class( src ) {} protected: explicit iterator_type( iterator_base_class const& src ) : iterator_base_class( src ) {} public: /// Dereference operator value_ptr operator ->() const { return &(iterator_base_class::operator->()->m_Value); } /// Dereference operator value_ref operator *() const { return iterator_base_class::operator*().m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base_class::operator++(); return *this; } /// Assignment operator iterator_type& operator = (iterator_type const& src) { iterator_base_class::operator=(src); return *this; } /// Equality operator template bool operator ==(iterator_type const& i ) const { return iterator_base_class::operator==(i); } /// Equality operator template bool operator !=(iterator_type const& i ) const { return iterator_base_class::operator!=(i); } }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for split-list is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( base_class::begin()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. 
For empty set \code begin() == end() \endcode */ iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return cend(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return const_iterator( base_class::cend()); } //@} protected: //@cond iterator insert_node( node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p(pNode); iterator it( base_class::insert_( *pNode )); if ( it != end()) { p.release(); return it; } return end(); } //@endcond public: /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. The \p value_type should be constructible from a value of type \p Q. Return an iterator pointing to inserted item if success \p end() otherwise */ template iterator insert( const Q& val ) { return insert_node( alloc_node( val )); } /// Inserts data of type \p value_type created from \p args /** Return an iterator pointing to inserted item if success \p end() otherwise */ template iterator emplace( Args&&... args ) { return insert_node( alloc_node( std::forward(args)... )); } /// Updates the item /** If \p key is not in the set and \p bAllowInsert is \p true, the function inserts a new item. Otherwise, the function returns an iterator pointing to the item found. Returns std::pair where \p first is an iterator pointing to item found or inserted (if inserting is not allowed and \p key is not found, the iterator will be \p end()), \p second is true if new item has been added or \p false if the item already is in the set. @warning If the set is based on \ref cds_nonintrusive_MichaelList_nogc "MichaelList", see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_nonintrusive_LazyList_nogc "LazyList" as the base provides exclusive access to inserted item and does not require any node-level synchronization. */ template std::pair update( Q const& key, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( key )); std::pair ret = base_class::update_( *pNode, [](bool /*bNew*/, node_type& /*item*/, node_type& /*val*/){}, bAllowInsert ); if ( ret.first != base_class::end() && ret.second ) { pNode.release(); return std::make_pair( iterator(ret.first), ret.second ); } return std::make_pair( iterator(ret.first), ret.second ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( const Q& val ) { return update( val, true ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns an iterator pointed to item found and \ref end() otherwise */ template iterator contains( Q const& key ) { return iterator( base_class::find_( key )); } //@cond template CDS_DEPRECATED("deprecated, use contains()") iterator find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. 
\p pred must imply the same element order as the comparator used for building the set. */ template iterator contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return iterator( base_class::find_with_( key, typename maker::template predicate_wrapper())); } //@cond // Deprecated, use contains() template iterator find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Clears the set (not atomic, for debugging purposes only) void clear() { base_class::clear(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of split-list set implementation. */ bool empty() const { return base_class::empty(); } /// Returns item count in the set size_t size() const { return base_class::size(); } /// Returns internal statistics stat const& statistics() const { return base_class::statistics(); } /// Returns internal statistics for \p ordered_list typename ordered_list::stat const& list_statistics() const { return base_class::list_statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_NOGC_H libcds-2.3.3/cds/container/split_list_set_rcu.h000066400000000000000000001131471341244201700216020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_RCU_H #define CDSLIB_CONTAINER_SPLIT_LIST_SET_RCU_H #include #include #include namespace cds { namespace container { //@cond namespace split_list { namespace details { template < typename T, class OrdList, typename OrdListTag > class make_raw_ptr; #ifdef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H template class make_raw_ptr< T, RawPtr, cds::container::michael_list_tag > { typedef RawPtr intrusive_raw_ptr; typedef typename intrusive_raw_ptr::value_type node_type; typedef T value_type; struct raw_ptr_converter { value_type * operator()( node_type * p ) const { return p ? &p->m_Value : nullptr; } value_type& operator()( node_type& n ) const { return n.m_Value; } value_type const& operator()( node_type const& n ) const { return n.m_Value; } }; public: typedef cds::urcu::raw_ptr_adaptor< value_type, intrusive_raw_ptr, raw_ptr_converter > raw_ptr; static raw_ptr make( intrusive_raw_ptr&& p ) { return raw_ptr(std::move( p )); } }; #endif #ifdef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H template class make_raw_ptr< T, RawPtr, cds::container::lazy_list_tag > { typedef RawPtr node_type_pointer; typedef T value_type; public: typedef value_type * raw_ptr; static raw_ptr make( node_type_pointer p ) { return p ? &p->m_Value : nullptr; } }; #endif }} //namespace split_list::details //@endcond /// Split-ordered list set (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_nonintrusive_set \anchor cds_nonintrusive_SplitListSet_rcu Hash table implementation based on the split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" See \p intrusive::SplitListSet for a brief description of the split-list algorithm. Template parameters: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - type of the value to be stored in the split-list. - \p Traits - type traits, default is \p split_list::traits.
Instead of declaring a \p split_list::traits -based struct, you can apply the option-based notation with the \p split_list::make_traits metafunction. Iterators The class supports a forward iterator (\ref iterator and \ref const_iterator). The iteration is unordered. You may iterate over split-list set items only under RCU lock. Only in this case is the iterator thread-safe, since no set item can be reclaimed while RCU is locked. @warning The iterator object cannot be passed between threads. \warning Due to the concurrent nature of the split-list set, there is no guarantee that you can iterate over all elements of the set: any concurrent deletion can exclude the element pointed to by the iterator from the set, and your iteration can be terminated before the end of the set. Therefore, such iteration is more suitable for debugging purposes. The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note that the iterator object returned by the \p end() and \p cend() member functions points to \p nullptr and should not be dereferenced. \par Usage You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list is an original data structure based on an ordered list. Suppose you want to construct a split-list set based on the \p cds::urcu::general_buffered<> GC and \p LazyList as the ordered list implementation. So, you begin your program with the following includes: \code #include #include #include namespace cc = cds::container; // The data belonging to the split-ordered list struct foo { int nKey; // key field std::string strValue ; // value field }; \endcode The inclusion order is important: - first, include one of the \ref cds_urcu_gc "RCU implementations" (cds/urcu/general_buffered.h in our case) - second, include the file for the ordered-list implementation (for this example, cds/container/lazy_list_rcu.h), - then, the header for the RCU-based split-list set cds/container/split_list_set_rcu.h. Now you should declare traits for the split-list set. The main parts of the traits are a hash functor for the set and a comparison functor for the ordered list. Note that we define several functions in the \p foo_hash and \p foo_less functors for different argument types, since we want to call our \p %SplitListSet object by a key of type \p int and by a value of type \p foo. A second point: instead of using \p %LazyList in the \p %SplitListSet traits, we use the \p cds::container::lazy_list_tag tag for the lazy list. The split-list requires significant support from the underlying ordered list class, and it is not a good idea to plunge you into the deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies the split-list interface.
\code // foo hash functor struct foo_hash { size_t operator()( int key ) const { return std::hash<int>()( key ) ; } size_t operator()( foo const& item ) const { return std::hash<int>()( item.nKey ) ; } }; // foo comparator struct foo_less { bool operator()(int i, foo const& f ) const { return i < f.nKey ; } bool operator()(foo const& f, int i ) const { return f.nKey < i ; } bool operator()(foo const& f1, foo const& f2) const { return f1.nKey < f2.nKey; } }; // SplitListSet traits struct foo_set_traits: public cc::split_list::traits { typedef cc::lazy_list_tag ordered_list ; // what type of ordered list we want to use typedef foo_hash hash ; // hash functor for our data stored in split-list set // Type traits for our LazyList class struct ordered_list_traits: public cc::lazy_list::traits { typedef foo_less less ; // use our foo_less as comparator to order list nodes }; }; \endcode Now you are ready to declare our set class based on \p %SplitListSet: \code typedef cc::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, foo_set_traits > foo_set; \endcode You may use the modern option-based declaration instead of the classic traits-based one: \code typedef cc::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> > // RCU type used ,foo // type of data stored ,cc::split_list::make_traits< // metafunction to build split-list traits cc::split_list::ordered_list< cc::lazy_list_tag > // tag for underlying ordered list implementation ,cc::opt::hash< foo_hash > // hash functor ,cc::split_list::ordered_list_traits< // ordered list traits cc::lazy_list::make_traits< // metafunction to build lazy list traits cc::opt::less< foo_less > // less-based compare functor >::type > >::type > foo_set; \endcode In case of option-based declaration using the \p split_list::make_traits metafunction, the struct \p foo_set_traits is not required. Now, the set of type \p foo_set is ready to use in your program. Note that in this example we show only the mandatory \p traits parts; the optional ones are defaulted and inherited from \p container::split_list::traits. There are many other options for fine-tuning the split-list and ordered-list containers. */ template < class RCU, class T, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListSet< cds::urcu::gc< RCU >, T, Traits >: #ifdef CDS_DOXYGEN_INVOKED protected intrusive::SplitListSet< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, Traits > #else protected details::make_split_list_set< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type #endif { protected: //@cond typedef details::make_split_list_set< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; typedef typename maker::type base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< RCU-based garbage collector typedef T value_type; ///< Type of value to be stored in the set typedef Traits traits; ///< \p Traits template argument // Note: ordered_list is not the real ordered list type.
Actual type is base_class::ordered_list typedef typename maker::ordered_list ordered_list; ///< Underlying ordered list class typedef typename base_class::key_comparator key_comparator; ///< key compare functor /// Hash functor for \ref value_type and all its derivatives that you use typedef typename base_class::hash hash; typedef typename base_class::item_counter item_counter; ///< Item counter type typedef typename base_class::stat stat; ///< Internal statistics typedef typename base_class::rcu_lock rcu_lock ; ///< RCU scoped lock /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; protected: //@cond typedef typename maker::cxx_node_allocator cxx_node_allocator; typedef typename maker::node_type node_type; //@endcond public: /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::ordered_list_traits::disposer >; # ifdef CDS_DOXYGEN_INVOKED /// pointer to the node for \p get() function /** For \p LazyList, \p %raw_ptr is just pointer to \p value_type. For \p MichaelList, \p %raw_ptr is \p cds::urcu::raw_ptr object giving access to \p value_type. */ typedef implementation_defined raw_ptr; # else private: typedef split_list::details::make_raw_ptr< value_type, typename base_class::ordered_list::raw_ptr, typename traits::ordered_list > raw_ptr_maker; public: typedef typename raw_ptr_maker::raw_ptr raw_ptr; #endif protected: //@cond template bool find_( Q& val, Func f ) { return base_class::find( val, [&f]( node_type& item, Q& v ) { f(item.m_Value, v) ; } ); } template bool find_with_( Q& val, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::find_with( val, typename maker::template predicate_wrapper(), [&f]( node_type& item, Q& v ) { f(item.m_Value, v) ; } ); } template static node_type * alloc_node( Q const& v ) { return cxx_node_allocator().New( v ); } template static node_type * alloc_node( Args&&... 
args ) { return cxx_node_allocator().MoveNew( std::forward(args)...); } static void free_node( node_type * pNode ) { cxx_node_allocator().Delete( pNode ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; bool insert_node( node_type * pNode ) { assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::insert( *pNode )) { p.release(); return true; } return false; } //@endcond protected: //@cond template class iterator_type: protected base_class::template iterator_type { typedef typename base_class::template iterator_type iterator_base_class; friend class SplitListSet; public: /// Value pointer type (const for const iterator) typedef typename cds::details::make_const_type::pointer value_ptr; /// Value reference type (const for const iterator) typedef typename cds::details::make_const_type::reference value_ref; public: /// Default ctor iterator_type() {} /// Copy ctor iterator_type( iterator_type const& src ) : iterator_base_class( src ) {} protected: explicit iterator_type( iterator_base_class const& src ) : iterator_base_class( src ) {} public: /// Dereference operator value_ptr operator ->() const { return &(iterator_base_class::operator->()->m_Value); } /// Dereference operator value_ref operator *() const { return iterator_base_class::operator*().m_Value; } /// Pre-increment iterator_type& operator ++() { iterator_base_class::operator++(); return *this; } /// Assignment operator iterator_type& operator = (iterator_type const& src) { iterator_base_class::operator=(src); return *this; } /// Equality operator template bool operator ==(iterator_type const& i ) const { return iterator_base_class::operator==(i); } /// Equality operator template bool operator !=(iterator_type const& i ) const { return iterator_base_class::operator!=(i); } }; //@endcond public: /// Initializes split-ordered list of default capacity /** The default capacity is defined in the bucket table constructor. See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table which are selected by the \p container::split_list::dynamic_bucket_table option. */ SplitListSet() : base_class() {} /// Initializes split-ordered list SplitListSet( size_t nItemCount ///< estimated average of item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. ) : base_class( nItemCount, nLoadFactor ) {} public: ///@name Forward iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator for the split-list set is based on \p OrderedList forward iterator and has the following features: - it has no post-increment operator - it iterates over items in an unordered fashion You may safely use iterators in a multi-threaded environment only under the RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to.
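For instance, a safe traversal could look like this (a sketch, assuming the \p foo_set typedef from the class-level example):

\code
foo_set theSet;
// ...
{
    // Lock RCU for the whole traversal
    foo_set::rcu_lock lock;
    for ( foo_set::iterator it = theSet.begin(); it != theSet.end(); ++it ) {
        // it->nKey and it->strValue may be read here:
        // the item cannot be reclaimed while RCU is locked
    }
}   // RCU is unlocked by the rcu_lock destructor
\endcode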
The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy constructor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef iterator_type iterator; /// Forward const iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( base_class::begin()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by the end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( base_class::end()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( base_class::cbegin()); } /// Returns a const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return cend(); } /// Returns a const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return const_iterator( base_class::cend()); } //@} public: /// Inserts new node /** The function creates a node with a copy of \p val value and then inserts the node created into the set. The type \p Q should contain as a minimum the complete key for the node. The object of \p value_type should be constructible from a value of type \p Q. In the trivial case, \p Q is equal to \p value_type. The function applies RCU lock internally. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q const& val ) { return insert_node( alloc_node( val )); } /// Inserts new node /** The function allows you to split the creation of a new item into two parts: - create an item with the key only - insert the new item into the set - if the insertion succeeds, call the \p f functor to initialize the value field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor \p f should guarantee that while \p val is being changed, no other changes can be made to this set item by concurrent threads. The user-defined functor is called only if the insertion succeeds. The function applies RCU lock internally. */ template bool insert( Q const& key, Func f ) { scoped_node_ptr pNode( alloc_node( key )); if ( base_class::insert( *pNode, [&f](node_type& node) { f( node.m_Value ) ; } )) { pNode.release(); return true; } return false; } /// Inserts data of type \p value_type created from \p args /** Returns \p true if inserting successful, \p false otherwise. The function applies RCU lock internally. */ template bool emplace( Args&&... args ) { return insert_node( alloc_node( std::forward(args)...)); } /// Updates an element with given \p val /** The operation performs inserting or changing data in a lock-free manner.
If the \p val key is not found in the set, then a new item created from \p val is inserted into the set. Otherwise, the functor \p func is called with the item found. The functor \p Func signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %ensure() function The functor may change non-key fields of the \p item; however, \p func must guarantee that no other modifications can be made to this item by concurrent threads while it is being changed. The function applies RCU lock internally. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key is already in the set. */ /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If \p key is not found in the set, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function The functor may change non-key fields of the \p item. The function applies RCU lock internally. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key is already in the set. @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to the inserted item and does not require any node-level synchronization. */ template std::pair update( Q const& val, Func func, bool bAllowInsert = true ) { scoped_node_ptr pNode( alloc_node( val )); std::pair bRet = base_class::update( *pNode, [&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) { func( bNew, item.m_Value, val ); }, bAllowInsert ); if ( bRet.first && bRet.second ) pNode.release(); return bRet; } //@cond // Deprecated, use update() template std::pair ensure( Q const& val, Func func ) { return update( val, func, true ); } //@endcond /// Deletes \p key from the set /** \anchor cds_nonintrusive_SplitListSet_rcu_erase_val Template parameter of type \p Q defines the key type searching in the list. The set item comparator should be able to compare the values of type \p value_type and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return base_class::erase( key ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set.
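For example, a call with a hypothetical predicate ordering items by their integer key might look like this (a sketch; \p key_less and \p theSet are illustrations only):

\code
// The predicate must impose the same order as the set comparator
struct key_less {
    bool operator()( foo const& f, int k ) const { return f.nKey < k; }
    bool operator()( int k, foo const& f ) const { return k < f.nKey; }
};

// Delete the item with key 7; RCU should not be locked here
bool bRemoved = theSet.erase_with( 7, key_less());
\endcode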
*/ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::erase_with( key, typename maker::template predicate_wrapper()); } /// Deletes \p key from the set /** \anchor cds_nonintrusive_SplitListSet_rcu_erase_func The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface: \code struct extractor { void operator()(value_type const& val); }; \endcode Template parameter of type \p Q defines the key type searching in the list. The list item comparator should be able to compare the values of the type \p value_type and the type \p Q. RCU \p synchronize method can be called. RCU should not be locked. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { return base_class::erase( key, [&f](node_type& node) { f( node.m_Value ); } ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return base_class::erase_with( key, typename maker::template predicate_wrapper(), [&f](node_type& node) { f( node.m_Value ); } ); } /// Extracts an item from the set /** \anchor cds_nonintrusive_SplitListSet_rcu_extract The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. Depends on \p bucket_type you should or should not lock RCU before calling of this function: - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked See ordered list implementation for details. \code typedef cds::urcu::gc< general_buffered<> > rcu; // Split-list set based on MichaelList by default typedef cds::container::SplitListSet< rcu, Foo > splitlist_set; splitlist_set theSet; // ... splitlist_set::exempt_ptr p; // For MichaelList we should not lock RCU // Now, you can apply extract function p = theSet.extract( 10 ); if ( p ) { // do something with p ... } // We may safely release p here // release() passes the pointer to RCU reclamation cycle p.release(); \endcode */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( base_class::extract_( key, key_comparator())); } /// Extracts an item from the set using \p pred predicate for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( base_class::extract_with_( key, typename maker::template predicate_wrapper())); } /// Finds the key \p key /** \anchor cds_nonintrusive_SplitListSet_rcu_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. 
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the only guarantee for the functor is that \p item cannot be disposed of while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible, you must provide your own synchronization scheme at the item level to exclude unsafe item modifications. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may not be the same as \p value_type. The function applies the RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_( key, f ); } //@cond template bool find( Q const& key, Func f ) { return find_( key, f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { return find_with_( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { return find_with_( key, pred, f ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that may not be the same as \p value_type. Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. The function applies RCU lock internally. */ template bool contains( Q const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return base_class::contains( key, typename maker::template predicate_wrapper()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and returns the item found /** \anchor cds_nonintrusive_SplitListSet_rcu_get The function searches the item with key equal to \p key and returns a pointer to the item found. If \p key is not found it returns \p nullptr. Note the compare functor should accept a parameter of type \p Q that may not be the same as \p value_type. RCU should be locked before calling this function. The returned item is valid only while RCU is locked: \code typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::container::SplitListSet< rcu, Foo > splitlist_set; splitlist_set theSet; // ... { // Lock RCU splitlist_set::rcu_lock lock; foo * pVal = theSet.get( 5 ); if ( pVal ) { // Deal with pVal //...
                }
                // Unlock RCU by rcu_lock destructor
                // pVal can be retired by disposer at any time after RCU has been unlocked
            }
            \endcode
        */
        template <typename Q>
        raw_ptr get( Q const& key )
        {
            return raw_ptr_maker::make( base_class::get( key ));
        }

        /// Finds the key \p key and returns the item found
        /**
            The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_get "get(Q const&)"
            but \p pred is used for comparing the keys.

            \p Less functor has the semantics like \p std::less but should take arguments
            of type \ref value_type and \p Q in any order.
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less>
        raw_ptr get_with( Q const& key, Less pred )
        {
            CDS_UNUSED( pred );
            return raw_ptr_maker::make( base_class::get_with( key, typename maker::template predicate_wrapper<Less>()));
        }

        /// Clears the set (not atomic)
        void clear()
        {
            base_class::clear();
        }

        /// Checks if the set is empty
        /**
            Emptiness is checked by item counting: if the item count is zero then the set is assumed to be empty.
            Thus, a correct item counting feature is an important part of the split-list set implementation.
        */
        bool empty() const
        {
            return base_class::empty();
        }

        /// Returns item count in the set
        size_t size() const
        {
            return base_class::size();
        }

        /// Returns internal statistics
        stat const& statistics() const
        {
            return base_class::statistics();
        }

        /// Returns internal statistics for \p ordered_list
        typename ordered_list::stat const& list_statistics() const
        {
            return base_class::list_statistics();
        }
    };
}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_RCU_H
libcds-2.3.3/cds/container/striped_map.h000066400000000000000000001200711341244201700201710ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_CONTAINER_STRIPED_MAP_H
#define CDSLIB_CONTAINER_STRIPED_MAP_H

#include
#include
#include
#include

namespace cds { namespace container {

    //@cond
    namespace details {
        template <class Container, typename... Options>
        class make_striped_map
        {
            typedef StripedSet< Container, Options...> billet;
            typedef typename billet::options billet_options;
            typedef typename billet_options::hash billet_hash;

            typedef typename Container::value_type pair_type;
            typedef typename pair_type::first_type key_type;

            struct options: public billet_options {
                struct hash: public billet_hash {
                    size_t operator()( pair_type const& v ) const
                    {
                        return billet_hash::operator()( v.first );
                    }
                    template <typename Q>
                    size_t operator()( Q const& v ) const
                    {
                        return billet_hash::operator()( v );
                    }
                };
            };

        public:
            typedef StripedSet< Container, cds::opt::type_traits< options > > type ;   ///< metafunction result
        };
    }
    //@endcond

    /// Striped hash map
    /** @ingroup cds_nonintrusive_map

        Source
            - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming"

        Lock striping is a very simple technique.
        The map consists of a bucket table and an array of locks.
        Initially, the capacity of the lock array and the bucket table is the same.
        When the map is resized, the bucket table capacity is doubled but the lock array is not.
        The lock \p i protects each bucket \p j, where j = i mod L,
        where \p L is the size of the lock array.

        Template arguments:
        - \p Container - the container class that is used as the bucket entry. The \p Container class should support
            a uniform interface described below.
        - \p Options - options

        The \p %StripedMap class does not exactly specify the type of container that should be used as a \p Container bucket.
        Instead, the class supports different container types for the bucket, for example,
        \p std::list, \p std::map, and others.

        Remember that the \p %StripedMap algorithm ensures sequential blocking access to its bucket
        through the mutex type you specify among the \p Options template arguments.

        The \p Options are:
        - \p cds::opt::mutex_policy - concurrent access policy.
            Available policies: \p striped_set::striping, \p striped_set::refinable.
            Default is \p %striped_set::striping.
        - \p cds::opt::hash - hash functor. For the default option value, see \p opt::v::hash_selector,
            which selects a default hash functor for your compiler.
        - \p cds::opt::compare - key comparison functor. No default functor is provided.
            If the option is not specified, \p %opt::less is used.
        - \p cds::opt::less - specifies the binary predicate used for key comparison. Default is \p std::less.
        - \p cds::opt::item_counter - item counter type. Default is \p atomicity::item_counter
            since some operations on the counter are performed without locks.
            Note that item counting is an essential part of the map algorithm, so a dummy counter
            like \p atomicity::empty_item_counter is not suitable.
        - \p cds::opt::allocator - the allocator type used for memory allocation of the bucket table
            and the lock array. Default is \ref CDS_DEFAULT_ALLOCATOR.
        - \p cds::opt::resizing_policy - the resizing policy, a functor that decides when to resize the hash map.
            The default option value depends on the bucket container type:
            for sequential containers like \p std::list or \p std::vector the resizing policy is
            striped_set::load_factor_resizing<4>;
            for other types of containers like \p std::map or \p std::unordered_map the resizing policy is
            \p striped_set::no_resizing.
            See \ref cds_striped_resizing_policy "available resizing policies".
            Note that the choice of resizing policy depends on the \p Container type:
            for sequential containers like \p std::list or \p std::vector, choosing the right policy
            can significantly improve performance.
            For other, non-sequential types of \p Container (like \p std::map) the resizing policy
            is not so important.
        - \p cds::opt::copy_policy - the copy policy which is used to copy items from the old map
            to the new one when resizing. The policy may optionally be used by the adapted bucket
            container to improve resizing performance. The details of the copy algorithm depend on
            the type of bucket container and are explained below.

        The \p %opt::compare or \p %opt::less options are used only by some \p Container classes
        for searching an item.
        The \p %opt::compare option has the highest priority: if \p %opt::compare is specified,
        \p %opt::less is not used.

        You can pass other options; they will be forwarded to the \p adapt metafunction, see below.

        <b>Internal details</b>

        The \p %StripedMap class cannot utilize the specified \p Container directly, but only
        its adapted variant, which supports a unified interface. Internally, the adaptation is made
        via the \p striped_set::adapt metafunction that wraps the bucket container and provides
        the unified bucket interface suitable for \p %StripedMap. Such adaptation is completely
        transparent for you - you don't need to call the \p adapt metafunction directly;
        \p %StripedMap class's internal machinery itself invokes the appropriate \p adapt
        metafunction to adjust your \p Container class to \p %StripedMap bucket's internal interface.
        All you need is to include the right header before striped_map.h.

        By default, the striped_set::adapt metafunction does not apply any wrapping to \p AnyContainer,
        so the result striped_set::adapt< AnyContainer, Options... >::type is the same as \p AnyContainer.
        However, there are a lot of specializations of \p adapt for well-known containers, see the table below.
        Each of these specializations wraps the corresponding container, making it suitable for the map's bucket.
        Remember, you should include the proper header file for \p adapt before striped_map.h.
        <table>
            <tr>
                <th>Container</th>
                <th>.h-file for \p adapt</th>
                <th>Example</th>
                <th>Notes</th>
            </tr>
            <tr>
                <td> \p std::list</td>
                <td><tt>cds/container/striped_map/std_list.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/std_list.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        std::list< std::pair< const Key, V > >,
                        cds::opt::less< std::less<Key> >
                    > striped_map;
                \endcode
                </td>
                <td>
                    The type of values stored in the \p std::list must be std::pair< const Key, V >,
                    where \p Key is the key type and \p V is the value type.
                    The list is ordered by the key \p Key.
                    The template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare
                    for the type \p Key stored in the list.
                </td>
            </tr>
            <tr>
                <td> \p std::map</td>
                <td><tt>cds/container/striped_map/std_map.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/std_map.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        std::map< Key, T, std::less<Key> >
                    > striped_map;
                \endcode
                </td>
                <td></td>
            </tr>
            <tr>
                <td> \p std::unordered_map</td>
                <td><tt>cds/container/striped_map/std_hash_map.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/std_hash_map.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        std::unordered_map< Key, T, std::hash<Key>, std::equal_to<Key> >
                    > striped_map;
                \endcode
                </td>
                <td>
                    You should provide two different hash functions \p h1 and \p h2 - one for \p std::unordered_map
                    and another for \p %StripedMap. For the best result, \p h1 and \p h2 must be orthogonal,
                    i.e. h1(X) != h2(X) for any value \p X of type \p Key.
                </td>
            </tr>
            <tr>
                <td> \p boost::container::slist</td>
                <td><tt>cds/container/striped_map/boost_slist.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/boost_slist.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        boost::container::slist< std::pair< const Key, T > >
                    > striped_map;
                \endcode
                </td>
                <td>
                    The type of values stored in the \p boost::container::slist must be std::pair< const Key, T >,
                    where \p Key is the key type and \p T is the value type. The list is ordered.
                    \p Options must contain \p cds::opt::less or \p cds::opt::compare.
                </td>
            </tr>
            <tr>
                <td> \p boost::container::list</td>
                <td><tt>cds/container/striped_map/boost_list.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/boost_list.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        boost::container::list< std::pair< const Key, T > >
                    > striped_map;
                \endcode
                </td>
                <td>
                    The type of values stored in the \p boost::container::list must be std::pair< const Key, T >,
                    where \p Key is the key type and \p T is the value type. The list is ordered.
                    \p Options must contain \p cds::opt::less or \p cds::opt::compare.
                </td>
            </tr>
            <tr>
                <td> \p boost::container::map</td>
                <td><tt>cds/container/striped_map/boost_map.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/boost_map.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        boost::container::map< Key, T, std::less<Key> >
                    > striped_map;
                \endcode
                </td>
                <td></td>
            </tr>
            <tr>
                <td> \p boost::container::flat_map</td>
                <td><tt>cds/container/striped_map/boost_flat_map.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/boost_flat_map.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        boost::container::flat_map< Key, T, std::less<Key> >
                    > striped_map;
                \endcode
                </td>
                <td></td>
            </tr>
            <tr>
                <td> \p boost::unordered_map</td>
                <td><tt>cds/container/striped_map/boost_unordered_map.h</tt></td>
                <td>\code
                    #include <cds/container/striped_map/boost_unordered_map.h>
                    #include <cds/container/striped_map.h>
                    typedef cds::container::StripedMap<
                        boost::unordered_map< Key, T, boost::hash<Key>, std::equal_to<Key> >
                    > striped_map;
                \endcode
                </td>
                <td></td>
            </tr>
        </table>
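        For example, a minimal usage sketch with a \p std::map bucket (assuming \p int keys
        and \p std::string values):
        \code
        #include <cds/container/striped_map/std_map.h>
        #include <cds/container/striped_map.h>

        typedef cds::container::StripedMap<
            std::map< int, std::string, std::less<int> >
        > striped_map;

        striped_map theMap;
        theMap.insert( 1, std::string( "one" ));
        theMap.find( 1, []( striped_map::value_type& item ) {
            item.second = "ONE";    // the item is accessed under the bucket lock
        });
        \endcode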
        You can use another container type as the map's bucket.
        Suppose you have a container class \p MyBestContainer and you want to integrate it with \p %StripedMap
        as the bucket type. There are two possibilities:
        - either your \p MyBestContainer class has native support of the bucket's interface;
            in this case, you can use the default \p striped_set::adapt metafunction;
        - or your \p MyBestContainer class does not support the bucket's interface, which means you should
            develop a specialization of the cds::container::striped_set::adapt metafunction providing
            the necessary interface.

        The striped_set::adapt< Container, Options... > metafunction has two template arguments:
        - \p Container is the class that should be used as the bucket, for example, std::list< std::pair< Key, T > >.
        - \p Options pack is the options from the \p %StripedMap declaration. The \p adapt metafunction can use
            any option from \p Options for its internal use. For example, a \p compare option can be passed
            to the \p adapt metafunction via the \p Options argument of the \p %StripedMap declaration.

        See the \p striped_set::adapt metafunction for the description of the interface that the bucket container
        must provide to be \p %StripedMap compatible.

        <b>Copy policy</b>

        There are three predefined copy policies:
        - \p cds::container::striped_set::copy_item - copies an item from the old bucket to the new one
            when resizing, using the copy constructor. It is the default policy for any compiler
            that does not support move semantics.
        - \p cds::container::striped_set::move_item - moves an item from the old bucket to the new one
            when resizing, using move semantics. It is the default policy for any compiler that supports
            move semantics. If the compiler does not support move semantics, the move policy is the same
            as \p copy_item.
        - \p cds::container::striped_set::swap_item - copies an item from the old bucket to the new one
            when resizing, using \p std::swap. Not all containers support this copy policy,
            see details in the table below.

        You can define your own copy policy specifically for your case, as shown in the sketch below.
        Note that the right copy policy can significantly improve the performance of resizing.
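        For instance, a hypothetical user-defined policy for a std::list< std::pair< const int, Foo > >
        bucket (here \p Foo stands for an assumed heavyweight value type, not part of the library)
        might copy the key and default-construct the value:
        \code
        // Hypothetical user-defined copy policy: copy the key, default-construct the value
        struct shallow_copy_item {
            void operator()( std::list< std::pair< const int, Foo > >& list,
                std::list< std::pair< const int, Foo > >::iterator itInsert,
                std::list< std::pair< const int, Foo > >::iterator itWhat )
            {
                list.insert( itInsert, std::make_pair( itWhat->first, Foo()));
            }
        };
        \endcode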
        <table>
            <tr>
                <th>Container</th>
                <th>Policies</th>
            </tr>
            <tr>
                <td>
                    - \p std::list
                    - \p boost::list
                </td>
                <td>\code
                    struct copy_item {
                        void operator()( std::list< std::pair< Key, T > >& list,
                            std::list< std::pair< Key, T > >::iterator itInsert,
                            std::list< std::pair< Key, T > >::iterator itWhat )
                        {
                            list.insert( itInsert, *itWhat );
                        }
                    };
                    \endcode

                    \code
                    // The type T stored in the list must be swappable
                    struct swap_item {
                        void operator()( std::list< std::pair< Key, T > >& list,
                            std::list< std::pair< Key, T > >::iterator itInsert,
                            std::list< std::pair< Key, T > >::iterator itWhat )
                        {
                            std::pair< Key, T > newVal( itWhat->first, T());
                            std::swap( list.insert( itInsert, newVal )->second, itWhat->second );
                        }
                    };
                    \endcode

                    \code
                    struct move_item {
                        void operator()( std::list< std::pair< Key, T > >& list,
                            std::list< std::pair< Key, T > >::iterator itInsert,
                            std::list< std::pair< Key, T > >::iterator itWhat )
                        {
                            list.insert( itInsert, std::move( *itWhat ));
                        }
                    };
                    \endcode
                </td>
            </tr>
            <tr>
                <td>
                    - \p std::map
                    - \p std::unordered_map
                    - \p boost::container::map
                    - \p boost::container::flat_map
                    - \p boost::unordered_map
                </td>
                <td>\code
                    struct copy_item {
                        void operator()( std::map< Key, T >& map, std::map< Key, T >::iterator itWhat )
                        {
                            map.insert( *itWhat );
                        }
                    };
                    \endcode

                    \code
                    // The type T must be swappable
                    struct swap_item {
                        void operator()( std::map< Key, T >& map, std::map< Key, T >::iterator itWhat )
                        {
                            std::swap(
                                map.insert( std::map< Key, T >::value_type( itWhat->first, T())).first->second,
                                itWhat->second );
                        }
                    };
                    \endcode

                    \code
                    struct move_item {
                        void operator()( std::map< Key, T >& map, std::map< Key, T >::iterator itWhat )
                        {
                            map.insert( std::move( *itWhat ));
                        }
                    };
                    \endcode
                </td>
            </tr>
            <tr>
                <td> \p boost::container::slist</td>
                <td>\code
                    struct copy_item {
                        void operator()( bc::slist< std::pair< Key, T > >& list,
                            bc::slist< std::pair< Key, T > >::iterator itInsert,
                            bc::slist< std::pair< Key, T > >::iterator itWhat )
                        {
                            list.insert_after( itInsert, *itWhat );
                        }
                    };
                    \endcode

                    \code
                    // The type T stored in the list must be swappable
                    struct swap_item {
                        void operator()( bc::slist< std::pair< Key, T > >& list,
                            bc::slist< std::pair< Key, T > >::iterator itInsert,
                            bc::slist< std::pair< Key, T > >::iterator itWhat )
                        {
                            std::pair< Key, T > newVal( itWhat->first, T());
                            std::swap( list.insert( itInsert, newVal )->second, itWhat->second );
                        }
                    };
                    \endcode

                    \code
                    struct move_item {
                        void operator()( bc::slist< std::pair< Key, T > >& list,
                            bc::slist< std::pair< Key, T > >::iterator itInsert,
                            bc::slist< std::pair< Key, T > >::iterator itWhat )
                        {
                            list.insert_after( itInsert, std::move( *itWhat ));
                        }
                    };
                    \endcode
                </td>
            </tr>
        </table>
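        For example, a minimal sketch that selects the \p swap_item policy explicitly
        (assuming \p int keys and \p std::string values):
        \code
        #include <cds/container/striped_map/std_list.h>
        #include <cds/container/striped_map.h>

        typedef cds::container::StripedMap<
            std::list< std::pair< const int, std::string > >,
            cds::opt::less< std::less<int> >,
            cds::opt::copy_policy< cds::container::striped_set::swap_item >
        > striped_map;
        \endcode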
        <b>Advanced functions</b>

        The library provides some advanced functions, like \p erase_with() and \p find_with(),
        that are not supported by all underlying containers.
        The table below shows whether the underlying container supports those functions
        (the sign "+" means "the container supports the function"):
        <table>
            <tr>
                <th>Container</th>
                <th>\p find_with</th>
                <th>\p erase_with</th>
            </tr>
            <tr><td> \p std::list</td><td>+</td><td>+</td></tr>
            <tr><td> \p std::map</td><td>-</td><td>-</td></tr>
            <tr><td> \p std::unordered_map</td><td>-</td><td>-</td></tr>
            <tr><td> \p boost::container::slist</td><td>+</td><td>+</td></tr>
            <tr><td> \p boost::container::list</td><td>+</td><td>+</td></tr>
            <tr><td> \p boost::container::map</td><td>-</td><td>-</td></tr>
            <tr><td> \p boost::container::flat_map</td><td>-</td><td>-</td></tr>
            <tr><td> \p boost::unordered_map</td><td>-</td><td>-</td></tr>
        </table>
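        For example, a sketch for a \p std::list bucket, which supports both functions
        (assuming \p int keys and \p std::string values):
        \code
        #include <cds/container/striped_map/std_list.h>
        #include <cds/container/striped_map.h>

        typedef cds::container::StripedMap<
            std::list< std::pair< const int, std::string > >,
            cds::opt::less< std::less<int> >
        > striped_map;

        striped_map theMap;
        theMap.insert( 42, std::string( "answer" ));

        // std::list buckets support find_with()/erase_with(), see the table above
        theMap.find_with( 42, std::less<int>(), []( striped_map::value_type& item ) {
            // inspect the item under the bucket lock
        });
        theMap.erase_with( 42, std::less<int>());
        \endcode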
    **/
    template <class Container, typename... Options>
    class StripedMap
#ifdef CDS_DOXYGEN_INVOKED
        : protected StripedSet<Container, Options...>
#else
        : protected details::make_striped_map< Container, Options...>::type
#endif
    {
        //@cond
        typedef typename details::make_striped_map< Container, Options...>::type base_class;
        //@endcond

    public:
        //@cond
        typedef typename base_class::default_options default_options;
        typedef typename base_class::options options;
        //@endcond

        typedef Container underlying_container_type ;   ///< original intrusive container type for the bucket
        typedef typename base_class::bucket_type bucket_type ; ///< container type adapted for hash set
        typedef typename bucket_type::value_type value_type ;  ///< pair type ( std::pair )
        typedef typename value_type::first_type key_type ;     ///< key type
        typedef typename value_type::second_type mapped_type ; ///< mapped type

        typedef typename base_class::hash hash ;                        ///< Hash functor
        typedef typename base_class::item_counter item_counter ;       ///< Item counter
        typedef typename base_class::resizing_policy resizing_policy ; ///< Resizing policy
        typedef typename base_class::allocator_type allocator_type ;   ///< allocator type specified in options
        typedef typename base_class::mutex_policy mutex_policy ;       ///< Mutex policy

    protected:
        //@cond
        typedef typename base_class::scoped_cell_lock scoped_cell_lock;
        typedef typename base_class::scoped_full_lock scoped_full_lock;
        typedef typename base_class::scoped_resize_lock scoped_resize_lock;
        //@endcond

    private:
        //@cond
        struct key_accessor {
            key_type const& operator()( value_type const& p ) const
            {
                return p.first;
            }
        };
        //@endcond

    public:
        /// Default ctor. The initial capacity is 16.
        StripedMap()
            : base_class()
        {}

        /// Ctor with initial capacity specified
        StripedMap(
            size_t nCapacity    ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16.
        )
            : base_class( nCapacity )
        {}

        /// Ctor with resizing policy (copy semantics)
        /**
            This constructor initializes the m_ResizingPolicy member with a copy of the \p resizingPolicy parameter.
        */
        StripedMap(
            size_t nCapacity    ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16.
            ,resizing_policy const& resizingPolicy  ///< Resizing policy
        )
            : base_class( nCapacity, resizingPolicy )
        {}

        /// Ctor with resizing policy (move semantics)
        /**
            This constructor initializes the m_ResizingPolicy member by moving the \p resizingPolicy parameter.
            Move semantics is used. Available only for compilers that support C++11 rvalue references.
        */
        StripedMap(
            size_t nCapacity    ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16.
            ,resizing_policy&& resizingPolicy  ///< Resizing policy
        )
            : base_class( nCapacity, std::forward<resizing_policy>(resizingPolicy))
        {}

        /// Destructor destroys internal data
        ~StripedMap()
        {}

    public:
        /// Inserts a new node with \p key and default value
        /**
            The function creates a node with \p key and default value, and then inserts the node created into the map.

            Preconditions:
            - The \p key_type should be constructible from a value of type \p K.
                In the trivial case, \p K is equal to \p key_type.
            - The \p mapped_type should be default-constructible.

            Returns \p true if inserting is successful, \p false otherwise.
        */
        template <typename K>
        bool insert( K const& key )
        {
            return insert_with( key, [](value_type&){} );
        }

        /// Inserts a new node
        /**
            The function creates a node with a copy of \p val value and then inserts the node created into the map.

            Preconditions:
            - The \p key_type should be constructible from \p key of type \p K.
            - The \p mapped_type should be constructible from \p val of type \p V.
            Returns \p true if \p val is inserted into the map, \p false otherwise.
        */
        template <typename K, typename V>
        bool insert( K const& key, V const& val )
        {
            return insert_with( key, [&val](value_type& item) { item.second = val ; } );
        }

        /// Inserts a new node and initializes it by a functor
        /**
            This function inserts a new node with key \p key and, if inserting is successful,
            calls the \p func functor with signature
            \code
                struct functor {
                    void operator()( value_type& item );
                };
            \endcode

            The argument \p item of the user-defined functor \p func is the reference to the map's item inserted:
            - item.first is a const reference to the item's key that cannot be changed.
            - item.second is a reference to the item's value that may be changed.

            The \p key_type should be constructible from a value of type \p K.

            The function allows splitting the creation of a new item into several steps:
            - create an item from \p key;
            - insert the new item into the map;
            - if inserting is successful, initialize the value of the item by calling \p func.

            This can be useful if the complete initialization of an object of \p mapped_type is heavyweight
            and it is preferable that the initialization is completed only if inserting is successful.
        */
        template <typename K, typename Func>
        bool insert_with( const K& key, Func func )
        {
            return base_class::insert( key, func );
        }

        /// For key \p key inserts data of type \p mapped_type created in-place from \p args
        /**
            Returns \p true if inserting is successful, \p false otherwise.
        */
        template <typename K, typename... Args>
        bool emplace( K&& key, Args&&... args )
        {
            bool bOk;
            bool bResize;
            size_t nHash = base_class::hashing( std::forward<K>(key));
            bucket_type * pBucket;
            {
                scoped_cell_lock sl( base_class::m_MutexPolicy, nHash );
                pBucket = base_class::bucket( nHash );

                bOk = pBucket->emplace( std::forward<K>(key), std::forward<Args>(args)...);
                bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket );
            }

            if ( bResize )
                base_class::resize();
            return bOk;
        }

        /// Updates the node
        /**
            The operation performs inserting or updating data under the bucket lock.
            If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true.
            Otherwise, the functor \p func is called with the item found.

            The functor signature is:
            \code
                struct my_functor {
                    void operator()( bool bNew, value_type& item );
                };
            \endcode
            with arguments:
            - \p bNew - \p true if the item has been inserted, \p false otherwise
            - \p item - the item of the map

            Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
            \p second is \p true if a new item has been added or \p false if the item with \p key
            already is in the map.
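            For example, a sketch that inserts a counter or increments an existing one
            (assuming \p int keys and \p int counters):
            \code
            typedef cds::container::StripedMap< std::map< int, int > > striped_map;
            striped_map theMap;

            // Inserts 5 -> 1, or increments the existing counter
            std::pair<bool, bool> res = theMap.update( 5,
                []( bool bNew, striped_map::value_type& item ) {
                    if ( bNew )
                        item.second = 1;
                    else
                        ++item.second;
                });
            \endcode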
        */
        template <typename K, typename Func>
        std::pair<bool, bool> update( K const& key, Func func, bool bAllowInsert = true )
        {
            std::pair<bool, bool> result;
            bool bResize;
            size_t nHash = base_class::hashing( key );
            bucket_type * pBucket;
            {
                scoped_cell_lock sl( base_class::m_MutexPolicy, nHash );
                pBucket = base_class::bucket( nHash );

                result = pBucket->update( key, func, bAllowInsert );
                bResize = result.first && result.second && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket );
            }

            if ( bResize )
                base_class::resize();
            return result;
        }
        //@cond
        template <typename K, typename Func>
        CDS_DEPRECATED("ensure() is deprecated, use update() instead")
        std::pair<bool, bool> ensure( K const& key, Func func )
        {
            return update( key, func, true );
        }
        //@endcond

        /// Deletes \p key from the map
        /** \anchor cds_nonintrusive_StripedMap_erase

            Returns \p true if \p key is found and deleted, \p false otherwise.
        */
        template <typename K>
        bool erase( K const& key )
        {
            return base_class::erase( key );
        }

        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \ref cds_nonintrusive_StripedMap_erase "erase(K const&)"
            but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the map.

            @note This function is enabled if the compiler supports C++11 default template arguments
            for function templates and the underlying container supports the \p %erase_with feature.
        */
        template < typename K, typename Less
            ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type >
        bool erase_with( K const& key, Less pred )
        {
            return erase_with( key, pred, [](value_type const&) {} );
        }

        /// Deletes \p key from the map
        /** \anchor cds_nonintrusive_StripedMap_erase_func

            The function searches an item with key \p key, calls \p f functor
            and deletes the item. If \p key is not found, the functor is not called.

            The functor \p Func interface:
            \code
            struct extractor {
                void operator()(value_type& item) { ... }
            };
            \endcode

            Returns \p true if the key is found and deleted, \p false otherwise.
        */
        template <typename K, typename Func>
        bool erase( K const& key, Func f )
        {
            return base_class::erase( key, f );
        }

        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \ref cds_nonintrusive_StripedMap_erase_func "erase(K const&, Func)"
            but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the map.

            @note This function is enabled if the compiler supports C++11 default template arguments
            for function templates and the underlying container supports the \p %erase_with feature.
        */
        template <typename K, typename Less, typename Func
            ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type >
        bool erase_with( K const& key, Less pred, Func f )
        {
            CDS_UNUSED( pred );
            return base_class::erase_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(), f );
        }

        /// Finds the key \p key
        /** \anchor cds_nonintrusive_StripedMap_find_func

            The function searches the item with key equal to \p key and calls the functor \p f for the item found.
            The interface of \p Func functor is:
            \code
            struct functor {
                void operator()( value_type& item );
            };
            \endcode
            where \p item is the item found.

            The functor may change \p item.second.

            The function returns \p true if \p key is found, \p false otherwise.
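            For example, a sketch that copies the mapped value out under the bucket lock
            (assuming \p int keys and \p std::string values):
            \code
            typedef cds::container::StripedMap< std::map< int, std::string > > striped_map;
            striped_map theMap;
            // ...
            std::string value;
            bool bFound = theMap.find( 10, [&value]( striped_map::value_type& item ) {
                value = item.second;    // read under the bucket lock
            });
            \endcode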
        */
        template <typename K, typename Func>
        bool find( K const& key, Func f )
        {
            return base_class::find( key, [&f]( value_type& pair, K const& ) mutable { f(pair); } );
        }

        /// Finds the key \p key using \p pred predicate
        /**
            The function is an analog of \ref cds_nonintrusive_StripedMap_find_func "find(K const&, Func)"
            but \p pred is used for key comparison.
            \p Less has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the map.

            @note This function is enabled if the compiler supports C++11 default template arguments
            for function templates and the underlying container supports the \p %find_with feature.
        */
        template <typename K, typename Less, typename Func
            ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_find_with >::type >
        bool find_with( K const& key, Less pred, Func f )
        {
            CDS_UNUSED( pred );
            return base_class::find_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(),
                [&f]( value_type& pair, K const& ) mutable { f(pair); } );
        }

        /// Checks whether the map contains \p key
        /**
            The function searches the item with key equal to \p key
            and returns \p true if it is found, and \p false otherwise.
        */
        template <typename K>
        bool contains( K const& key )
        {
            return base_class::contains( key );
        }
        //@cond
        template <typename K>
        CDS_DEPRECATED("use contains()")
        bool find( K const& key )
        {
            return contains( key );
        }
        //@endcond

        /// Checks whether the map contains \p key using \p pred predicate for searching
        /**
            The function is similar to contains( key ) but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.

            @note This function is enabled if the compiler supports C++11 default template arguments
            for function templates and the underlying container supports the \p %contains() feature.
        */
        template <typename K, typename Less
            ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_find_with >::type >
        bool contains( K const& key, Less pred )
        {
            CDS_UNUSED( pred );
            return base_class::contains( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >());
        }
        //@cond
        template <typename K, typename Less
            ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_find_with >::type >
        CDS_DEPRECATED("use contains()")
        bool find_with( K const& key, Less pred )
        {
            return contains( key, pred );
        }
        //@endcond

        /// Clears the map
        void clear()
        {
            base_class::clear();
        }

        /// Checks if the map is empty
        /**
            Emptiness is checked by item counting: if the item count is zero then the map is empty.
        */
        bool empty() const
        {
            return base_class::empty();
        }

        /// Returns item count in the map
        size_t size() const
        {
            return base_class::size();
        }

        /// Returns the size of the hash table
        /**
            The hash table size is not constant and can be increased via resizing.
        */
        size_t bucket_count() const
        {
            return base_class::bucket_count();
        }

        /// Returns the lock array size
        /**
            The lock array size is constant.
        */
        size_t lock_count() const
        {
            return base_class::lock_count();
        }

        /// Returns resizing policy object
        resizing_policy& get_resizing_policy()
        {
            return base_class::get_resizing_policy();
        }

        /// Returns resizing policy (const version)
        resizing_policy const& get_resizing_policy() const
        {
            return base_class::get_resizing_policy();
        }
    };

}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_H
libcds-2.3.3/cds/container/striped_map/000077500000000000000000000000001341244201700200175ustar00rootroot00000000000000libcds-2.3.3/cds/container/striped_map/boost_flat_map.h000066400000000000000000000044121341244201700231620ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::flat_map you must use boost 1.48 or above" #endif #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::copy_item_policy {}; // Swap item policy template struct swap_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::swap_item_policy {}; // Move policy for map template struct move_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::move_item_policy {}; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { template class adapt< boost::container::flat_map< Key, T, Traits, Alloc>, Options... > { public: typedef boost::container::flat_map< Key, T, Traits, Alloc> container_type ; ///< underlying container type typedef cds::container::striped_set::details::boost_map_adapter< container_type, Options... > type; }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H libcds-2.3.3/cds/container/striped_map/boost_list.h000066400000000000000000000244721341244201700223620ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::list you must use boost 1.48 or above" #endif #include // ref #include // std::lower_bound #include // std::pair #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef boost::container::list< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert( itInsert, *itWhat ); } }; // Swap policy for map template struct swap_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef boost::container::list< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { pair_type newVal( itWhat->first, typename pair_type::second_type()); itInsert = list.insert( itInsert, newVal ); std::swap( itInsert->second, itWhat->second ); } }; // Move policy for map template struct move_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef boost::container::list< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds:container namespace cds 
{ namespace intrusive { namespace striped_set { /// boost::container::list adapter for hash map bucket template class adapt< boost::container::list< std::pair, Alloc>, Options... > { public: typedef boost::container::list< std::pair, Alloc> container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename value_type::first_type key_type; typedef typename value_type::second_type mapped_type; typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1.first, i2.first ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2.first ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1.first, i2 ) < 0; } }; //@endcond private: //@cond container_type m_List; //@endcond public: adapted_container() {} template bool insert( const Q& key, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { //value_type newItem( key ); it = m_List.insert( it, value_type( key_type( key ), mapped_type())); f( *it ); return true; } // key already exists return false; } template bool emplace( K&& key, Args&&... args ) { value_type val( key_type( std::forward( key )), mapped_type( std::forward( args )... 
)); iterator it = std::lower_bound( m_List.begin(), m_List.end(), val.first, find_predicate()); if ( it == m_List.end() || key_comparator()( val.first, it->first ) != 0 ) { m_List.emplace( it, std::move( val )); return true; } return false; } template std::pair update( const Q& key, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); it = m_List.insert( it, value_type( key_type( key ), mapped_type())); func( true, *it ); return std::make_pair( true, true ); } else { // already exists func( false, *it ); return std::make_pair( true, false ); } } template bool erase( Q const& key, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) return false; // key exists f( *it ); m_List.erase( it ); return true; } template bool erase( Q const& key, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); if ( it == m_List.end() || pred( key, it->first ) || pred(it->first, key)) return false; // key exists f( *it ); m_List.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, it->first ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); if ( it == m_List.end() || pred( val, it->first ) || pred( it->first, val )) return false; // key exists f( *it, val ); return true; } /// Clears the container void clear() { m_List.clear(); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); assert( it == m_List.end() || key_comparator()( itWhat->first, it->first ) != 0 ); copy_item()( m_List, it, itWhat ); } size_t size() const { return m_List.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H libcds-2.3.3/cds/container/striped_map/boost_map.h000066400000000000000000000043651341244201700221630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::map you must use boost 1.48 or above" #endif #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< boost::container::map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::copy_item_policy {}; // Swap item policy template struct swap_item_policy< boost::container::map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::swap_item_policy {}; // Move policy for map template struct move_item_policy< boost::container::map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::move_item_policy {}; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::set adapter for hash set bucket template class adapt< boost::container::map< Key, T, Traits, Alloc>, Options... > { public: typedef boost::container::map< Key, T, Traits, Alloc> container_type ; ///< underlying container type typedef cds::container::striped_set::details::boost_map_adapter< container_type, Options... > type; }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H libcds-2.3.3/cds/container/striped_map/boost_slist.h000066400000000000000000000246621341244201700225460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::slist you must use boost 1.48 or above" #endif #include // ref #include // std::pair #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef boost::container::slist< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { itInsert = list.insert_after( itInsert, *itWhat ); } }; // Swap policy for map template struct swap_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef boost::container::slist< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { pair_type newVal( itWhat->first, typename pair_type::mapped_type()); itInsert = list.insert_after( itInsert, newVal ); std::swap( itInsert->second, itWhat->second ); } }; // Move policy for map template struct move_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef boost::container::slist< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert_after( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds:container 
namespace cds { namespace intrusive { namespace striped_set { /// boost::container::slist adapter for hash map bucket template class adapt< boost::container::slist< std::pair, Alloc>, Options... > { public: typedef boost::container::slist< std::pair, Alloc> container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename value_type::first_type key_type; typedef typename value_type::second_type mapped_type; typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; template std::pair< iterator, bool > find_prev_item( Q const& key ) { iterator itPrev = m_List.before_begin(); iterator itEnd = m_List.end(); for ( iterator it = m_List.begin(); it != itEnd; ++it ) { int nCmp = key_comparator()( key, it->first ); if ( nCmp < 0 ) itPrev = it; else if ( nCmp > 0 ) break; else return std::make_pair( itPrev, true ); } return std::make_pair( itPrev, false ); } template std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) { iterator itPrev = m_List.before_begin(); iterator itEnd = m_List.end(); for ( iterator it = m_List.begin(); it != itEnd; ++it ) { if ( pred( key, it->first )) itPrev = it; else if ( pred(it->first, key)) break; else return std::make_pair( itPrev, true ); } return std::make_pair( itPrev, false ); } //@endcond private: //@cond container_type m_List; //@endcond public: adapted_container() {} template bool insert( const Q& key, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key ); if ( !pos.second ) { pos.first = m_List.insert_after( pos.first, value_type( key_type( key ), mapped_type())); f( *pos.first ); return true; } // key already exists return false; } template bool emplace( K&& key, Args&&... args ) { std::pair< iterator, bool > pos = find_prev_item( key ); if ( !pos.second ) { m_List.emplace_after( pos.first, key_type( std::forward( key )), mapped_type( std::forward( args )... 
)); return true; } return false; } template std::pair update( const Q& key, Func func, bool bAllowInsert ) { std::pair< iterator, bool > pos = find_prev_item( key ); if ( !pos.second ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); pos.first = m_List.insert_after( pos.first, value_type( key_type( key ), mapped_type())); func( true, *pos.first ); return std::make_pair( true, true ); } else { // already exists func( false, *(++pos.first)); return std::make_pair( true, false ); } } template bool erase( Q const& key, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key ); if ( !pos.second ) return false; // key exists iterator it = pos.first; f( *(++it)); m_List.erase_after( pos.first ); return true; } template bool erase( Q const& key, Less pred, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key, pred ); if ( !pos.second ) return false; // key exists iterator it = pos.first; f( *(++it)); m_List.erase_after( pos.first ); return true; } template bool find( Q& val, Func f ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) return false; // key exists f( *(++pos.first), val ); return true; } template bool find( Q& val, Less pred, Func f ) { std::pair< iterator, bool > pos = find_prev_item( val, pred ); if ( !pos.second ) return false; // key exists f( *(++pos.first), val ); return true; } void clear() { m_List.clear(); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { std::pair< iterator, bool > pos = find_prev_item( itWhat->first ); assert( !pos.second ); copy_item()( m_List, pos.first, itWhat ); } size_t size() const { return m_List.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H libcds-2.3.3/cds/container/striped_map/boost_unordered_map.h000066400000000000000000000042351341244201700242260ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::copy_item_policy {}; // Swap policy for map template struct swap_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::swap_item_policy {}; // Move policy for map template struct move_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > : public details::boost_map_copy_policies >::move_item_policy {}; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::unordered_map adapter for hash map bucket template class adapt< boost::unordered_map< Key, T, Hash, Pred, Alloc>, Options... 
> { public: typedef boost::unordered_map< Key, T, Hash, Pred, Alloc> container_type ; ///< underlying container type typedef cds::container::striped_set::details::boost_map_adapter< container_type, Options... > type; }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H libcds-2.3.3/cds/container/striped_map/std_hash_map.h000066400000000000000000000166051341244201700226320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > { typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { map.insert( *itWhat ); } }; // Swap policy for map template struct swap_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > { typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { pair_type pair( itWhat->first, typename pair_type::second_type()); std::pair res = map.insert( pair ); assert( res.second ); std::swap( res.first->second, itWhat->second ); } }; // Move policy for map template struct move_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > { typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { map.insert( std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::unordered_map adapter for hash map bucket template class adapt< std::unordered_map< Key, T, Hash, Pred, Alloc>, Options... > { public: typedef std::unordered_map< Key, T, Hash, Pred, Alloc> container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::key_type key_type; typedef typename container_type::mapped_type mapped_type; typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = false; static bool const has_erase_with = false; private: //@cond typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... 
>::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; //@endcond private: //@cond container_type m_Map; //@endcond public: template bool insert( const Q& key, Func f ) { std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); if ( res.second ) f( const_cast(*res.first)); return res.second; } template bool emplace( Q&& key, Args&&... args ) { std::pair res = m_Map.emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); return res.second; } template std::pair update( const Q& key, Func func, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); func( res.second, const_cast(*res.first)); return std::make_pair( true, res.second ); } else { auto it = m_Map.find( key_type( key )); if ( it == end()) return std::make_pair( false, false ); func( false, *it ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = m_Map.find( key_type( key )); if ( it == m_Map.end()) return false; f( const_cast(*it)); m_Map.erase( it ); return true; } template bool find( Q& key, Func f ) { iterator it = m_Map.find( key_type( key )); if ( it == m_Map.end()) return false; f( const_cast(*it), key ); return true; } void clear() { m_Map.clear(); } iterator begin() { return m_Map.begin(); } const_iterator begin() const { return m_Map.begin(); } iterator end() { return m_Map.end(); } const_iterator end() const { return m_Map.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { assert( m_Map.find( itWhat->first ) == m_Map.end()); copy_item()( m_Map, itWhat ); } size_t size() const { return m_Map.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H libcds-2.3.3/cds/container/striped_map/std_list.h000066400000000000000000000265071341244201700220270ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H #include #include // ref #include // std::lower_bound #include // std::pair #include #undef CDS_STD_LIST_SIZE_CXX11_CONFORM #if !( defined(__GLIBCXX__ ) && (!defined(_GLIBCXX_USE_CXX11_ABI) || _GLIBCXX_USE_CXX11_ABI == 0 )) # define CDS_STD_LIST_SIZE_CXX11_CONFORM #endif //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< std::list< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef std::list< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert( itInsert, *itWhat ); } }; // Swap policy for map template struct swap_item_policy< std::list< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef std::list< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { pair_type newVal( itWhat->first, typename pair_type::second_type()); itInsert = list.insert( itInsert, newVal ); std::swap( itInsert->second, itWhat->second ); } }; // Move policy for map template struct move_item_policy< std::list< std::pair< K const, T >, Alloc > > { typedef std::pair< K const, T> pair_type; typedef std::list< pair_type, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds:container namespace cds { namespace intrusive { namespace striped_set { /// std::list adapter for hash map bucket template class adapt< std::list< std::pair, Alloc>, Options... > { public: typedef std::list< std::pair, Alloc> container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename value_type::first_type key_type; typedef typename value_type::second_type mapped_type; typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... 
>::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1.first, i2.first ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2.first ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1.first, i2 ) < 0; } }; //@endcond private: //@cond container_type m_List; # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) // GCC C++ lib bug: // In GCC (at least up to 4.7.x), the complexity of std::list::size() is O(N) // (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49561) // Fixed in GCC 5 size_t m_nSize ; // list size # endif //@endcond public: adapted_container() # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) : m_nSize(0) # endif {} template bool insert( const Q& key, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { it = m_List.insert( it, value_type( key_type( key ), mapped_type())); f( *it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif return true; } // key already exists return false; } template bool emplace( K&& key, Args&&... args ) { value_type val( key_type( std::forward( key )), mapped_type( std::forward( args )... )); iterator it = std::lower_bound( m_List.begin(), m_List.end(), val.first, find_predicate()); if ( it == m_List.end() || key_comparator()( val.first, it->first ) != 0 ) { it = m_List.emplace( it, std::move( val )); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif return true; } return false; } template std::pair update( const Q& key, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); it = m_List.insert( it, value_type( key_type( key ), mapped_type())); func( true, *it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif return std::make_pair( true, true ); } else { // already exists func( false, *it ); return std::make_pair( true, false ); } } template bool erase( Q const& key, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) return false; // key exists f( *it ); m_List.erase( it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) --m_nSize; # endif return true; } template bool erase( Q const& key, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); if ( it == m_List.end() || pred( key, it->first ) || pred( it->first, key )) return false; // key exists f( *it ); m_List.erase( it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) --m_nSize; # endif return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, it->first ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = 
std::lower_bound( m_List.begin(), m_List.end(), val, pred ); if ( it == m_List.end() || pred( val, it->first ) || pred( it->first, val )) return false; // key exists f( *it, val ); return true; } void clear() { m_List.clear(); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); assert( it == m_List.end() || key_comparator()( itWhat->first, it->first ) != 0 ); copy_item()( m_List, it, itWhat ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif } size_t size() const { # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) return m_nSize; # else return m_List.size(); # endif } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H libcds-2.3.3/cds/container/striped_map/std_map.h000066400000000000000000000162331341244201700216240ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for map template struct copy_item_policy< std::map< Key, T, Traits, Alloc > > { typedef std::map< Key, T, Traits, Alloc > map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { map.insert( *itWhat ); } }; // Swap item policy template struct swap_item_policy< std::map< Key, T, Traits, Alloc > > { typedef std::map< Key, T, Traits, Alloc > map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { std::pair< typename map_type::iterator, bool > ret = map.insert( pair_type( itWhat->first, typename pair_type::second_type())); assert( ret.second ) ; // successful insertion std::swap( ret.first->second, itWhat->second ); } }; // Move policy for map template struct move_item_policy< std::map< Key, T, Traits, Alloc > > { typedef std::map< Key, T, Traits, Alloc > map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { map.insert( std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::set adapter for hash set bucket template class adapt< std::map< Key, T, Traits, Alloc>, Options... 
> { public: typedef std::map< Key, T, Traits, Alloc> container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::key_type key_type; typedef typename container_type::mapped_type mapped_type; typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = false; static bool const has_erase_with = false; private: //@cond typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; //@endcond private: //@cond container_type m_Map; //@endcond public: template bool insert( const Q& key, Func f ) { std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); if ( res.second ) f( *res.first ); return res.second; } template bool emplace( Q&& key, Args&&... args ) { std::pair res = m_Map.emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); return res.second; } template std::pair update( const Q& key, Func func, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); func( res.second, *res.first ); return std::make_pair( true, res.second ); } else { auto it = m_Map.find( key_type( key )); if ( it == end()) return std::make_pair( false, false ); func( false, *it ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = m_Map.find( key_type( key )); if ( it == m_Map.end()) return false; f( *it ); m_Map.erase( it ); return true; } template bool find( Q& key, Func f ) { iterator it = m_Map.find( key_type( key )); if ( it == m_Map.end()) return false; f( *it, key ); return true; } /// Clears the container void clear() { m_Map.clear(); } iterator begin() { return m_Map.begin(); } const_iterator begin() const { return m_Map.begin(); } iterator end() { return m_Map.end(); } const_iterator end() const { return m_Map.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { assert( m_Map.find( itWhat->first ) == m_Map.end()); copy_item()( m_Map, itWhat ); } size_t size() const { return m_Map.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H libcds-2.3.3/cds/container/striped_set.h000066400000000000000000001233551341244201700202170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_STRIPED_SET_H
#define CDSLIB_CONTAINER_STRIPED_SET_H

#include <cds/intrusive/striped_set.h>
#include <cds/container/striped_set/adapter.h>

namespace cds { namespace container {

/// Striped hash set
/** @ingroup cds_nonintrusive_set
    Source
    - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming"

    Lock striping is a very simple technique. The set consists of a bucket table and an array of locks.
    Initially, the capacity of the lock array and the bucket table is the same. When the set is resized,
    the bucket table capacity is doubled but the lock array is not. Lock \p i protects each bucket \p j,
    where j = i mod L, with \p L the size of the lock array.

    Template arguments:
    - \p Container - the container class that is used as the bucket table entry. The \p Container class
      should support a uniform interface described below.
    - \p Options - options

    The \p %StripedSet class does not exactly dictate the type of container that should be used as a
    \p Container bucket. Instead, the class supports different container types for the bucket, for example,
    \p std::list, \p std::set and others.

    Remember that the \p %StripedSet algorithm ensures sequential blocking access to its bucket through the
    mutex type you specify among the \p Options template arguments.

    The \p Options are:
    - \p opt::mutex_policy - concurrent access policy. Available policies: \p intrusive::striped_set::striping,
      \p intrusive::striped_set::refinable. Default is \p %striped_set::striping.
    - \p opt::hash - hash functor. For the default option value see opt::v::hash_selector, which selects a
      default hash functor for your compiler.
    - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified,
      \p %opt::less is used.
    - \p opt::less - specifies a binary predicate used for key comparison. Default is \p std::less.
    - \p opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operations on
      the counter are performed without locks. Note that item counting is an essential part of the set
      algorithm, so a dummy counter such as \p atomicity::empty_item_counter is not suitable.
    - \p opt::allocator - the allocator type used for memory allocation of the bucket table and the lock array.
      Default is \ref CDS_DEFAULT_ALLOCATOR.
    - \p opt::resizing_policy - the resizing policy, a functor that decides when to resize the hash set.
      The default option value depends on the bucket container type: for sequential containers like \p std::list
      and \p std::vector the resizing policy is striped_set::load_factor_resizing<4>; for other container types
      like \p std::set and \p std::unordered_set the resizing policy is \p striped_set::no_resizing.
      See \ref cds_striped_resizing_policy "available resizing policy".
      Note that the choice of resizing policy depends on the \p Container type: for sequential containers like
      \p std::list, \p std::vector and so on, the right choice of policy can significantly improve performance.
      For other, non-sequential types of \p Container (like \p std::set) the resizing policy is not so important.
    - \p opt::copy_policy - the copy policy used to copy items from the old set to the new one when resizing.
      The policy can be optionally used in the adapted bucket container for performance reasons of resizing.
      The details of the copy algorithm depend on the type of bucket container and are explained below.

    \p %opt::compare or \p %opt::less options are used in some \p Container classes for searching an item.
    The \p %opt::compare option has the highest priority: if \p %opt::compare is specified, \p %opt::less is
    not used. You can pass other options that are forwarded to the adapt metafunction, see below.

    Internal details

    The \p %StripedSet class cannot utilize the \p Container specified directly, but only its adapted variant,
    which supports a unified interface. Internally, the adaptation is made via the striped_set::adapt
    metafunction, which wraps the bucket container and provides the unified bucket interface suitable for
    \p %StripedSet. Such adaptation is completely transparent for you - you don't need to call the \p adapt
    metafunction directly; \p %StripedSet's internal machinery itself invokes the appropriate \p adapt
    metafunction to adjust your \p Container class to the \p %StripedSet bucket's internal interface.
    All you need is to include the right header before striped_hash_set.h.

    By default, the striped_set::adapt metafunction does not make any wrapping of \p AnyContainer, so the
    resulting striped_set::adapt< AnyContainer, Options... >::type is the same as \p AnyContainer. However,
    there are many specializations of striped_set::adapt for well-known containers, see the table below.
    Each of these specializations wraps the corresponding container, making it suitable for the set's bucket.
    Remember, you should include the proper header file for \p adapt before including striped_hash_set.h.
    For each supported container, the list below gives the container type, the .h-file for \p adapt,
    an example, and notes.

    \p std::list - header <cds/container/striped_set/std_list.h>
    \code
        #include <cds/container/striped_set/std_list.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< std::list<T>, cds::opt::less< std::less<T> > > striped_set;
    \endcode
    The list is ordered. Template argument pack \p Options must contain \p cds::opt::less or
    \p cds::opt::compare for the type \p T stored in the list.

    \p std::vector - header <cds/container/striped_set/std_vector.h>
    \code
        #include <cds/container/striped_set/std_vector.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< std::vector<T>, cds::opt::less< std::less<T> > > striped_set;
    \endcode
    The vector is ordered. Template argument pack \p Options must contain \p cds::opt::less or
    \p cds::opt::compare for the type \p T stored in the vector.

    \p std::set - header <cds/container/striped_set/std_set.h>
    \code
        #include <cds/container/striped_set/std_set.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< std::set< T, std::less<T> > > striped_set;
    \endcode

    \p std::unordered_set - header <cds/container/striped_set/std_hash_set.h>
    \code
        #include <cds/container/striped_set/std_hash_set.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< std::unordered_set< T, hash, equal > > striped_set;
    \endcode
    You should provide two different hash functions \p h1 and \p h2 - one for \p std::unordered_set and the
    other for \p %StripedSet. For the best result, \p h1 and \p h2 must be orthogonal, i.e. h1(X) != h2(X)
    for any value \p X.

    \p boost::container::slist - header <cds/container/striped_set/boost_slist.h>
    \code
        #include <cds/container/striped_set/boost_slist.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::container::slist<T> > striped_set;
    \endcode
    The list is ordered. \p Options must contain \p cds::opt::less or \p cds::opt::compare.

    \p boost::container::list - header <cds/container/striped_set/boost_list.h>
    \code
        #include <cds/container/striped_set/boost_list.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::container::list<T> > striped_set;
    \endcode
    The list is ordered. \p Options must contain \p cds::opt::less or \p cds::opt::compare.

    \p boost::container::vector - header <cds/container/striped_set/boost_vector.h>
    \code
        #include <cds/container/striped_set/boost_vector.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::container::vector<T>, cds::opt::less< std::less<T> > > striped_set;
    \endcode
    The vector is ordered. Template argument pack \p Options must contain \p cds::opt::less or
    \p cds::opt::compare for the type \p T stored in the vector.

    \p boost::container::stable_vector - header <cds/container/striped_set/boost_stable_vector.h>
    \code
        #include <cds/container/striped_set/boost_stable_vector.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::container::stable_vector<T>, cds::opt::less< std::less<T> > > striped_set;
    \endcode
    The vector is ordered. Template argument pack \p Options must contain \p cds::opt::less or
    \p cds::opt::compare for the type \p T stored in the vector.

    \p boost::container::set - header <cds/container/striped_set/boost_set.h>
    \code
        #include <cds/container/striped_set/boost_set.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::container::set< T, std::less<T> > > striped_set;
    \endcode

    \p boost::container::flat_set - header <cds/container/striped_set/boost_flat_set.h>
    \code
        #include <cds/container/striped_set/boost_flat_set.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::container::flat_set< T, std::less<T> > > striped_set;
    \endcode

    \p boost::unordered_set - header <cds/container/striped_set/boost_unordered_set.h>
    \code
        #include <cds/container/striped_set/boost_unordered_set.h>
        #include <cds/container/striped_set.h>
        typedef cds::container::StripedSet< boost::unordered_set< T, hash, equal > > striped_set;
    \endcode
    You should provide two different hash functions \p h1 and \p h2 - one for \p boost::unordered_set and the
    other for \p %StripedSet. For the best result, \p h1 and \p h2 must be orthogonal, i.e. h1(X) != h2(X)
    for any value \p X.
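    As a concrete illustration, here is a minimal usage sketch (the key type \p int is chosen arbitrarily;
    the headers follow the table above):
    \code
        #include <cds/container/striped_set/std_list.h>
        #include <cds/container/striped_set.h>

        typedef cds::container::StripedSet<
            std::list<int>,
            cds::opt::less< std::less<int> >
        > int_striped_set;

        int_striped_set s;
        s.insert( 42 );                 // true: a new key is inserted
        bool found = s.contains( 42 );  // true: the key is present
        s.erase( 42 );                  // true: the key is removed
    \endcode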
    You can use another container type as the set's bucket. Suppose you have a container class
    \p MyBestContainer and you want to integrate it with \p %StripedSet as the bucket type. There are two
    possibilities:
    - either your \p MyBestContainer class has native support of the bucket interface; in this case, you can
      use the default striped_set::adapt metafunction;
    - or your \p MyBestContainer class does not support the bucket interface, which means you should develop
      a specialization of the cds::container::striped_set::adapt metafunction providing the necessary interface.

    The striped_set::adapt< Container, Options... > metafunction has two template arguments:
    - \p Container is the class that should be used as the bucket, for example, std::list< T >.
    - \p Options pack is the options from the \p %StripedSet declaration. The \p adapt metafunction can use
      any option from \p Options for its internal use. For example, a \p compare option can be passed to the
      \p adapt metafunction via the \p Options argument of the \p %StripedSet declaration.

    See the striped_set::adapt metafunction for the description of the interface that the bucket container
    must provide to be %StripedSet compatible.

    Copy policy

    There are three predefined copy policies:
    - \p cds::container::striped_set::copy_item - copy an item from the old bucket to the new one when
      resizing using the copy constructor. This is the default policy for compilers that do not support
      move semantics.
    - \p cds::container::striped_set::move_item - move an item from the old bucket to the new one when
      resizing using move semantics. This is the default policy for compilers that support move semantics.
      If the compiler does not support move semantics, the move policy is the same as \p copy_item.
    - \p cds::container::striped_set::swap_item - copy an item from the old bucket to the new one when
      resizing using \p std::swap. Not all containers support this copy policy, see details in the table below.

    You can define your own copy policy specifically for your case. Note that the right copy policy can
    significantly improve the performance of resizing; an example follows the table below.
Container Policies
    - \p std::list
    - \p std::vector
    - \p boost::list
    - \p boost::vector
    - \p boost::stable_vector
    \code
        struct copy_item {
            void operator()( std::list<T>& list, std::list<T>::iterator itInsert, std::list<T>::iterator itWhat )
            {
                list.insert( itInsert, *itWhat );
            }
        }
    \endcode
    \code
        // The type T stored in the list must be swappable
        struct swap_item {
            void operator()( std::list<T>& list, std::list<T>::iterator itInsert, std::list<T>::iterator itWhat )
            {
                std::swap( *list.insert( itInsert, T()), *itWhat );
            }
        }
    \endcode
    \code
        struct move_item {
            void operator()( std::list<T>& list, std::list<T>::iterator itInsert, std::list<T>::iterator itWhat )
            {
                list.insert( itInsert, std::move( *itWhat ));
            }
        }
    \endcode
    - \p std::set
    - \p std::unordered_set
    \code
        struct copy_item {
            void operator()( std::set<T>& set, std::set<T>::iterator itWhat )
            {
                set.insert( *itWhat );
            }
        }
    \endcode
    \p swap_item is not applicable (same as \p copy_item)
    \code
        struct move_item {
            void operator()( std::set<T>& set, std::set<T>::iterator itWhat )
            {
                set.insert( std::move( *itWhat ));
            }
        }
    \endcode
    - \p boost::container::slist
    \code
        struct copy_item {
            void operator()( bc::slist<T>& list, bc::slist<T>::iterator itInsert, bc::slist<T>::iterator itWhat )
            {
                list.insert_after( itInsert, *itWhat );
            }
        }
    \endcode
    \code
        // The type T stored in the list must be swappable
        struct swap_item {
            void operator()( bc::slist<T>& list, bc::slist<T>::iterator itInsert, bc::slist<T>::iterator itWhat )
            {
                std::swap( *list.insert_after( itInsert, T()), *itWhat );
            }
        }
    \endcode
    \code
        struct move_item {
            void operator()( bc::slist<T>& list, bc::slist<T>::iterator itInsert, bc::slist<T>::iterator itWhat )
            {
                list.insert_after( itInsert, std::move( *itWhat ));
            }
        }
    \endcode
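    As promised above, here is a sketch of selecting a copy policy explicitly via the \p opt::copy_policy
    option (\p MyType is a placeholder for your value type):
    \code
        typedef cds::container::StripedSet<
            std::list<MyType>,
            cds::opt::less< std::less<MyType> >,
            cds::opt::copy_policy< cds::container::striped_set::move_item >
        > striped_set_with_move;
    \endcode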
    Advanced functions

    libcds provides some advanced functions, like \p erase_with() and \p find_with(), that cannot be supported
    by all underlying containers. The table below shows whether the underlying container supports those
    functions (the sign "+" means "the container supports the function"):
    Container \p find_with \p erase_with
\p std::list + +
\p std::vector + +
\p std::set - -
\p std::unordered_set - -
\p boost::container::slist + +
\p boost::container::list + +
\p boost::container::vector + +
\p boost::container::stable_vector + +
\p boost::container::set - -
\p boost::container::flat_set - -
\p boost::unordered_set - -
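    To illustrate, the sketch below uses \p find_with() and \p erase_with() with a \p std::list bucket, which
    supports both functions per the table above. The \p item struct and both predicates are hypothetical; note
    that the predicate must induce the same element order as the set's comparator:
    \code
        struct item {
            int key;
            int payload;
        };
        // comparator used for building the set
        struct item_less {
            bool operator()( item const& lhs, item const& rhs ) const { return lhs.key < rhs.key; }
        };
        // predicate comparing an item with a plain int key; same order as item_less
        struct int_less {
            bool operator()( item const& i, int k ) const { return i.key < k; }
            bool operator()( int k, item const& i ) const { return k < i.key; }
        };

        typedef cds::container::StripedSet<
            std::list<item>,
            cds::opt::less< item_less >
        > striped_set;

        striped_set s;
        s.insert( item{ 5, 100 });
        s.find_with( 5, int_less(), []( item& i, int ) { i.payload = 200; });
        s.erase_with( 5, int_less());
    \endcode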
*/ template class StripedSet: protected intrusive::StripedSet { //@cond typedef intrusive::StripedSet base_class; //@endcond public: //@cond typedef typename base_class::default_options default_options; typedef typename base_class::options options; //@endcond typedef Container underlying_container_type ; ///< original intrusive container type for the bucket typedef typename base_class::bucket_type bucket_type ; ///< container type adapted for hash set typedef typename bucket_type::value_type value_type ; ///< value type stored in the set typedef typename base_class::hash hash ; ///< Hash functor typedef typename base_class::item_counter item_counter ; ///< Item counter typedef typename base_class::resizing_policy resizing_policy ; ///< Resizing policy typedef typename base_class::allocator_type allocator_type ; ///< allocator type specified in options. typedef typename base_class::mutex_policy mutex_policy ; ///< Mutex policy protected: //@cond typedef typename base_class::scoped_cell_lock scoped_cell_lock; typedef typename base_class::scoped_full_lock scoped_full_lock; typedef typename base_class::scoped_resize_lock scoped_resize_lock; //@endcond public: /// Default ctor. The initial capacity is 16. StripedSet() : base_class() {} /// Ctor with initial capacity specified StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ) : base_class( nCapacity ) {} /// Ctor with resizing policy (copy semantics) /** This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter */ StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ,resizing_policy const& resizingPolicy ///< Resizing policy ) : base_class( nCapacity, resizingPolicy ) {} /// Ctor with resizing policy (move semantics) /** This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter Move semantics is used. Available only for the compilers that supports C++11 rvalue reference. */ StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ,resizing_policy&& resizingPolicy ///< Resizing policy ) : base_class( nCapacity, std::forward(resizingPolicy)) {} /// Destructor destroys internal data ~StripedSet() {} public: /// Inserts new node /** The function creates a node with copy of \p val value and then inserts the node created into the set. The type \p Q should contain as minimum the complete key for the node. The object of \p value_type should be constructible from a value of type \p Q. In trivial case, \p Q is equal to \p value_type. Returns \p true if \p val is inserted into the set, \p false otherwise. */ template bool insert( Q const& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-field of new item . The functor signature is: \code void func( value_type& item ); \endcode where \p item is the item inserted. The type \p Q can differ from \p value_type of items storing in the set. Therefore, the \p value_type should be constructible from type \p Q. The user-defined functor is called only if the inserting is success. 
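    For example, assuming a hypothetical value type that is constructible from its integer key:
    \code
        struct value {
            int key;
            int payload;
            value( int k = 0 ): key( k ), payload( 0 ) {}
        };
        struct value_less {
            bool operator()( value const& lhs, value const& rhs ) const { return lhs.key < rhs.key; }
        };
        typedef cds::container::StripedSet<
            std::list<value>,
            cds::opt::less< value_less >
        > striped_set;

        striped_set s;
        // the functor runs only if a new element has actually been inserted
        s.insert( 10, []( striped_set::value_type& v ) { v.payload = 1000; });
    \endcode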
*/ template bool insert( Q const& val, Func f ) { bool bOk; bool bResize; size_t nHash = base_class::hashing( val ); bucket_type * pBucket; { scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); pBucket = base_class::bucket( nHash ); bOk = pBucket->insert( val, f ); bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); } if ( bResize ) base_class::resize(); return bOk; } /// Inserts data of type \p %value_type constructed with std::forward(args)... /** Returns \p true if inserting successful, \p false otherwise. */ template bool emplace( Args&&... args ) { bool bOk; bool bResize; value_type val( std::forward( args )... ); size_t nHash = base_class::hashing( val ); bucket_type * pBucket; { scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); pBucket = base_class::bucket( nHash ); bOk = pBucket->emplace( std::move( val )); bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); } if ( bResize ) base_class::resize(); return bOk; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If \p key is not found in the set, then \p key is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function The functor may change non-key fields of the \p item. Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p key already is in the map. */ template std::pair update( Q const& val, Func func, bool bAllowInsert = true ) { std::pair result; bool bResize = false; size_t nHash = base_class::hashing( val ); bucket_type * pBucket; { scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); pBucket = base_class::bucket( nHash ); result = pBucket->update( val, func, bAllowInsert ); if ( result.first && result.second ) bResize = base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); } if ( bResize ) base_class::resize(); return result; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( Q const& val, Func func ) { return update( val, func, true ); } //@endcond /// Delete \p key from the set /** \anchor cds_nonintrusive_StripedSet_erase The set item comparator should be able to compare the type \p value_type and the type \p Q. Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key ) { return erase( key, [](value_type const&) {} ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_StripedSet_erase "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. @note This function is enabled if the compiler supports C++11 default template arguments for function template and the underlying container supports \p %erase_with feature. 
*/ template < typename Q, typename Less ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > bool erase_with( Q const& key, Less pred ) { return erase_with( key, pred, [](value_type const&) {} ); } /// Delete \p key from the set /** \anchor cds_nonintrusive_StripedSet_erase_func The function searches an item with key \p key, calls \p f functor with item found and deletes it. If \p key is not found, the functor is not called. The functor \p Func interface is: \code struct functor { void operator()(value_type const& val); }; \endcode Return \p true if key is found and deleted, \p false otherwise */ template bool erase( Q const& key, Func f ) { bool bOk; size_t nHash = base_class::hashing( key ); { scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); bucket_type * pBucket = base_class::bucket( nHash ); bOk = pBucket->erase( key, f ); } if ( bOk ) --base_class::m_ItemCounter; return bOk; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_nonintrusive_StripedSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. @note This function is enabled if the compiler supports C++11 default template arguments for function template and the underlying container supports \p %erase_with feature. */ template < typename Q, typename Less, typename Func , typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > bool erase_with( Q const& key, Less pred, Func f ) { bool bOk; size_t nHash = base_class::hashing( key ); { scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); bucket_type * pBucket = base_class::bucket( nHash ); bOk = pBucket->erase( key, pred, f ); } if ( bOk ) --base_class::m_ItemCounter; return bOk; } /// Find the key \p val /** \anchor cds_nonintrusive_StripedSet_find_func The function searches the item with key equal to \p val and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor can change non-key fields of \p item. The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. The type \p Q can differ from \p value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& val, Func f ) { return base_class::find( val, f ); } /// Find the key \p val using \p pred predicate /** The function is an analog of \ref cds_nonintrusive_StripedSet_find_func "find(Q&, Func)" but \p pred is used for key comparing \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. @note This function is enabled if the compiler supports C++11 default template arguments for function template and the underlying container supports \p %find_with feature. */ template ::type > bool find_with( Q& val, Less pred, Func f ) { return base_class::find_with( val, pred, f ); } /// Find the key \p val /** \anchor cds_nonintrusive_StripedSet_find_cfunc The function searches the item with key equal to \p val and calls the functor \p f for item found. 
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q const& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor can change non-key fields of \p item. The type \p Q can differ from \p value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q const& val, Func f ) { return base_class::find( val, f ); } /// Find the key \p val using \p pred predicate /** The function is an analog of \ref cds_nonintrusive_StripedSet_find_cfunc "find(Q const&, Func)" but \p pred is used for key comparing \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. @note This function is enabled if the compiler supports C++11 default template arguments for function template and the underlying container supports \p %find_with feature. */ template ::type > bool find_with( Q const& val, Less pred, Func f ) { return base_class::find_with( val, pred, f ); } /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. */ template bool contains( Q const& key ) { return base_class::contains( key ); } //@cond template CDS_DEPRECATED("use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the map. */ template ::type > bool contains( Q const& key, Less pred ) { return base_class::contains( key, pred ); } //@cond template ::type > CDS_DEPRECATED("use contains()") bool find_with( Q const& val, Less pred ) { return contains( val, pred ); } //@endcond /// Clears the set /** The function erases all items from the set. */ void clear() { return base_class::clear(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. */ bool empty() const { return base_class::empty(); } /// Returns item count in the set size_t size() const { return base_class::size(); } /// Returns the size of hash table /** The hash table size is non-constant and can be increased via resizing. 
*/ size_t bucket_count() const { return base_class::bucket_count(); } /// Returns lock array size size_t lock_count() const { return base_class::lock_count(); } /// Returns resizing policy object resizing_policy& get_resizing_policy() { return base_class::get_resizing_policy(); } /// Returns resizing policy (const version) resizing_policy const& get_resizing_policy() const { return base_class::get_resizing_policy(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_H libcds-2.3.3/cds/container/striped_set/000077500000000000000000000000001341244201700200355ustar00rootroot00000000000000libcds-2.3.3/cds/container/striped_set/adapter.h000066400000000000000000000517751341244201700216450ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_ADAPTER_H #include #include namespace cds { namespace container { /// Striped hash set related definitions namespace striped_set { //@cond struct copy_item ; // copy_item_policy tag template struct copy_item_policy; struct swap_item ; // swap_item_policy tag template struct swap_item_policy; struct move_item ; // move_item_policy tag template struct move_item_policy; //@endcond #ifdef CDS_DOXYGEN_INVOKED /// Default adapter for hash set /** By default, the metafunction does not make any transformation for container type \p Container. \p Container should provide interface suitable for the hash set. The \p Options template argument contains a list of options that has been passed to cds::container::StripedSet. Bucket interface The result of metafunction is a container (a bucket) that should support the following interface: Public typedefs that the bucket should provide: - \p value_type - the type of the item in the bucket - \p iterator - bucket's item iterator - \p const_iterator - bucket's item constant iterator - \p default_resizing_policy - defalt resizing policy preferable for the container. By default, the library defines striped_set::load_factor_resizing<4> for sequential containers like std::list, std::vector, and striped_set::no_resizing for ordered container like std::set, std::unordered_set. Insert value \p val of type \p Q \code template bool insert( const Q& val, Func f ) ; \endcode The function allows to split creating of new item into two part: - create item with key only from \p val - try to insert new item into the container - if inserting is success, calls \p f functor to initialize value-field of the new item. The functor signature is: \code void func( value_type& item ); \endcode where \p item is the item inserted. The type \p Q can differ from \ref value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, The user-defined functor is called only if the inserting is success.
Inserts data of type \ref value_type constructed with std::forward(args)... \code template bool emplace( Args&&... args ) ; \endcode Returns \p true if inserting successful, \p false otherwise. This function should be available only for compiler that supports variadic template and move semantics
Updates \p item \code template std::pair update( const Q& val, Func func, bool bAllowInsert ) \endcode The operation performs inserting or changing data. If the \p val key not found in the container, then the new item created from \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The \p Func functor has interface: \code void func( bool bNew, value_type& item, const Q& val ); \endcode or like a functor: \code struct my_functor { void operator()( bool bNew, value_type& item, const Q& val ); }; \endcode where arguments are: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - container's item - \p val - argument \p val passed into the \p update() function The functor can change non-key fields of the \p item. The type \p Q can differ from \ref value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, Returns std::pair where \p first is true if operation is successful, \p second is true if new item has been added or \p false if the item with \p val key already exists.
Delete \p key \code template bool erase( const Q& key, Func f ) \endcode The function searches an item with key \p key, calls \p f functor and deletes the item. If \p key is not found, the functor is not called. The functor \p Func interface is: \code struct extractor { void operator()(value_type const& val); }; \endcode The type \p Q can differ from \ref value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q. Return \p true if key is found and deleted, \p false otherwise
Find the key \p val \code template bool find( Q& val, Func f ) \endcode The function searches the item with key equal to \p val and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor can change non-key fields of \p item. The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. The type \p Q can differ from \ref value_type of items storing in the container. Therefore, the \p value_type should be comparable with type \p Q. The function returns \p true if \p val is found, \p false otherwise.
Clears the container \code void clear() \endcode
Get size of bucket \code size_t size() const \endcode This function can be required by some resizing policy
Move item when resizing \code void move_item( adapted_container& from, iterator it ) \endcode This helper function is invented for the set resizing when the item pointed by \p it iterator is copied from an old bucket \p from to a new bucket pointed by \p this.
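    By way of illustration, a skeleton of such a specialization might look like this (\p MyBestContainer is
    hypothetical; each member must implement the contract described above, as the specializations shipped with
    libcds do for the std:: and boost:: containers):
    \code
        namespace cds { namespace intrusive { namespace striped_set {
            template <typename T, typename... Options>
            class adapt< MyBestContainer<T>, Options... >
            {
            public:
                typedef MyBestContainer<T> container_type;   // underlying container type
            private:
                // the adapted bucket; must provide the interface described above
                class adapted_container: public cds::container::striped_set::adapted_container
                {
                    container_type m_Container;
                public:
                    typedef typename container_type::value_type value_type;
                    // iterators, has_find_with / has_erase_with constants,
                    // insert(), emplace(), update(), erase(), find(),
                    // clear(), size(), move_item() go here
                };
            public:
                typedef adapted_container type;  ///< result of the adapt metafunction
            };
        }}} // namespace cds::intrusive::striped_set
    \endcode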
*/ template < typename Container, typename... Options> class adapt { public: typedef Container type ; ///< adapted container type typedef typename type::value_type value_type ; ///< value type stored in the container }; #else // CDS_DOXYGEN_INVOKED using cds::intrusive::striped_set::adapt; #endif //@cond using cds::intrusive::striped_set::adapted_sequential_container; using cds::intrusive::striped_set::adapted_container; //@endcond ///@copydoc cds::intrusive::striped_set::load_factor_resizing template using load_factor_resizing = cds::intrusive::striped_set::load_factor_resizing; ///@copydoc cds::intrusive::striped_set::rational_load_factor_resizing template using rational_load_factor_resizing = cds::intrusive::striped_set::rational_load_factor_resizing; ///@copydoc cds::intrusive::striped_set::single_bucket_size_threshold template using single_bucket_size_threshold = cds::intrusive::striped_set::single_bucket_size_threshold; ///@copydoc cds::intrusive::striped_set::no_resizing typedef cds::intrusive::striped_set::no_resizing no_resizing; ///@copydoc cds::intrusive::striped_set::striping template using striping = cds::intrusive::striped_set::striping; ///@copydoc cds::intrusive::striped_set::refinable template < class RecursiveLock = std::recursive_mutex, typename BackOff = cds::backoff::yield, class Alloc = CDS_DEFAULT_ALLOCATOR > using refinable = cds::intrusive::striped_set::refinable; //@cond namespace details { template struct boost_set_copy_policies { struct copy_item_policy { typedef Set set_type; typedef typename set_type::iterator iterator; void operator()( set_type& set, iterator itWhat ) { set.insert( *itWhat ); } }; typedef copy_item_policy swap_item_policy; struct move_item_policy { typedef Set set_type; typedef typename set_type::iterator iterator; void operator()( set_type& set, iterator itWhat ) { set.insert( std::move( *itWhat )); } }; }; template class boost_set_adapter: public striped_set::adapted_container { public: typedef Set container_type; typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = false; static bool const has_erase_with = false; private: typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, copy_item_policy , cds::container::striped_set::swap_item, swap_item_policy , cds::container::striped_set::move_item, move_item_policy >::type copy_item; private: container_type m_Set; public: boost_set_adapter() {} container_type& base_container() { return m_Set; } template bool insert( const Q& val, Func f ) { std::pair res = m_Set.insert( value_type(val)); if ( res.second ) f( const_cast(*res.first)); return res.second; } template bool emplace( Args&&... args ) { std::pair res = m_Set.emplace( std::forward(args)... 
); return res.second; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Set.insert( value_type(val)); func( res.second, const_cast(*res.first), val ); return std::make_pair( true, res.second ); } else { auto it = m_Set.find( value_type( val )); if ( it == m_Set.end()) return std::make_pair( false, false ); func( false, const_cast(*it), val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { const_iterator it = m_Set.find( value_type(key)); if ( it == m_Set.end()) return false; f( const_cast(*it)); m_Set.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = m_Set.find( value_type(val)); if ( it == m_Set.end()) return false; f( const_cast(*it), val ); return true; } void clear() { m_Set.clear(); } iterator begin() { return m_Set.begin(); } const_iterator begin() const { return m_Set.begin(); } iterator end() { return m_Set.end(); } const_iterator end() const { return m_Set.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { assert( m_Set.find( *itWhat ) == m_Set.end()); copy_item()( m_Set, itWhat ); } size_t size() const { return m_Set.size(); } }; template struct boost_map_copy_policies { struct copy_item_policy { typedef Map map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { map.insert( *itWhat ); } }; struct swap_item_policy { typedef Map map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { std::pair< iterator, bool > ret = map.insert( pair_type( itWhat->first, typename pair_type::second_type())); assert( ret.second ) ; // successful insertion std::swap( ret.first->second, itWhat->second ); } }; struct move_item_policy { typedef Map map_type; typedef typename map_type::value_type pair_type; typedef typename map_type::iterator iterator; void operator()( map_type& map, iterator itWhat ) { map.insert( std::move( *itWhat )); } }; }; template class boost_map_adapter: public striped_set::adapted_container { public: typedef Map container_type; typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::key_type key_type; typedef typename container_type::mapped_type mapped_type; typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = false; static bool const has_erase_with = false; private: typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, copy_item_policy , cds::container::striped_set::swap_item, swap_item_policy , cds::container::striped_set::move_item, move_item_policy >::type copy_item; private: container_type m_Map; public: template bool insert( const Q& key, Func f ) { std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); if ( res.second ) f( *res.first ); return res.second; } template bool emplace( Q&& key, Args&&... 
args ) { std::pair res = m_Map.emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); return res.second; } template std::pair update( const Q& key, Func func, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); func( res.second, *res.first ); return std::make_pair( true, res.second ); } else { auto it = m_Map.find( key_type( key )); if ( it == end()) return std::make_pair( false, false ); func( false, *it ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = m_Map.find( key_type( key )); if ( it == m_Map.end()) return false; f( *it ); m_Map.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = m_Map.find( key_type( val )); if ( it == m_Map.end()) return false; f( *it, val ); return true; } void clear() { m_Map.clear(); } iterator begin() { return m_Map.begin(); } const_iterator begin() const { return m_Map.begin(); } iterator end() { return m_Map.end(); } const_iterator end() const { return m_Map.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { assert( m_Map.find( itWhat->first ) == m_Map.end()); copy_item()( m_Map, itWhat ); } size_t size() const { return m_Map.size(); } }; } // namespace details //@endcond } // namespace striped_set }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_flat_set.h000066400000000000000000000045531341244201700232240ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::flat_set you must use boost 1.48 or above" #endif #include #include //#if CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION >= 1700 //# error "boost::container::flat_set is not compatible with MS VC++ 11" //#endif //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::container::flat_set template struct copy_item_policy< boost::container::flat_set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::copy_item_policy {}; // Swap policy is not defined for boost::container::flat_set template struct swap_item_policy< boost::container::flat_set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::swap_item_policy {}; // Move policy for boost::container::flat_set template struct move_item_policy< boost::container::flat_set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::move_item_policy {}; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { template class adapt< boost::container::flat_set, Options... > { public: typedef boost::container::flat_set container_type ; ///< underlying container type typedef cds::container::striped_set::details::boost_set_adapter< container_type, Options... 
> type; }; }}} //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_list.h000066400000000000000000000231711341244201700223730ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::list you must use boost 1.48 or above" #endif #include // std::lower_bound #include // ref #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::container::list template struct copy_item_policy< boost::container::list< T, Alloc > > { typedef boost::container::list< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { itInsert = list.insert( itInsert, *itWhat ); } }; // Swap policy for boost::container::list template struct swap_item_policy< boost::container::list< T, Alloc > > { typedef boost::container::list< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { typename list_type::value_type newVal; itInsert = list.insert( itInsert, newVal ); std::swap( *itWhat, *itInsert ); } }; // Move policy for boost::container::list template struct move_item_policy< boost::container::list< T, Alloc > > { typedef boost::container::list< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::container::list adapter for hash set bucket template class adapt< boost::container::list, Options... > { public: typedef boost::container::list container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... 
>::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1, i2 ) < 0; } }; //@endcond private: //@cond container_type m_List; //@endcond public: adapted_container() {} template bool insert( Q const& val, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { value_type newItem( val ); it = m_List.insert( it, newItem ); f( *it ); return true; } // key already exists return false; } template bool emplace( Args&&... args ) { value_type val( std::forward(args)... ); iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { m_List.emplace( it, std::move( val )); return true; } return false; } template std::pair update( Q const& val, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); value_type newItem( val ); it = m_List.insert( it, newItem ); func( true, *it, val ); return std::make_pair( true, true ); } else { // already exists func( false, *it, val ); return std::make_pair( true, false ); } } template bool erase( Q const& key, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) return false; // key exists f( *it ); m_List.erase( it ); return true; } template bool erase( Q const& key, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) return false; // key exists f( *it ); m_List.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); if ( it == m_List.end() || pred( val, *it ) || pred( *it, val )) return false; // key exists f( *it, val ); return true; } /// Clears the container void clear() { m_List.clear(); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); assert( it == m_List.end() || key_comparator()( *itWhat, *it ) != 0 ); copy_item()( m_List, it, itWhat ); } size_t size() const { return m_List.size(); } }; public: typedef adapted_container type ; ///< Result of \p 
adapt metafunction }; }}} // namespace cds::intrsive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_set.h000066400000000000000000000044311341244201700222110ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::set you must use boost 1.48 or above" #endif #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::container::set template struct copy_item_policy< boost::container::set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::copy_item_policy {}; // Copy policy for boost::container::set template struct swap_item_policy< boost::container::set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::swap_item_policy {}; // Swap policy is not defined for boost::container::set // Move policy for boost::container::set template struct move_item_policy< boost::container::set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::move_item_policy {}; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::container::flat_set adapter for hash set bucket template class adapt< boost::container::set, Options... > { public: typedef boost::container::set container_type ; ///< underlying container type typedef cds::container::striped_set::details::boost_set_adapter< container_type, Options... > type; }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_slist.h000066400000000000000000000233771341244201700225660ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H #include // ref #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::container::slist template struct copy_item_policy< boost::container::slist< T, Alloc > > { typedef boost::container::slist< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert_after( itInsert, *itWhat ); } }; // Swap policy for boost::container::slist template struct swap_item_policy< boost::container::slist< T, Alloc > > { typedef boost::container::slist< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { T newVal; itInsert = list.insert_after( itInsert, newVal ); std::swap( *itInsert, *itWhat ); } }; // Move policy for boost::container::slist template struct move_item_policy< boost::container::slist< T, Alloc > > { typedef boost::container::slist< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert_after( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::container::slist adapter for hash set bucket template class adapt< boost::container::slist, Options... > { public: typedef boost::container::slist container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... 
>::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; template std::pair< iterator, bool > find_prev_item( Q const& key ) { iterator itPrev = m_List.before_begin(); iterator itEnd = m_List.end(); for ( iterator it = m_List.begin(); it != itEnd; ++it ) { int nCmp = key_comparator()( key, *it ); if ( nCmp < 0 ) itPrev = it; else if ( nCmp > 0 ) break; else return std::make_pair( itPrev, true ); } return std::make_pair( itPrev, false ); } template std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) { iterator itPrev = m_List.before_begin(); iterator itEnd = m_List.end(); for ( iterator it = m_List.begin(); it != itEnd; ++it ) { if ( pred( key, *it )) itPrev = it; else if ( pred( *it, key )) break; else return std::make_pair( itPrev, true ); } return std::make_pair( itPrev, false ); } //@endcond private: //@cond container_type m_List; //@endcond public: adapted_container() {} template bool insert( const Q& val, Func f ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) { value_type newItem( val ); pos.first = m_List.insert_after( pos.first, newItem ); f( *pos.first ); return true; } // key already exists return false; } template bool emplace( Args&&... args ) { value_type val( std::forward(args)... ); std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) { m_List.emplace_after( pos.first, std::move( val )); return true; } return false; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); value_type newItem( val ); pos.first = m_List.insert_after( pos.first, newItem ); func( true, *pos.first, val ); return std::make_pair( true, true ); } else { // already exists func( false, *(++pos.first), val ); return std::make_pair( true, false ); } } template bool erase( Q const& key, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key ); if ( !pos.second ) return false; // key exists iterator it = pos.first; f( *(++it)); m_List.erase_after( pos.first ); return true; } template bool erase( Q const& key, Less pred, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key, pred ); if ( !pos.second ) return false; // key exists iterator it = pos.first; f( *(++it)); m_List.erase_after( pos.first ); return true; } template bool find( Q& val, Func f ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) return false; // key exists f( *(++pos.first), val ); return true; } template bool find( Q& val, Less pred, Func f ) { std::pair< iterator, bool > pos = find_prev_item( val, pred ); if ( !pos.second ) return false; // key exists f( *(++pos.first), val ); return true; } void clear() { m_List.clear(); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { std::pair< iterator, bool > pos = find_prev_item( *itWhat ); assert( !pos.second ); copy_item()( m_List, pos.first, itWhat ); } size_t size() const { return m_List.size(); } }; public: typedef adapted_container type ; ///< Result of 
\p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_stable_vector.h000066400000000000000000000234601341244201700242550ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::stable_vector you must use boost 1.48 or above" #endif #include // ref #include // std::lower_bound #include // std::pair #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::container::stable_vector template struct copy_item_policy< boost::container::stable_vector< T, Alloc > > { typedef boost::container::stable_vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { vec.insert( itInsert, *itWhat ); } }; // Swap policy for boost::container::stable_vector template struct swap_item_policy< boost::container::stable_vector< T, Alloc > > { typedef boost::container::stable_vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { typename vector_type::value_type newVal; itInsert = vec.insert( itInsert, newVal ); std::swap( *itInsert, *itWhat ); } }; // Move policy for boost::container::stable_vector template struct move_item_policy< boost::container::stable_vector< T, Alloc > > { typedef boost::container::stable_vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { vec.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::container::stable_vector adapter for hash set bucket template class adapt< boost::container::stable_vector, Options... > { public: typedef boost::container::stable_vector container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... 
>::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1, i2 ) < 0; } }; //@endcond private: //@cond container_type m_Vector; //@endcond public: template bool insert( const Q& val, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { value_type newItem( val ); it = m_Vector.insert( it, newItem ); f( *it ); return true; } return false; } template bool emplace( Args&&... args ) { value_type val( std::forward(args)... ); iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { it = m_Vector.emplace( it, std::move( val )); return true; } return false; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); value_type newItem( val ); it = m_Vector.insert( it, newItem ); func( true, *it, val ); return std::make_pair( true, true ); } else { // already exists func( false, *it, val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate()); if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) return false; // key exists f( *it ); m_Vector.erase( it ); return true; } template bool erase( const Q& key, Less pred, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key )) return false; // key exists f( *it ); m_Vector.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val )) return false; // key exists f( *it, val ); return true; } /// Clears the container void clear() { m_Vector.clear(); } iterator begin() { return m_Vector.begin(); } const_iterator begin() const { return m_Vector.begin(); } iterator end() { return m_Vector.end(); } const_iterator end() const { return m_Vector.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate()); assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); copy_item()( m_Vector, it, itWhat ); } size_t size() const { return m_Vector.size(); } }; public: typedef 
adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_unordered_set.h000066400000000000000000000040541341244201700242610ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::unordered_set template struct copy_item_policy< boost::unordered_set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::copy_item_policy {}; template struct swap_item_policy< boost::unordered_set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::swap_item_policy {}; // Move policy for boost::unordered_set template struct move_item_policy< boost::unordered_set< T, Traits, Alloc > > : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::move_item_policy {}; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::unordered_set adapter for hash set bucket template class adapt< boost::unordered_set, Options... > { public: typedef boost::unordered_set container_type ; ///< underlying container type typedef cds::container::striped_set::details::boost_set_adapter< container_type, Options... > type; }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H libcds-2.3.3/cds/container/striped_set/boost_vector.h000066400000000000000000000233161341244201700227230ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H #include #if BOOST_VERSION < 104800 # error "For boost::container::vector you must use boost 1.48 or above" #endif #include // ref #include // std::lower_bound #include // std::pair #include // lower_bound #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for boost::container::vector template struct copy_item_policy< boost::container::vector< T, Alloc > > { typedef boost::container::vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { vec.insert( itInsert, *itWhat ); } }; // Swap policy for boost::container::vector template struct swap_item_policy< boost::container::vector< T, Alloc > > { typedef boost::container::vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { typename vector_type::value_type newVal; itInsert = vec.insert( itInsert, newVal ); std::swap( *itInsert, *itWhat ); } }; // Move policy for boost::container::vector template struct move_item_policy< boost::container::vector< T, Alloc > > { typedef boost::container::vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { vec.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// boost::container::vector adapter for hash set bucket template class adapt< boost::container::vector, Options... > { public: typedef boost::container::vector container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... 
>::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1, i2 ) < 0; } }; //@endcond private: //@cond container_type m_Vector; //@endcond public: template bool insert( const Q& val, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { value_type newItem( val ); it = m_Vector.insert( it, newItem ); f( *it ); return true; } return false; } template bool emplace( Args&&... args ) { value_type val( std::forward(args)... ); iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { it = m_Vector.emplace( it, std::move( val )); return true; } return false; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); value_type newItem( val ); it = m_Vector.insert( it, newItem ); func( true, *it, val ); return std::make_pair( true, true ); } else { // already exists func( false, *it, val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate()); if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) return false; // key exists f( *it ); m_Vector.erase( it ); return true; } template bool erase( Q const& key, Less pred, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key )) return false; // key exists f( *it ); m_Vector.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val )) return false; // key exists f( *it, val ); return true; } /// Clears the container void clear() { m_Vector.clear(); } iterator begin() { return m_Vector.begin(); } const_iterator begin() const { return m_Vector.begin(); } iterator end() { return m_Vector.end(); } const_iterator end() const { return m_Vector.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate()); assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); copy_item()( m_Vector, it, itWhat ); } size_t size() const { return m_Vector.size(); } }; public: typedef 
adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H libcds-2.3.3/cds/container/striped_set/std_hash_set.h000066400000000000000000000150111341244201700226540ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for std::unordered_set template struct copy_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > { typedef std::unordered_set< T, Hash, Pred, Alloc > set_type; typedef typename set_type::iterator iterator; void operator()( set_type& set, iterator itWhat ) { set.insert( *itWhat ); } }; template struct swap_item_policy< std::unordered_set< T, Hash, Pred, Alloc > >: public copy_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > {}; // Move policy for std::unordered_set template struct move_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > { typedef std::unordered_set< T, Hash, Pred, Alloc > set_type; typedef typename set_type::iterator iterator; void operator()( set_type& set, iterator itWhat ) { set.insert( std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::unordered_set adapter for hash set bucket template class adapt< std::unordered_set, Options... > { public: typedef std::unordered_set container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = false; static bool const has_erase_with = false; private: //@cond typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy // not defined , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; //@endcond private: //@cond container_type m_Set; //@endcond public: template bool insert( const Q& val, Func f ) { std::pair res = m_Set.insert( value_type(val)); if ( res.second ) f( const_cast(*res.first)); return res.second; } template bool emplace( Args&&... args ) { std::pair res = m_Set.emplace( std::forward(args)... 
); return res.second; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Set.insert( value_type(val)); func( res.second, const_cast(*res.first), val ); return std::make_pair( true, res.second ); } else { auto it = m_Set.find( value_type(val)); if ( it == m_Set.end()) return std::make_pair( false, false ); func( false, const_cast(*it), val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { const_iterator it = m_Set.find( value_type(key)); if ( it == m_Set.end()) return false; f( const_cast(*it)); m_Set.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = m_Set.find( value_type(val)); if ( it == m_Set.end()) return false; f( const_cast(*it), val ); return true; } /// Clears the container void clear() { m_Set.clear(); } iterator begin() { return m_Set.begin(); } const_iterator begin() const { return m_Set.begin(); } iterator end() { return m_Set.end(); } const_iterator end() const { return m_Set.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { assert( m_Set.find( *itWhat ) == m_Set.end()); copy_item()( m_Set, itWhat ); } size_t size() const { return m_Set.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H libcds-2.3.3/cds/container/striped_set/std_list.h000066400000000000000000000251441341244201700220410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H #include // ref #include #include // std::lower_bound #include #undef CDS_STD_LIST_SIZE_CXX11_CONFORM #if !( defined(__GLIBCXX__ ) && (!defined(_GLIBCXX_USE_CXX11_ABI) || _GLIBCXX_USE_CXX11_ABI == 0 )) # define CDS_STD_LIST_SIZE_CXX11_CONFORM #endif //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for std::list template struct copy_item_policy< std::list< T, Alloc > > { typedef std::list< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { itInsert = list.insert( itInsert, *itWhat ); } }; // Swap policy for std::list template struct swap_item_policy< std::list< T, Alloc > > { typedef std::list< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { typename list_type::value_type newVal; itInsert = list.insert( itInsert, newVal ); std::swap( *itWhat, *itInsert ); } }; // Move policy for std::list template struct move_item_policy< std::list< T, Alloc > > { typedef std::list< T, Alloc > list_type; typedef typename list_type::iterator iterator; void operator()( list_type& list, iterator itInsert, iterator itWhat ) { list.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::list adapter for hash set bucket template class adapt< std::list, Options... 
> { public: typedef std::list container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1, i2 ) < 0; } }; //@endcond private: //@cond container_type m_List; # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) // GCC C++ lib bug: // In GCC, the complexity of std::list::size() is O(N) // (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49561) // Fixed in GCC 5 size_t m_nSize ; // list size # endif //@endcond public: adapted_container() # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) : m_nSize(0) # endif {} template bool insert( const Q& val, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { value_type newItem( val ); it = m_List.insert( it, newItem ); f( *it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif return true; } // key already exists return false; } template bool emplace( Args&&... 
args ) { value_type val(std::forward(args)...); iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { it = m_List.emplace( it, std::move( val )); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif return true; } return false; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); value_type newItem( val ); it = m_List.insert( it, newItem ); func( true, *it, val ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif return std::make_pair( true, true ); } else { // already exists func( false, *it, val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) return false; // key exists f( *it ); m_List.erase( it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) --m_nSize; # endif return true; } template bool erase( Q const& key, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) return false; // key exists f( *it ); m_List.erase( it ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) --m_nSize; # endif return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); if ( it == m_List.end() || pred( val, *it ) || pred( *it, val )) return false; // key exists f( *it, val ); return true; } /// Clears the container void clear() { m_List.clear(); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); assert( it == m_List.end() || key_comparator()( *itWhat, *it ) != 0 ); copy_item()( m_List, it, itWhat ); # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) ++m_nSize; # endif } size_t size() const { # if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) return m_nSize; # else return m_List.size(); # endif } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H libcds-2.3.3/cds/container/striped_set/std_set.h000066400000000000000000000144001341244201700216520ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H #include #include //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for std::set template struct copy_item_policy< std::set< T, Traits, Alloc > > { typedef std::set< T, Traits, Alloc > set_type; typedef typename set_type::iterator iterator; void operator()( set_type& set, iterator itWhat ) { set.insert( *itWhat ); } }; template struct swap_item_policy< std::set< T, Traits, Alloc > >: public copy_item_policy< std::set< T, Traits, Alloc > > {}; // Move policy for std::set template struct move_item_policy< std::set< T, Traits, Alloc > > { typedef std::set< T, Traits, Alloc > set_type; typedef typename set_type::iterator iterator; void operator()( set_type& set, iterator itWhat ) { set.insert( std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::set adapter for hash set bucket template class adapt< std::set, Options... > { public: typedef std::set container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = false; static bool const has_erase_with = false; private: //@cond typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; //@endcond private: //@cond container_type m_Set; //@endcond public: template bool insert( const Q& val, Func f ) { std::pair res = m_Set.insert( value_type(val)); if ( res.second ) f( const_cast(*res.first)); return res.second; } template bool emplace( Args&&... args ) { std::pair res = m_Set.emplace( std::forward(args)... 
); return res.second; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Set.insert( value_type(val)); func( res.second, const_cast(*res.first), val ); return std::make_pair( true, res.second ); } else { auto it = m_Set.find(value_type(val)); if ( it == m_Set.end()) return std::make_pair( false, false ); func( false, const_cast(*it), val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = m_Set.find( value_type(key)); if ( it == m_Set.end()) return false; f( const_cast(*it)); m_Set.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = m_Set.find( value_type(val)); if ( it == m_Set.end()) return false; f( const_cast(*it), val ); return true; } void clear() { m_Set.clear(); } iterator begin() { return m_Set.begin(); } const_iterator begin() const { return m_Set.begin(); } iterator end() { return m_Set.end(); } const_iterator end() const { return m_Set.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { assert( m_Set.find( *itWhat ) == m_Set.end()); copy_item()( m_Set, itWhat ); } size_t size() const { return m_Set.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H libcds-2.3.3/cds/container/striped_set/std_vector.h000066400000000000000000000225531341244201700223710ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H #define CDSLIB_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H #include // ref #include #include // std::lower_bound #include // std::pair #include // lower_bound //@cond namespace cds { namespace container { namespace striped_set { // Copy policy for std::vector template struct copy_item_policy< std::vector< T, Alloc > > { typedef std::vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { vec.insert( itInsert, *itWhat ); } }; // Swap policy for std::vector template struct swap_item_policy< std::vector< T, Alloc > > { typedef std::vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { typename vector_type::value_type newVal; itInsert = vec.insert( itInsert, newVal ); std::swap( *itInsert, *itWhat ); } }; // Move policy for std::vector template struct move_item_policy< std::vector< T, Alloc > > { typedef std::vector< T, Alloc > vector_type; typedef typename vector_type::iterator iterator; void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) { vec.insert( itInsert, std::move( *itWhat )); } }; } // namespace striped_set }} // namespace cds::container namespace cds { namespace intrusive { namespace striped_set { /// std::vector adapter for hash set bucket template class adapt< std::vector, Options... 
> { public: typedef std::vector container_type ; ///< underlying container type private: /// Adapted container type class adapted_container: public cds::container::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator static bool const has_find_with = true; static bool const has_erase_with = true; private: //@cond typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; typedef typename cds::opt::select< typename cds::opt::value< typename cds::opt::find_option< cds::opt::copy_policy< cds::container::striped_set::move_item > , Options... >::type >::copy_policy , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy >::type copy_item; struct find_predicate { bool operator()( value_type const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( Q const& i1, value_type const& i2) const { return key_comparator()( i1, i2 ) < 0; } template bool operator()( value_type const& i1, Q const& i2) const { return key_comparator()( i1, i2 ) < 0; } }; //@endcond private: //@cond container_type m_Vector; //@endcond public: template bool insert( const Q& val, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { value_type newItem( val ); it = m_Vector.insert( it, newItem ); f( *it ); return true; } return false; } template bool emplace( Args&&... args ) { value_type val( std::forward(args)... 
); iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { it = m_Vector.emplace( it, std::move( val )); return true; } return false; } template std::pair update( const Q& val, Func func, bool bAllowInsert ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); value_type newItem( val ); it = m_Vector.insert( it, newItem ); func( true, *it, val ); return std::make_pair( true, true ); } else { // already exists func( false, *it, val ); return std::make_pair( true, false ); } } template bool erase( const Q& key, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate()); if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) return false; // key exists f( *it ); m_Vector.erase( it ); return true; } template bool erase( const Q& key, Less pred, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key )) return false; // key exists f( *it ); m_Vector.erase( it ); return true; } template bool find( Q& val, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) return false; // key exists f( *it, val ); return true; } template bool find( Q& val, Less pred, Func f ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val )) return false; // key exists f( *it, val ); return true; } void clear() { m_Vector.clear(); } iterator begin() { return m_Vector.begin(); } const_iterator begin() const { return m_Vector.begin(); } iterator end() { return m_Vector.end(); } const_iterator end() const { return m_Vector.end(); } void move_item( adapted_container& /*from*/, iterator itWhat ) { iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate()); assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); copy_item()( m_Vector, it, itWhat ); } size_t size() const { return m_Vector.size(); } }; public: typedef adapted_container type ; ///< Result of \p adapt metafunction }; }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H libcds-2.3.3/cds/container/treiber_stack.h000066400000000000000000000357451341244201700205200ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_TREIBER_STACK_H #define CDSLIB_CONTAINER_TREIBER_STACK_H #include // unique_ptr #include #include namespace cds { namespace container { /// TreiberStack related definitions /** @ingroup cds_nonintrusive_helper */ namespace treiber_stack { /// Internal statistics template < typename Counter = cds::intrusive::treiber_stack::stat<>::counter_type > using stat = cds::intrusive::treiber_stack::stat< Counter >; /// Dummy internal statistics typedef cds::intrusive::treiber_stack::empty_stat empty_stat; /// TreiberStack default type traits struct traits { /// Back-off strategy typedef cds::backoff::Default back_off; /// Node allocator typedef CDS_DEFAULT_ALLOCATOR allocator; /// C++ memory ordering model /** Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef cds::atomicity::empty_item_counter item_counter; /// Internal statistics (by default, no internal statistics) /** Possible types are: \ref treiber_stack::stat, \ref treiber_stack::empty_stat (the default), user-provided class that supports treiber_stack::stat interface. */ typedef empty_stat stat; /** @name Elimination back-off traits The following traits are used only if elimination is enabled */ ///@{ /// Enable elimination back-off; by default, it is disabled static constexpr const bool enable_elimination = false; /// Back-off strategy to wait for elimination, default is cds::backoff::delay<> typedef cds::backoff::delay<> elimination_backoff; /// Buffer type for elimination array /** Possible types are \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. The size should be selected empirically for your application and hardware, there are no common rules for that. Default is %opt::v::initialized_static_buffer< any_type, 4 > . */ typedef opt::v::initialized_static_buffer< int, 4 > buffer; /// Random engine to generate a random position in elimination array typedef opt::v::c_rand random_engine; /// Lock type used in elimination, default is cds::sync::spin typedef cds::sync::spin lock_type; ///@} }; /// Metafunction converting option list to \p TreiberStack traits /** Supported \p Options are: - \p opt::allocator - allocator (like \p std::allocator) used for allocating stack nodes. Default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter, i.e. no item counting. Use \p cds::atomicity::item_counter to enable item counting. - \p opt::stat - the type to gather internal statistics. Possible option values are: \p treiber_stack::stat, \p treiber_stack::empty_stat (the default), user-provided class that supports \p %treiber_stack::stat interface. - \p opt::enable_elimination - enable elimination back-off for the stack. Default value is \p false; a usage sketch follows below.
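For instance, a minimal hedged sketch of switching elimination on through this metafunction (it assumes the \p cds::opt::enable_elimination option wrapper, which is the usual way to set the boolean trait documented above):
\code
// Sketch only: cds::opt::enable_elimination<true> sets traits::enable_elimination
typedef cds::container::TreiberStack< cds::gc::HP, int,
    typename cds::container::treiber_stack::make_traits<
        cds::opt::enable_elimination< true >
    >::type
> elimination_stack;
\endcode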
If elimination back-off is enabled, additional options can be specified: - \p opt::buffer - an initialized buffer type for elimination array, see \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. The size should be selected empirically for your application and hardware, there are no common rules for that. Default is %opt::v::initialized_static_buffer< any_type, 4 > . - \p opt::random_engine - a random engine to generate a random position in elimination array. Default is \p opt::v::c_rand. - \p opt::elimination_backoff - back-off strategy to wait for elimination, default is \p cds::backoff::delay<> - \p opt::lock_type - a lock type used in elimination back-off, default is \p cds::sync::spin. Example: declare %TreiberStack with item counting and internal statistics using \p %make_traits \code typedef cds::container::TreiberStack< cds::gc::HP, Foo, typename cds::container::treiber_stack::make_traits< cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::stat< cds::intrusive::treiber_stack::stat<> > >::type > myStack; \endcode */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace treiber_stack //@cond namespace details { template struct make_treiber_stack { typedef GC gc; typedef T value_type; typedef Traits traits; struct node_type: public cds::intrusive::treiber_stack::node< gc > { value_type m_value; node_type( const value_type& val ) : m_value( val ) {} template node_type( Args&&... args ) : m_value( std::forward( args )... ) {} }; typedef typename std::allocator_traits::template rebind_alloc< node_type > allocator_type; typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; struct node_deallocator { void operator ()( node_type * pNode ) { cxx_allocator().Delete( pNode ); } }; struct intrusive_traits: public traits { typedef cds::intrusive::treiber_stack::base_hook< cds::opt::gc > hook; typedef node_deallocator disposer; static constexpr const opt::link_check_type link_checker = cds::intrusive::treiber_stack::traits::link_checker; }; // Result of metafunction typedef intrusive::TreiberStack< gc, node_type, intrusive_traits > type; }; } // namespace details //@endcond /// Treiber's stack algorithm /** @ingroup cds_nonintrusive_stack It is non-intrusive version of Treiber's stack algorithm based on intrusive implementation intrusive::TreiberStack. Template arguments: - \p GC - garbage collector type: \p gc::HP, gc::DHP - \p T - type stored in the stack. - \p Traits - stack traits, default is \p treiber_stack::traits. 
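A minimal usage sketch (an illustration, not part of this header; it assumes \p cds::gc::HP has been initialized and the calling thread is attached to it, as required for any HP-based container):
\code
cds::container::TreiberStack< cds::gc::HP, int > stack;
stack.push( 1 );       // copies the value onto the stack
stack.emplace( 2 );    // constructs the element in place
int v;
while ( stack.pop( v )) {
    // v == 2 on the first iteration, then v == 1 (LIFO order)
}
\endcode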
You can use \p treiber_stack::make_traits metafunction to make your traits or just derive your traits from \p %treiber_stack::traits: \code struct myTraits: public cds::container::treiber_stack::traits { typedef cds::intrusive::treiber_stack::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::TreiberStack< cds::gc::HP, Foo, myTraits > myStack; // Equivalent make_traits example: typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, typename cds::intrusive::treiber_stack::make_traits< cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::stat< cds::intrusive::treiber_stack::stat<> > >::type > myStack; \endcode */ template < typename GC, typename T, typename Traits = treiber_stack::traits > class TreiberStack : public #ifdef CDS_DOXYGEN_INVOKED intrusive::TreiberStack< GC, cds::intrusive::treiber_stack::node< T >, Traits > #else details::make_treiber_stack< GC, T, Traits >::type #endif { //@cond typedef details::make_treiber_stack< GC, T, Traits > maker; typedef typename maker::type base_class; //@endcond public: /// Rebind template arguments template struct rebind { typedef TreiberStack< GC2, T2, Traits2 > other; ///< Rebinding result }; public: typedef T value_type ; ///< Value type stored in the stack typedef typename base_class::gc gc ; ///< Garbage collector used typedef typename base_class::back_off back_off ; ///< Back-off strategy used typedef typename maker::allocator_type allocator_type ; ///< Allocator type used for allocating/deallocating the nodes typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_order option typedef typename base_class::stat stat ; ///< Internal statistics policy used protected: typedef typename maker::node_type node_type ; ///< stack node type (derived from \p intrusive::treiber_stack::node) //@cond typedef typename maker::cxx_allocator cxx_allocator; typedef typename maker::node_deallocator node_deallocator; //@endcond protected: ///@cond static node_type * alloc_node( const value_type& val ) { return cxx_allocator().New( val ); } template static node_type * alloc_node_move( Args&&... args ) { return cxx_allocator().MoveNew( std::forward( args )... ); } static void free_node( node_type * p ) { node_deallocator()( p ); } static void retire_node( node_type * p ) { gc::template retire( p ); } struct node_disposer { void operator()( node_type * pNode ) { free_node( pNode ); } }; typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; //@endcond public: /// Constructs empty stack TreiberStack() {} /// Constructs empty stack and initializes elimination back-off data /** This form should be used if you use elimination back-off with dynamically allocated collision array, i.e \p Options... contains cds::opt::buffer< cds::opt::v::initialized_dynamic_buffer >. \p nCollisionCapacity parameter specifies the capacity of collision array. */ TreiberStack( size_t nCollisionCapacity ) : base_class( nCollisionCapacity ) {} /// \p %TreiberStack is not copy-constructible TreiberStack( TreiberStack const& ) = delete; /// Clears the stack on destruction ~TreiberStack() {} /// Pushes copy of \p val on the stack bool push( value_type const& val ) { scoped_node_ptr p( alloc_node(val)); if ( base_class::push( *p )) { p.release(); return true; } return false; } /// Pushes data of type \ref value_type created from std::forward(args)... template bool emplace( Args&&... 
args ) { scoped_node_ptr p( alloc_node_move( std::forward<Args>(args)...)); if ( base_class::push( *p )) { p.release(); return true; } return false; } /// Pops an item from the stack /** The value of the popped item is stored in \p val using the assignment operator. On success the function returns \p true and \p val contains the value popped from the stack. If the stack is empty the function returns \p false and \p val is unchanged. */ bool pop( value_type& val ) { return pop_with( [&val]( value_type& src ) { val = std::move(src); } ); } /// Pops an item from the stack with functor /** \p Func can be used to copy/move the popped item from the stack. \p Func interface is: \code void func( value_type& src ); \endcode where \p src is the popped item. */ template <typename Func> bool pop_with( Func f ) { node_type * p = base_class::pop(); if ( !p ) return false; f( p->m_value ); retire_node( p ); return true; } /// Checks if the stack is empty bool empty() const { return base_class::empty(); } /// Clears the stack void clear() { base_class::clear(); } /// Returns stack's item count /** The value returned depends on the opt::item_counter option. For atomicity::empty_item_counter, this function always returns 0. Warning: even if you use a real item counter and it returns 0, this does not mean that the stack is empty. To check emptiness use the \ref empty() method. */ size_t size() const { return base_class::size(); } /// Returns reference to internal statistics stat const& statistics() const { return base_class::statistics(); } }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_TREIBER_STACK_H libcds-2.3.3/cds/container/vyukov_mpmc_cycle_queue.h000066400000000000000000000475001341244201700226310ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H #define CDSLIB_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H #include #include #include #include #include namespace cds { namespace container { /// VyukovMPMCCycleQueue related definitions /** @ingroup cds_nonintrusive_helper */ namespace vyukov_queue { /// VyukovMPMCCycleQueue default traits struct traits { /// Buffer type for internal array /* The type of element for the buffer is not important: the queue rebinds the buffer to the required type via the \p rebind metafunction. For \p VyukovMPMCCycleQueue the buffer size must be a power of 2. You should use only an uninitialized buffer for the queue - \p cds::opt::v::uninitialized_dynamic_buffer (the default), \p cds::opt::v::uninitialized_static_buffer. */ typedef cds::opt::v::uninitialized_dynamic_buffer< void * > buffer; /// A functor to clean the dequeued item. /** The functor calls the destructor for the queue item. After an item is dequeued, \p value_cleaner cleans the cell that the item occupied. If \p T is a complex type, \p value_cleaner may be a useful feature. Default value is \ref opt::v::auto_cleaner */ typedef cds::opt::v::auto_cleaner value_cleaner; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef cds::atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Padding for internal critical atomic data.
Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; /// Back-off strategy typedef cds::backoff::Default back_off; /// Single-consumer version /** For the single-consumer version of the algorithm some additional functions (\p front(), \p pop_front()) are available. Default is \p false */ static constexpr bool const single_consumer = false; }; /// Metafunction converting option list to \p vyukov_queue::traits /** Supported \p Options are: - \p opt::buffer - an uninitialized buffer type for internal cyclic array. Possible types are: \p opt::v::uninitialized_dynamic_buffer (the default), \p opt::v::uninitialized_static_buffer. The type of element in the buffer is not important: it will be changed via \p rebind metafunction. - \p opt::value_cleaner - a functor to clean the dequeued item. The functor calls the destructor for the queue item. After an item is dequeued, \p value_cleaner cleans the cell that the item occupied. If \p T is a complex type, \p value_cleaner can be a useful feature. Default value is \ref opt::v::auto_cleaner - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting is disabled). To enable item counting use \p cds::atomicity::item_counter - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). Example: declare \p %VyukovMPMCCycleQueue with item counting and static internal buffer of size 1024: \code typedef cds::container::VyukovMPMCCycleQueue< Foo, typename cds::container::vyukov_queue::make_traits< cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 >>, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } //namespace vyukov_queue /// Vyukov's MPMC bounded queue /** @ingroup cds_nonintrusive_queue This algorithm was developed by Dmitry Vyukov (see http://www.1024cores.net). It is a multi-producer/multi-consumer (MPMC), array-based, causal-FIFO queue that fails on overflow, does not require GC, and has no priorities but blocking producers and consumers. The algorithm is pretty simple and fast. It is not lock-free in the strict sense, just implemented by means of atomic RMW operations without mutexes. The cost of enqueue/dequeue is 1 CAS per operation. No dynamic memory allocation/management during operation. Producers and consumers are separated from each other (as in the two-lock queue), i.e. they do not touch the same data while the queue is not empty. There is a multiple-producer/single-consumer version, \p cds::container::VyukovMPSCCycleQueue, that supports \p front() and \p pop_front() functions. Source: - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue Template parameters - \p T - type stored in the queue. - \p Traits - queue traits, default is \p vyukov_queue::traits.
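A minimal usage sketch (an illustration only; the capacity passed to the constructor must be a power of 2, as the constructor asserts):
\code
cds::container::VyukovMPMCCycleQueue< int > q( 1024 );   // bounded queue, capacity 1024
if ( !q.enqueue( 42 )) {
    // the queue is full: this algorithm fails on overflow instead of blocking
}
int v;
if ( q.dequeue_with( [&v]( int& src ) { v = src; } )) {
    // v == 42
}
\endcode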
You can use \p vyukov_queue::make_traits metafunction to make your traits or just derive your traits from \p %vyukov_queue::traits: \code struct myTraits: public cds::container::vyukov_queue::traits { typedef cds::atomicity::item_counter item_counter; }; typedef cds::container::VyukovMPMCCycleQueue< Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::container::VyukovMPMCCycleQueue< cds::gc::HP, Foo, typename cds::container::vyukov_queue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode \par License Simplified BSD license by Dmitry Vyukov (http://www.1024cores.net/site/1024cores/home/code-license) */ template class VyukovMPMCCycleQueue : public cds::bounded_container { public: typedef T value_type; ///< Value type to be stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option typedef typename traits::value_cleaner value_cleaner; ///< Value cleaner, see \p vyukov_queue::traits::value_cleaner typedef typename traits::back_off back_off; ///< back-off strategy /// \p true for single-consumer version, \p false otherwise static constexpr bool const c_single_consumer = traits::single_consumer; /// Rebind template arguments template struct rebind { typedef VyukovMPMCCycleQueue< T2, Traits2 > other ; ///< Rebinding result }; protected: //@cond typedef atomics::atomic sequence_type; struct cell_type { sequence_type sequence; value_type data; cell_type() {} }; typedef typename traits::buffer::template rebind::other buffer; //@endcond protected: //@cond buffer m_buffer; size_t const m_nBufferMask; typename opt::details::apply_padding< size_t, traits::padding >::padding_type pad1_; sequence_type m_posEnqueue; typename opt::details::apply_padding< sequence_type, traits::padding >::padding_type pad2_; sequence_type m_posDequeue; typename opt::details::apply_padding< sequence_type, traits::padding >::padding_type pad3_; item_counter m_ItemCounter; //@endcond public: /// Constructs the queue of capacity \p nCapacity /** For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. The buffer capacity must be the power of two. */ VyukovMPMCCycleQueue( size_t nCapacity = 0 ) : m_buffer( nCapacity ) , m_nBufferMask( m_buffer.capacity() - 1 ) { nCapacity = m_buffer.capacity(); // Buffer capacity must be power of 2 assert( nCapacity >= 2 && (nCapacity & (nCapacity - 1)) == 0 ); for (size_t i = 0; i != nCapacity; ++i ) m_buffer[i].sequence.store(i, memory_model::memory_order_relaxed); m_posEnqueue.store(0, memory_model::memory_order_relaxed); m_posDequeue.store(0, memory_model::memory_order_relaxed); } ~VyukovMPMCCycleQueue() { clear(); } /// Enqueues data to the queue using a functor /** \p Func is a functor called to copy a value to the queue cell. 
The functor \p f takes one argument - a reference to a empty cell of type \ref value_type : \code cds::container::VyukovMPMCCycleQueue< Foo > myQueue; Bar bar; myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = std::move(bar); } ); \endcode */ template bool enqueue_with(Func f) { cell_type* cell; back_off bkoff; size_t pos = m_posEnqueue.load(memory_model::memory_order_relaxed); for (;;) { cell = &m_buffer[pos & m_nBufferMask]; size_t seq = cell->sequence.load(memory_model::memory_order_acquire); intptr_t dif = static_cast(seq) - static_cast(pos); if (dif == 0) { if ( m_posEnqueue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed )) break; } else if (dif < 0) { // Queue full? if ( pos - m_posDequeue.load( memory_model::memory_order_relaxed ) == capacity()) return false; // queue full bkoff(); pos = m_posEnqueue.load( memory_model::memory_order_relaxed ); } else pos = m_posEnqueue.load(memory_model::memory_order_relaxed); } f( cell->data ); cell->sequence.store(pos + 1, memory_model::memory_order_release); ++m_ItemCounter; return true; } /// Enqueues \p val value into the queue. /** The new queue item is created by calling placement new in free cell. Returns \p true if success, \p false if the queue is full. */ bool enqueue( value_type const& val ) { return enqueue_with( [&val]( value_type& dest ){ new ( &dest ) value_type( val ); }); } /// Enqueues \p val value into the queue, move semantics bool enqueue( value_type&& val ) { return enqueue_with( [&val]( value_type& dest ) { new (&dest) value_type( std::move( val ));}); } /// Synonym for \p enqueue( value_type const& ) bool push( value_type const& data ) { return enqueue( data ); } /// Synonym for \p enqueue( value_type&& ) bool push( value_type&& data ) { return enqueue( std::move( data )); } /// Synonym for \p enqueue_with() template bool push_with( Func f ) { return enqueue_with( f ); } /// Enqueues data of type \ref value_type constructed with std::forward(args)... template bool emplace( Args&&... args ) { #if (CDS_COMPILER == CDS_COMPILER_GCC) && (CDS_COMPILER_VERSION < 40900) //work around unsupported feature in g++ 4.8 for forwarding parameter packs to lambda. value_type val( std::forward(args)... ); return enqueue_with( [&val]( value_type& dest ){ new ( &dest ) value_type( std::move( val )); }); #else return enqueue_with( [&args ...]( value_type& dest ){ new ( &dest ) value_type( std::forward( args )... ); }); #endif } /// Dequeues a value using a functor /** \p Func is a functor called to copy dequeued value. The functor takes one argument - a reference to removed node: \code cds:container::VyukovMPMCCycleQueue< Foo > myQueue; Bar bar; myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode The functor is called only if the queue is not empty. */ template bool dequeue_with( Func f ) { cell_type * cell; back_off bkoff; size_t pos = m_posDequeue.load( memory_model::memory_order_relaxed ); for (;;) { cell = &m_buffer[pos & m_nBufferMask]; size_t seq = cell->sequence.load(memory_model::memory_order_acquire); intptr_t dif = static_cast(seq) - static_cast(pos + 1); if (dif == 0) { if ( m_posDequeue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed)) break; } else if (dif < 0) { // Queue empty? 
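// Note on the sequence protocol: every cell carries a sequence counter.
// For a consumer at position pos, seq == pos + 1 means the cell is filled
// and ready to be dequeued; seq < pos + 1 means the producer has not
// published this cell yet, so the queue may be empty; seq > pos + 1 means
// another consumer has already taken this cell, so pos must be reloaded.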
if ( pos - m_posEnqueue.load( memory_model::memory_order_relaxed ) == 0 ) return false; // queue empty bkoff(); pos = m_posDequeue.load( memory_model::memory_order_relaxed ); } else pos = m_posDequeue.load(memory_model::memory_order_relaxed); } f( cell->data ); value_cleaner()( cell->data ); cell->sequence.store( pos + m_nBufferMask + 1, memory_model::memory_order_release ); --m_ItemCounter; return true; } /// Dequeues a value from the queue /** If queue is not empty, the function returns \p true, \p dest contains a copy of dequeued value. The assignment operator for type \ref value_type is invoked. If queue is empty, the function returns \p false, \p dest is unchanged. */ bool dequeue(value_type& dest ) { return dequeue_with( [&dest]( value_type& src ){ dest = std::move( src );}); } /// Synonym for \p dequeue() bool pop(value_type& data) { return dequeue(data); } /// Synonym for \p dequeue_with() template bool pop_with( Func f ) { return dequeue_with( f ); } /// Returns a pointer to top element of the queue or \p nullptr if queue is empty (only for single-consumer version) template typename std::enable_if::type front() { static_assert( c_single_consumer, "front() is enabled only if traits::single_consumer is true"); cell_type * cell; back_off bkoff; size_t pos = m_posDequeue.load( memory_model::memory_order_relaxed ); for ( ;;) { cell = &m_buffer[pos & m_nBufferMask]; size_t seq = cell->sequence.load( memory_model::memory_order_acquire ); intptr_t dif = static_cast(seq) - static_cast(pos + 1); if ( dif == 0 ) return &cell->data; else if ( dif < 0 ) { // Queue empty? if ( pos - m_posEnqueue.load( memory_model::memory_order_relaxed ) == 0 ) return nullptr; // queue empty bkoff(); pos = m_posDequeue.load( memory_model::memory_order_relaxed ); } else pos = m_posDequeue.load( memory_model::memory_order_relaxed ); } } /// Pops top element; returns \p true if queue is not empty, \p false otherwise (only for single-consumer version) template typename std::enable_if::type pop_front() { return dequeue_with( []( value_type& ) {} ); } /// Checks if the queue is empty bool empty() const { const cell_type * cell; back_off bkoff; size_t pos = m_posDequeue.load(memory_model::memory_order_relaxed); for (;;) { cell = &m_buffer[pos & m_nBufferMask]; size_t seq = cell->sequence.load(memory_model::memory_order_acquire); intptr_t dif = static_cast(seq) - static_cast(pos + 1); if (dif == 0) return false; else if (dif < 0) { if ( pos - m_posEnqueue.load( memory_model::memory_order_relaxed ) == 0 ) return true; } bkoff(); pos = m_posDequeue.load(memory_model::memory_order_relaxed); } } /// Clears the queue void clear() { value_type v; while ( pop(v)); } /// Returns queue's item count /** The value returned depends on \p vyukov_queue::traits::item_counter option. For \p atomicity::empty_item_counter, the function always returns 0. 
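To get a meaningful \p size(), enable item counting via traits; a sketch using the metafunction from this header (the typedef name is illustrative):
\code
typedef cds::container::VyukovMPMCCycleQueue< int,
    typename cds::container::vyukov_queue::make_traits<
        cds::opt::item_counter< cds::atomicity::item_counter >
    >::type
> counting_queue;
\endcode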
*/ size_t size() const { return m_ItemCounter.value(); } /// Returns capacity of the queue size_t capacity() const { return m_buffer.capacity(); } }; //@cond namespace vyukov_queue { template struct single_consumer_traits : public Traits { static constexpr bool const single_consumer = true; }; } // namespace vyukov_queue //@endcond /// Vyukov's queue multiple producer - single consumer version template using VyukovMPSCCycleQueue = VyukovMPMCCycleQueue< T, vyukov_queue::single_consumer_traits >; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H libcds-2.3.3/cds/container/weak_ringbuffer.h000066400000000000000000001065011341244201700210240ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_WEAK_RINGBUFFER_H #define CDSLIB_CONTAINER_WEAK_RINGBUFFER_H #include #include #include #include #include namespace cds { namespace container { /// \p WeakRingBuffer related definitions /** @ingroup cds_nonintrusive_helper */ namespace weak_ringbuffer { /// \p WeakRingBuffer default traits struct traits { /// Buffer type for internal array /* The type of element for the buffer is not important: \p WeakRingBuffer rebind the buffer for required type via \p rebind metafunction. For \p WeakRingBuffer the buffer size should have power-of-2 size. You should use only uninitialized buffer for the ring buffer - \p cds::opt::v::uninitialized_dynamic_buffer (the default), \p cds::opt::v::uninitialized_static_buffer. */ typedef cds::opt::v::uninitialized_dynamic_buffer< void * > buffer; /// A functor to clean item dequeued. /** The functor calls the destructor for popped element. After a set of items is dequeued, \p value_cleaner cleans the cells that the items have been occupied. If \p T is a complex type, \p value_cleaner may be useful feature. For POD types \ref opt::v::empty_cleaner is suitable Default value is \ref opt::v::auto_cleaner that calls destructor only if it is not trivial. */ typedef cds::opt::v::auto_cleaner value_cleaner; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; }; /// Metafunction converting option list to \p weak_ringbuffer::traits /** Supported \p Options are: - \p opt::buffer - an uninitialized buffer type for internal cyclic array. Possible types are: \p opt::v::uninitialized_dynamic_buffer (the default), \p opt::v::uninitialized_static_buffer. The type of element in the buffer is not important: it will be changed via \p rebind metafunction. - \p opt::value_cleaner - a functor to clean items dequeued. The functor calls the destructor for ring-buffer item. After a set of items is dequeued, \p value_cleaner cleans the cells that the items have been occupied. If \p T is a complex type, \p value_cleaner can be an useful feature. Default value is \ref opt::v::empty_cleaner that is suitable for POD types. - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). Example: declare \p %WeakRingBuffer with a static internal buffer for 1024 objects: \code typedef cds::container::WeakRingBuffer< Foo, typename cds::container::weak_ringbuffer::make_traits< cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 > >::type > myRing; \endcode */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace weak_ringbuffer /// Single-producer single-consumer ring buffer /** @ingroup cds_nonintrusive_queue Source: [2013] Nhat Minh Le, Adrien Guatto, Albert Cohen, Antoniu Pop. Correct and Efficient Bounded FIFO Queues. [Research Report] RR-8365, INRIA. 2013. Ring buffer is a bounded queue. Additionally, \p %WeakRingBuffer supports batch operations - you can push/pop an array of elements. There is a specialization \ref cds_nonintrusive_WeakRingBuffer_void "WeakRingBuffer<void>" that is not a queue but a "memory pool" between producer and consumer threads; that specialization supports variable-sized data. @warning: \p %WeakRingBuffer is developed for 64-bit architectures. A 32-bit platform must provide support for 64-bit atomics. */ template <typename T, typename Traits = weak_ringbuffer::traits> class WeakRingBuffer: public cds::bounded_container { public: typedef T value_type; ///< Value type to be stored in the ring buffer typedef Traits traits; ///< Ring buffer traits typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option typedef typename traits::value_cleaner value_cleaner; ///< Value cleaner, see \p weak_ringbuffer::traits::value_cleaner /// Rebind template arguments template <typename T2, typename Traits2> struct rebind { typedef WeakRingBuffer< T2, Traits2 > other; ///< Rebinding result }; //@cond // Only for tests typedef size_t item_counter; //@endcond private: //@cond typedef typename traits::buffer::template rebind< value_type >::other buffer; typedef uint64_t counter_type; //@endcond public: /// Creates the ring buffer of \p capacity /** For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. If the buffer capacity is a power of two, lightweight binary arithmetic is used instead of modulo arithmetic. */ WeakRingBuffer( size_t capacity = 0 ) : front_( 0 ) , pfront_( 0 ) , cback_( 0 ) , buffer_( capacity ) { back_.store( 0, memory_model::memory_order_release ); } /// Destroys the ring buffer ~WeakRingBuffer() { value_cleaner cleaner; counter_type back = back_.load( memory_model::memory_order_relaxed ); for ( counter_type front = front_.load( memory_model::memory_order_relaxed ); front != back; ++front ) cleaner( buffer_[ buffer_.mod( front ) ] ); } /// Batch push - push array \p arr of size \p count /** \p CopyFunc is a per-element copy functor: for each element of \p arr copy( dest, arr[i] ) is called.
The \p CopyFunc signature: \code void copy_func( value_type& element, Q const& source ); \endcode Here \p element is uninitialized so you should construct it using placement new if needed; for example, if the element type is \p str::string and \p Q is char const*, \p copy functor can be: \code cds::container::WeakRingBuffer ringbuf; char const* arr[10]; ringbuf.push( arr, 10, []( std::string& element, char const* src ) { new( &element ) std::string( src ); }); \endcode You may use move semantics if appropriate: \code cds::container::WeakRingBuffer ringbuf; std::string arr[10]; ringbuf.push( arr, 10, []( std::string& element, std:string& src ) { new( &element ) std::string( std::move( src )); }); \endcode Returns \p true if success or \p false if not enough space in the ring */ template bool push( Q* arr, size_t count, CopyFunc copy ) { assert( count < capacity()); counter_type back = back_.load( memory_model::memory_order_relaxed ); assert( static_cast( back - pfront_ ) <= capacity()); if ( static_cast( pfront_ + capacity() - back ) < count ) { pfront_ = front_.load( memory_model::memory_order_acquire ); if ( static_cast( pfront_ + capacity() - back ) < count ) { // not enough space return false; } } // copy data for ( size_t i = 0; i < count; ++i, ++back ) copy( buffer_[buffer_.mod( back )], arr[i] ); back_.store( back, memory_model::memory_order_release ); return true; } /// Batch push - push array \p arr of size \p count with assignment as copy functor /** This function is equivalent for: \code push( arr, count, []( value_type& dest, Q const& src ) { dest = src; } ); \endcode The function is available only if std::is_constructible::value is \p true. Returns \p true if success or \p false if not enough space in the ring */ template typename std::enable_if< std::is_constructible::value, bool>::type push( Q* arr, size_t count ) { return push( arr, count, []( value_type& dest, Q const& src ) { new( &dest ) value_type( src ); } ); } /// Push one element created from \p args /** The function is available only if std::is_constructible::value is \p true. Returns \p false if the ring is full or \p true otherwise. */ template typename std::enable_if< std::is_constructible::value, bool>::type emplace( Args&&... args ) { counter_type back = back_.load( memory_model::memory_order_relaxed ); assert( static_cast( back - pfront_ ) <= capacity()); if ( pfront_ + capacity() - back < 1 ) { pfront_ = front_.load( memory_model::memory_order_acquire ); if ( pfront_ + capacity() - back < 1 ) { // not enough space return false; } } new( &buffer_[buffer_.mod( back )] ) value_type( std::forward(args)... ); back_.store( back + 1, memory_model::memory_order_release ); return true; } /// Enqueues data to the ring using a functor /** \p Func is a functor called to copy a value to the ring element. The functor \p f takes one argument - a reference to a empty cell of type \ref value_type : \code cds::container::WeakRingBuffer< Foo > myRing; Bar bar; myRing.enqueue_with( [&bar]( Foo& dest ) { dest = std::move(bar); } ); \endcode */ template bool enqueue_with( Func f ) { counter_type back = back_.load( memory_model::memory_order_relaxed ); assert( static_cast( back - pfront_ ) <= capacity()); if ( pfront_ + capacity() - back < 1 ) { pfront_ = front_.load( memory_model::memory_order_acquire ); if ( pfront_ + capacity() - back < 1 ) { // not enough space return false; } } f( buffer_[buffer_.mod( back )] ); back_.store( back + 1, memory_model::memory_order_release ); return true; } /// Enqueues \p val value into the queue. 
/** The new queue item is created by calling placement new in a free cell. Returns \p true on success, \p false if the ring is full. */ bool enqueue( value_type const& val ) { return emplace( val ); } /// Enqueues \p val value into the queue, move semantics bool enqueue( value_type&& val ) { return emplace( std::move( val )); } /// Synonym for \p enqueue( value_type const& ) bool push( value_type const& val ) { return enqueue( val ); } /// Synonym for \p enqueue( value_type&& ) bool push( value_type&& val ) { return enqueue( std::move( val )); } /// Synonym for \p enqueue_with() template <typename Func> bool push_with( Func f ) { return enqueue_with( f ); } /// Batch pop - pop \p count elements from the ring buffer into \p arr /** \p CopyFunc is a per-element copy functor: for each element of \p arr copy( arr[i], source ) is called. The \p CopyFunc signature: \code void copy_func( Q& dest, value_type& element ); \endcode Returns \p true on success or \p false if the ring contains fewer than \p count elements (see the batch round-trip sketch below) */ template <typename Q, typename CopyFunc> bool pop( Q* arr, size_t count, CopyFunc copy ) { assert( count < capacity()); counter_type front = front_.load( memory_model::memory_order_relaxed ); assert( static_cast<size_t>( cback_ - front ) < capacity()); if ( static_cast<size_t>( cback_ - front ) < count ) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( static_cast<size_t>( cback_ - front ) < count ) return false; } // copy data value_cleaner cleaner; for ( size_t i = 0; i < count; ++i, ++front ) { value_type& val = buffer_[buffer_.mod( front )]; copy( arr[i], val ); cleaner( val ); } front_.store( front, memory_model::memory_order_release ); return true; } /// Batch pop - pop into array \p arr of size \p count with assignment as copy functor /** This function is equivalent to: \code pop( arr, count, []( Q& dest, value_type& src ) { dest = src; } ); \endcode The function is available only if std::is_assignable<Q&, value_type&>::value is \p true. Returns \p true on success or \p false if the ring contains fewer than \p count elements */ template <typename Q> typename std::enable_if< std::is_assignable<Q&, value_type&>::value, bool>::type pop( Q* arr, size_t count ) { return pop( arr, count, []( Q& dest, value_type& src ) { dest = src; } ); } /// Dequeues an element from the ring to \p val /** The function is available only if std::is_assignable<Q&, value_type&>::value is \p true. Returns \p false if the ring is empty or \p true otherwise. */ template <typename Q> typename std::enable_if< std::is_assignable<Q&, value_type&>::value, bool>::type dequeue( Q& val ) { return pop( &val, 1 ); } /// Synonym for \p dequeue( Q& ) template <typename Q> typename std::enable_if< std::is_assignable<Q&, value_type&>::value, bool>::type pop( Q& val ) { return dequeue( val ); } /// Dequeues a value using a functor /** \p Func is a functor called to copy the dequeued value. The functor takes one argument - a reference to the removed item: \code cds::container::WeakRingBuffer< Foo > myRing; Bar bar; myRing.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); \endcode Returns \p true if the ring is not empty, \p false otherwise. The functor is called only if the ring is not empty.
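Together with the batch \p push() / \p pop() overloads above, a round-trip sketch under default traits (sizes are illustrative):
\code
cds::container::WeakRingBuffer< int > ring( 64 );
int src[4] = { 1, 2, 3, 4 };
int dst[4];
if ( ring.push( src, 4 ) && ring.pop( dst, 4 )) {
    // dst now contains 1, 2, 3, 4
}
\endcode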
*/ template bool dequeue_with( Func f ) { counter_type front = front_.load( memory_model::memory_order_relaxed ); assert( static_cast( cback_ - front ) < capacity()); if ( cback_ - front < 1 ) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( cback_ - front < 1 ) return false; } value_type& val = buffer_[buffer_.mod( front )]; f( val ); value_cleaner()( val ); front_.store( front + 1, memory_model::memory_order_release ); return true; } /// Synonym for \p dequeue_with() template bool pop_with( Func f ) { return dequeue_with( f ); } /// Gets pointer to first element of ring buffer /** If the ring buffer is empty, returns \p nullptr The function is thread-safe since there is only one consumer. Recall, \p WeakRingBuffer is single-producer/single consumer container. */ value_type* front() { counter_type front = front_.load( memory_model::memory_order_relaxed ); assert( static_cast( cback_ - front ) < capacity()); if ( cback_ - front < 1 ) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( cback_ - front < 1 ) return nullptr; } return &buffer_[buffer_.mod( front )]; } /// Removes front element of ring-buffer /** If the ring-buffer is empty, returns \p false. Otherwise, pops the first element from the ring. */ bool pop_front() { counter_type front = front_.load( memory_model::memory_order_relaxed ); assert( static_cast( cback_ - front ) <= capacity()); if ( cback_ - front < 1 ) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( cback_ - front < 1 ) return false; } // clean cell value_cleaner()( buffer_[buffer_.mod( front )] ); front_.store( front + 1, memory_model::memory_order_release ); return true; } /// Clears the ring buffer (only consumer can call this function!) void clear() { value_type v; while ( pop( v )); } /// Checks if the ring-buffer is empty bool empty() const { return front_.load( memory_model::memory_order_relaxed ) == back_.load( memory_model::memory_order_relaxed ); } /// Checks if the ring-buffer is full bool full() const { return back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed ) >= capacity(); } /// Returns the current size of ring buffer size_t size() const { return static_cast( back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed )); } /// Returns capacity of the ring buffer size_t capacity() const { return buffer_.capacity(); } private: //@cond atomics::atomic front_; typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad1_; atomics::atomic back_; typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad2_; counter_type pfront_; typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad3_; counter_type cback_; typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad4_; buffer buffer_; //@endcond }; /// Single-producer single-consumer ring buffer for untyped variable-sized data /** @ingroup cds_nonintrusive_queue @anchor cds_nonintrusive_WeakRingBuffer_void This SPSC ring-buffer is intended for data of variable size. The producer allocates a buffer from ring, you fill it with data and pushes them back to ring. 
The consumer thread reads data from front-end and then pops them: \code // allocates 1M ring buffer WeakRingBuffer theRing( 1024 * 1024 ); void producer_thread() { // Get data of size N bytes size_t size; void* data; while ( true ) { // Get external data std::tie( data, size ) = get_data(); if ( data == nullptr ) break; // Allocates a buffer from the ring void* buf = theRing.back( size ); if ( !buf ) { std::cout << "The ring is full" << std::endl; break; } memcpy( buf, data, size ); // Push data into the ring theRing.push_back(); } } void consumer_thread() { while ( true ) { auto buf = theRing.front(); if ( buf.first == nullptr ) { std::cout << "The ring is empty" << std::endl; break; } // Process data process_data( buf.first, buf.second ); // Free buffer theRing.pop_front(); } } \endcode @warning: \p %WeakRingBuffer is developed for 64-bit architecture. 32-bit platform must provide support for 64-bit atomics. */ #ifdef CDS_DOXYGEN_INVOKED template #else template #endif class WeakRingBuffer: public cds::bounded_container { public: typedef Traits traits; ///< Ring buffer traits typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option private: //@cond typedef typename traits::buffer::template rebind< uint8_t >::other buffer; typedef uint64_t counter_type; //@endcond public: /// Creates the ring buffer of \p capacity bytes /** For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. If the buffer capacity is a power of two, lightweight binary arithmetics is used instead of modulo arithmetics. */ WeakRingBuffer( size_t capacity = 0 ) : front_( 0 ) , pfront_( 0 ) , cback_( 0 ) , buffer_( capacity ) { back_.store( 0, memory_model::memory_order_release ); } /// [producer] Reserve \p size bytes /** The function returns a pointer to reserved buffer of \p size bytes. If no enough space in the ring buffer the function returns \p nullptr. 
After successful \p %back() you should fill the buffer provided and call \p push_back(): \code // allocates 1M ring buffer WeakRingBuffer< void > theRing( 1024 * 1024 ); void producer_thread() { // Get data of size N bytes size_t size; void* data; while ( true ) { // Get external data std::tie( data, size ) = get_data(); if ( data == nullptr ) break; // Allocates a buffer from the ring void* buf = theRing.back( size ); if ( !buf ) { std::cout << "The ring is full" << std::endl; break; } memcpy( buf, data, size ); // Push data into the ring theRing.push_back(); } } \endcode */ void* back( size_t size ) { assert( size > 0 ); // Any data is rounded to 8-byte boundary size_t real_size = calc_real_size( size ); // check if we can reserve real_size bytes assert( real_size < capacity()); counter_type back = back_.load( memory_model::memory_order_relaxed ); assert( static_cast<size_t>( back - pfront_ ) <= capacity()); if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) { pfront_ = front_.load( memory_model::memory_order_acquire ); if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) { // not enough space return nullptr; } } uint8_t* reserved = buffer_.buffer() + buffer_.mod( back ); // Check if the free space at the buffer tail is enough to store real_size bytes size_t tail_size = capacity() - static_cast<size_t>( buffer_.mod( back )); if ( tail_size < real_size ) { // make unused tail assert( tail_size >= sizeof( size_t )); assert( !is_tail( tail_size )); *reinterpret_cast<size_t*>( reserved ) = make_tail( tail_size - sizeof(size_t)); back += tail_size; // We must be at the beginning of the buffer assert( buffer_.mod( back ) == 0 ); if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) { pfront_ = front_.load( memory_model::memory_order_acquire ); if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) { // not enough space return nullptr; } } back_.store( back, memory_model::memory_order_release ); reserved = buffer_.buffer(); } // reserve and store size *reinterpret_cast<size_t*>( reserved ) = size; return reinterpret_cast<void*>( reserved + sizeof( size_t )); } /// [producer] Push reserved bytes into ring /** The function pushes the reserved buffer into the ring. After this call, the buffer becomes visible to the consumer: \code // allocates 1M ring buffer WeakRingBuffer< void > theRing( 1024 * 1024 ); void producer_thread() { // Get data of size N bytes size_t size; void* data; while ( true ) { // Get external data std::tie( data, size ) = get_data(); if ( data == nullptr ) break; // Allocates a buffer from the ring void* buf = theRing.back( size ); if ( !buf ) { std::cout << "The ring is full" << std::endl; break; } memcpy( buf, data, size ); // Push data into the ring theRing.push_back(); } } \endcode */ void push_back() { counter_type back = back_.load( memory_model::memory_order_relaxed ); uint8_t* reserved = buffer_.buffer() + buffer_.mod( back ); size_t real_size = calc_real_size( *reinterpret_cast<size_t*>( reserved )); assert( real_size < capacity()); back_.store( back + real_size, memory_model::memory_order_release ); } /// [producer] Push \p data of \p size bytes into ring /** This function invokes \p back( size ), \p memcpy( buf, data, size ) and \p push_back() in one call. */ bool push_back( void const* data, size_t size ) { void* buf = back( size ); if ( buf ) { memcpy( buf, data, size ); push_back(); return true; } return false; } /// [consumer] Get top data from the ring /** If the ring is empty, the function returns \p nullptr in \p std::pair::first.
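Internally, each record is stored with a leading \p size_t header and its payload rounded up to 8-byte alignment; for example, on a 64-bit platform a 13-byte payload occupies ((13 + 7) & ~7) + 8 = 24 bytes of ring space. A producer sketch using the all-in-one \p push_back( data, size ) overload (the payload size is illustrative):
\code
cds::container::WeakRingBuffer< void > theRing( 1024 * 1024 );
char payload[13] = {};
if ( !theRing.push_back( payload, sizeof( payload )))
    std::cout << "The ring is full" << std::endl;
\endcode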
*/ std::pair front() { counter_type front = front_.load( memory_model::memory_order_relaxed ); assert( static_cast( cback_ - front ) < capacity()); if ( cback_ - front < sizeof( size_t )) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( cback_ - front < sizeof( size_t )) return std::make_pair( nullptr, 0u ); } uint8_t * buf = buffer_.buffer() + buffer_.mod( front ); // check alignment assert( ( reinterpret_cast( buf ) & ( sizeof( uintptr_t ) - 1 )) == 0 ); size_t size = *reinterpret_cast( buf ); if ( is_tail( size )) { // unused tail, skip CDS_VERIFY( pop_front()); front = front_.load( memory_model::memory_order_relaxed ); if ( cback_ - front < sizeof( size_t )) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( cback_ - front < sizeof( size_t )) return std::make_pair( nullptr, 0u ); } buf = buffer_.buffer() + buffer_.mod( front ); size = *reinterpret_cast( buf ); assert( !is_tail( size )); assert( buf == buffer_.buffer()); } #ifdef _DEBUG size_t real_size = calc_real_size( size ); if ( static_cast( cback_ - front ) < real_size ) { cback_ = back_.load( memory_model::memory_order_acquire ); assert( static_cast( cback_ - front ) >= real_size ); } #endif return std::make_pair( reinterpret_cast( buf + sizeof( size_t )), size ); } /// [consumer] Pops top data /** Typical consumer workloop: \code // allocates 1M ring buffer WeakRingBuffer theRing( 1024 * 1024 ); void consumer_thread() { while ( true ) { auto buf = theRing.front(); if ( buf.first == nullptr ) { std::cout << "The ring is empty" << std::endl; break; } // Process data process_data( buf.first, buf.second ); // Free buffer theRing.pop_front(); } } \endcode */ bool pop_front() { counter_type front = front_.load( memory_model::memory_order_relaxed ); assert( static_cast( cback_ - front ) <= capacity()); if ( cback_ - front < sizeof(size_t)) { cback_ = back_.load( memory_model::memory_order_acquire ); if ( cback_ - front < sizeof( size_t )) return false; } uint8_t * buf = buffer_.buffer() + buffer_.mod( front ); // check alignment assert( ( reinterpret_cast( buf ) & ( sizeof( uintptr_t ) - 1 )) == 0 ); size_t size = *reinterpret_cast( buf ); size_t real_size = calc_real_size( untail( size )); #ifdef _DEBUG if ( static_cast( cback_ - front ) < real_size ) { cback_ = back_.load( memory_model::memory_order_acquire ); assert( static_cast( cback_ - front ) >= real_size ); } #endif front_.store( front + real_size, memory_model::memory_order_release ); return true; } /// [consumer] Clears the ring buffer void clear() { for ( auto el = front(); el.first; el = front()) pop_front(); } /// Checks if the ring-buffer is empty bool empty() const { return front_.load( memory_model::memory_order_relaxed ) == back_.load( memory_model::memory_order_relaxed ); } /// Checks if the ring-buffer is full bool full() const { return back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed ) >= capacity(); } /// Returns the current size of ring buffer size_t size() const { return static_cast( back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed )); } /// Returns capacity of the ring buffer size_t capacity() const { return buffer_.capacity(); } private: //@cond static size_t calc_real_size( size_t size ) { size_t real_size = (( size + sizeof( uintptr_t ) - 1 ) & ~( sizeof( uintptr_t ) - 1 )) + sizeof( size_t ); assert( real_size > size ); assert( real_size - size >= sizeof( size_t )); return real_size; } static bool is_tail( size_t size ) { return 
( size & ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ))) != 0; } static size_t make_tail( size_t size ) { return size | ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )); } static size_t untail( size_t size ) { return size & (( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )) - 1); } //@endcond private: //@cond atomics::atomic front_; typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad1_; atomics::atomic back_; typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad2_; counter_type pfront_; typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad3_; counter_type cback_; typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad4_; buffer buffer_; //@endcond }; }} // namespace cds::container #endif // #ifndef CDSLIB_CONTAINER_WEAK_RINGBUFFER_H libcds-2.3.3/cds/details/000077500000000000000000000000001341244201700151535ustar00rootroot00000000000000libcds-2.3.3/cds/details/aligned_allocator.h000066400000000000000000000066771341244201700210070ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_ALIGNED_ALLOCATOR_H #define CDSLIB_DETAILS_ALIGNED_ALLOCATOR_H #include #include namespace cds { namespace details { /// Allocator for aligned data /** The class is the wrapper around user-defined aligned allocator. Template parameters: \li \p T is a type to allocate \li \p ALIGNED_ALLOCATOR is an aligned allocator implementation. Default implementation is defined by macro CDS_DEFAULT_ALIGNED_ALLOCATOR from cds/user_setup/allocator.h header file. The \p nAlign parameter of member function specifyes desired aligment of data allocated. \par Note When an array allocation is performed the allocator guarantees the alignment for first element of array only. To guarantee the alignment for each element of the array the size of type \p T must be multiple of \p nAlign: \code sizeof(T) % nAlign == 0 \endcode */ template < typename T , typename ALIGNED_ALLOCATOR = CDS_DEFAULT_ALIGNED_ALLOCATOR > class AlignedAllocator: public ALIGNED_ALLOCATOR::template rebind::other { public: /// Underlying aligned allocator type typedef typename ALIGNED_ALLOCATOR::template rebind::other allocator_type; /// Analogue of operator new T(\p src... ) template T * New( size_t nAlign, const S&... src ) { return Construct( allocator_type::allocate( nAlign, 1), src... ); } /// Analogue of operator new T[\p nCount ] T * NewArray( size_t nAlign, size_t nCount ) { T * p = allocator_type::allocate( nAlign, nCount ); for ( size_t i = 0; i < nCount; ++i ) Construct( p + i ); return p; } /// Analogue of operator new T[\p nCount ]. /** Each item of array of type T is initialized by parameter \p src. */ template T * NewArray( size_t nAlign, size_t nCount, const S& src ) { T * p = allocator_type::allocate( nAlign, nCount ); for ( size_t i = 0; i < nCount; ++i ) Construct( p + i, src ); return p; } /// Analogue of operator delete void Delete( T * p ) { allocator_type::destroy( p ); allocator_type::deallocate( p, 1 ); } /// Analogue of operator delete [] void Delete( T * p, size_t nCount ) { for ( size_t i = 0; i < nCount; ++i ) allocator_type::destroy( p + i ); allocator_type::deallocate( p, nCount ); } /// Analogue of placement operator new( \p p ) T( \p src... ) template T * Construct( void * p, const S&... 
src ) { return new( p ) T( src... ); } /// Rebinds allocator to other type \p Q instead of \p T template struct rebind { typedef AlignedAllocator< Q, typename ALIGNED_ALLOCATOR::template rebind::other > other ; ///< Rebinding result }; }; }} // namespace cds::details #endif // #ifndef CDSLIB_DETAILS_ALIGNED_ALLOCATOR_H libcds-2.3.3/cds/details/aligned_type.h000066400000000000000000000056641341244201700200030ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_ALIGNED_TYPE_H #define CDSLIB_DETAILS_ALIGNED_TYPE_H #include namespace cds { namespace details { /// Aligned type /** This meta-algorithm solves compiler problem when you need to declare a type \p T with alignment equal to another type alignment. For example, the following declaration produces an error in Microsoft Visual Studio 2008 compiler: \code typedef double my_double; typedef __declspec(align( __alignof(my_double))) int aligned_int; \endcode In MS VS, the __declspec(align(N)) construction requires that N must be a integer constant (1, 2, 4 and so on) but not an integer constant expression. The result of this meta-algo is a type \p aligned_type::type that is \p T aligned by \p Alignment. For example, with \p aligned_type the prevoius example will not generate an error: \code typedef double my_double; typedef typename cds::details::aligned_type::type aligned_int; \endcode and result of this declaration is equivalent to \code typedef __declspec(align(8)) int aligned_int; \endcode The \p Alignment template parameter must be a constant expression and its result must be power of two. The maximum of its value is 1024. See also \ref align_as */ template struct aligned_type #ifdef CDS_DOXYGEN_INVOKED {} #endif ; //@cond none # define CDS_ALIGNED_TYPE_impl(nAlign) template struct aligned_type { typedef CDS_TYPE_ALIGNMENT(nAlign) T type; } CDS_ALIGNED_TYPE_impl(1); CDS_ALIGNED_TYPE_impl(2); CDS_ALIGNED_TYPE_impl(4); CDS_ALIGNED_TYPE_impl(8); CDS_ALIGNED_TYPE_impl(16); CDS_ALIGNED_TYPE_impl(32); CDS_ALIGNED_TYPE_impl(64); CDS_ALIGNED_TYPE_impl(128); CDS_ALIGNED_TYPE_impl(256); CDS_ALIGNED_TYPE_impl(512); CDS_ALIGNED_TYPE_impl(1024); # undef CDS_ALIGNED_TYPE_impl //@endcond /** Alignment by example This meta-algo is similar to \ref aligned_type . For example, the following code \code typedef typename cds::details::align_as::type aligned_int; \endcode declares type \p aligned_int which is \p int aligned like \p double. See also: \ref aligned_type */ template struct align_as { /// Result of meta-algo: type \p T aligned like type \p AlignAs typedef typename aligned_type::type type; }; }} // namespace cds::details #endif // #ifndef CDSLIB_DETAILS_ALIGNED_TYPE_H libcds-2.3.3/cds/details/allocator.h000066400000000000000000000156351341244201700173160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_ALLOCATOR_H #define CDSLIB_DETAILS_ALLOCATOR_H #include #include #include #include namespace cds { namespace details { /// Extends \p std::allocator interface to provide semantics like operator \p new and \p delete /** The class is the wrapper around underlying \p Alloc class. \p Alloc provides the \p std::allocator interface. 
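A hedged sketch of the intended use (element type and underlying allocator are illustrative):
\code
cds::details::Allocator< int, std::allocator<int> > alloc;
int* p = alloc.New( 42 );   // allocate + construct int( 42 )
alloc.Delete( p );          // destroy + deallocate
\endcode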
*/ template class Allocator : public std::conditional< std::is_same< T, typename Alloc::value_type>::value , Alloc , typename std::allocator_traits::template rebind_alloc >::type { public: /// Underlying allocator type typedef typename std::conditional< std::is_same< T, typename Alloc::value_type>::value , Alloc , typename std::allocator_traits::template rebind_alloc >::type allocator_type; /// Allocator traits typedef std::allocator_traits< allocator_type > allocator_traits; /// \p true if underlined allocator is \p std::allocator, \p false otherwise static constexpr bool const c_bStdAllocator = std::is_same< allocator_type, std::allocator>::value; /// Element type typedef T value_type; /// Analogue of operator new T(\p src... ) template value_type * New( S const&... src ) { return Construct( allocator_traits::allocate( *this, 1, nullptr ), src... ); } /// Analogue of operator new T( std::forward(args)... ) (move semantics) template value_type * MoveNew( Args&&... args ) { return MoveConstruct( allocator_traits::allocate( *this, 1, nullptr ), std::forward(args)... ); } /// Analogue of operator new T[\p nCount ] value_type * NewArray( size_t nCount ) { value_type * p = allocator_traits::allocate( *this, nCount, nullptr ); for ( size_t i = 0; i < nCount; ++i ) Construct( p + i ); return p; } /// Analogue of operator new T[\p nCount ]. /** Each item of array of type T is initialized by parameter \p src: T( src ) */ template value_type * NewArray( size_t nCount, S const& src ) { value_type * p = allocator_traits::allocate( *this, nCount, nullptr ); for ( size_t i = 0; i < nCount; ++i ) Construct( p + i, src ); return p; } # if CDS_COMPILER == CDS_COMPILER_INTEL //@cond value_type * NewBlock( size_t nSize ) { return Construct( heap_alloc( nSize )); } //@endcond # endif /// Allocates block of memory of size at least \p nSize bytes. /** Internally, the block is allocated as an array of \p void* pointers, then \p Construct() method is called to initialize \p T. Precondition: nSize >= sizeof(T) */ template value_type * NewBlock( size_t nSize, S const&... src ) { return Construct( heap_alloc( nSize ), src... ); } /// Analogue of operator delete void Delete( value_type * p ) { allocator_traits::destroy( *this, p ); allocator_traits::deallocate( *this, p, 1 ); } /// Analogue of operator delete [] void Delete( value_type * p, size_t nCount ) { for ( size_t i = 0; i < nCount; ++i ) allocator_traits::destroy( *this, p + i ); allocator_traits::deallocate( *this, p, nCount ); } # if CDS_COMPILER == CDS_COMPILER_INTEL //@cond value_type * Construct( void * p ) { return new( p ) value_type; } //@endcond # endif /// Analogue of placement operator new( \p p ) T( src... ) template value_type * Construct( void * p, S const&... src ) { value_type * pv = new( p ) value_type( src... ); return pv; } /// Analogue of placement operator new( p ) T( std::forward(args)... ) template value_type * MoveConstruct( void * p, Args&&... args ) { value_type * pv = new( p ) value_type( std::forward(args)... 
); return pv; } /// Rebinds allocator to other type \p Q instead of \p T template struct rebind { typedef Allocator< Q, typename allocator_traits::template rebind_alloc> other ; ///< Rebinding result }; private: //@cond void * heap_alloc( size_t nByteSize ) { assert( nByteSize >= sizeof(value_type)); size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *); typedef typename std::allocator_traits::template rebind_alloc< void * > void_allocator; return void_allocator().allocate( nPtrSize ); } //@endcond }; /// Deferral removing of the object of type \p T. Helper class template struct deferral_deleter { typedef T type ; ///< Type typedef Alloc allocator_type ; ///< Allocator for removing /// Frees the object \p p /** Caveats: this function uses temporary object of type \ref cds::details::Allocator to free the node \p p. So, the node allocator should be stateless. It is standard requirement for \p std::allocator class objects. Do not use this function directly. */ static void free( T * p ) { Allocator a; a.Delete( p ); } }; } // namespace details } // namespace cds #endif // #ifndef CDSLIB_DETAILS_ALLOCATOR_H libcds-2.3.3/cds/details/binary_functor_wrapper.h000066400000000000000000000034131341244201700221110ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_BINARY_FUNCTOR_WRAPPER_H #define CDSLIB_DETAILS_BINARY_FUNCTOR_WRAPPER_H #include //@cond namespace cds { namespace details { template struct binary_functor_wrapper { typedef ReturnType return_type; typedef Functor functor_type; typedef ArgType argument_type; typedef Accessor accessor; return_type operator()( argument_type const& a1, argument_type const& a2 ) const { return functor_type()( accessor()( a1 ), accessor()( a2 )); } template return_type operator()( argument_type const& a, Q const& q ) const { return functor_type()( accessor()(a), q ); } template return_type operator()( Q const& q, argument_type const& a ) const { return functor_type()( q, accessor()(a)); } template return_type operator()( Q1 const& q1, Q2 const& q2 ) const { return functor_type()( q1, q2 ); } }; template using predicate_wrapper = binary_functor_wrapper< bool, Predicate, ArgType, Accessor>; template using compare_wrapper = binary_functor_wrapper< int, Compare, ArgType, Accessor>; }} // namespace cds::details //@endcond #endif // #ifndef CDSLIB_DETAILS_BINARY_FUNCTOR_WRAPPER_H libcds-2.3.3/cds/details/bit_reverse_counter.h000066400000000000000000000037221341244201700214000ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_BIT_REVERSE_COUNTER_H #define CDSLIB_DETAILS_BIT_REVERSE_COUNTER_H #include //@cond namespace cds { namespace bitop { template class bit_reverse_counter { public: typedef Counter counter_type; private: counter_type m_nCounter; counter_type m_nReversed; int m_nHighBit; public: bit_reverse_counter() : m_nCounter(0) , m_nReversed(0) , m_nHighBit(-1) {} counter_type inc() { ++m_nCounter; int nBit; for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) { if ( !cds::bitop::complement( m_nReversed, nBit )) break; } if ( nBit < 0 ) { m_nReversed = m_nCounter; ++m_nHighBit; } return m_nReversed; } counter_type dec() { counter_type ret = m_nReversed; --m_nCounter; int nBit; for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) { if ( cds::bitop::complement( m_nReversed, nBit )) break; } if ( nBit < 0 ) { m_nReversed = m_nCounter; --m_nHighBit; } return ret; } counter_type value() const { return m_nCounter; } counter_type reversed_value() const { return m_nReversed; } int high_bit() const { return m_nHighBit; } }; }} // namespace cds::bitop //@endcond #endif // #ifndef CDSLIB_DETAILS_BIT_REVERSE_COUNTER_H libcds-2.3.3/cds/details/bitop_generic.h000066400000000000000000000172521341244201700201440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_BITOP_GENERIC_H #define CDSLIB_DETAILS_BITOP_GENERIC_H #include // rand() namespace cds { namespace bitop { namespace platform { // Return true if x = 2 ** k, k >= 0 #ifndef cds_bitop_isPow2_32_DEFINED static inline bool isPow2_32( uint32_t x ) { return (x & ( x - 1 )) == 0 && x; } #endif #ifndef cds_bitop_isPow2_64_DEFINED static inline bool isPow2_64( uint64_t x ) { return (x & ( x - 1 )) == 0 && x; } #endif //*************************************************** // Most significant bit number (1..N) // Return 0 if x == 0 // #ifndef cds_bitop_msb32_DEFINED // Return number (1..32) of most significant bit // Return 0 if x == 0 // Source: Linux kernel static inline int msb32( uint32_t x ) { int r = 32; if (!x) return 0; if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; } if (!(x & 0xff000000u)) { x <<= 8; r -= 8; } if (!(x & 0xf0000000u)) { x <<= 4; r -= 4; } if (!(x & 0xc0000000u)) { x <<= 2; r -= 2; } if (!(x & 0x80000000u)) { //x <<= 1; r -= 1; } return r; } #endif #ifndef cds_bitop_msb32nz_DEFINED static inline int msb32nz( uint32_t x ) { return msb32( x ) - 1; } #endif #ifndef cds_bitop_msb64_DEFINED static inline int msb64( uint64_t x ) { uint32_t h = (uint32_t) (x >> 32); if ( h ) return msb32( h ) + 32; return msb32( (uint32_t) x ); } #endif #ifndef cds_bitop_msb64nz_DEFINED static inline int msb64nz( uint64_t x ) { return msb64( x ) - 1; } #endif //*************************************************** // Least significant bit number (1..N) // Return 0 if x == 0 // #ifndef cds_bitop_lsb32_DEFINED // Return number (1..32) of least significant bit // Return 0 if x == 0 // Source: Linux kernel static inline int lsb32( uint32_t x ) { int r = 1; if (!x) return 0; if (!(x & 0xffff)) { x >>= 16; r += 16; } if (!(x & 0xff)) { x >>= 8; r += 8; } if (!(x & 0xf)) { x >>= 4; r += 4; } if (!(x & 3)) { x >>= 2; r += 2; } if (!(x & 1)) { //x >>= 1; r += 1; } return r; } #endif #ifndef cds_bitop_lsb32nz_DEFINED static inline int lsb32nz( uint32_t x ) { return lsb32( x ) - 1; } #endif #ifndef 
cds_bitop_lsb64_DEFINED static inline int lsb64( uint64_t x ) { if ( !x ) return 0; if ( x & 0xffffffffu ) return lsb32( (uint32_t) x ); return lsb32( (uint32_t) (x >> 32)) + 32; } #endif #ifndef cds_bitop_lsb64nz_DEFINED static inline int lsb64nz( uint64_t x ) { return lsb64( x ) - 1; } #endif //****************************************************** // Reverse bit order //****************************************************** #ifndef cds_bitop_rbo32_DEFINED static inline uint32_t rbo32( uint32_t x ) { // swap odd and even bits x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); // swap consecutive pairs x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); // swap nibbles ... x = ((x >> 4) & 0x0F0F0F0F) | ((x & 0x0F0F0F0F) << 4); // swap bytes x = ((x >> 8) & 0x00FF00FF) | ((x & 0x00FF00FF) << 8); // swap 2-byte long pairs return ( x >> 16 ) | ( x << 16 ); } #endif #ifndef cds_bitop_rbo64_DEFINED static inline uint64_t rbo64( uint64_t x ) { // Low 32bit Hight 32bit return ( static_cast(rbo32( (uint32_t) x )) << 32 ) | ( static_cast( rbo32( (uint32_t) (x >> 32)))); } #endif //****************************************************** // Set bit count. Return count of non-zero bits in word //****************************************************** #ifndef cds_bitop_sbc32_DEFINED static inline int sbc32( uint32_t x ) { # ifdef cds_beans_zbc32_DEFINED return 32 - zbc32( x ); # else // Algorithm from Sean Eron Anderson's great collection x = x - ((x >> 1) & 0x55555555); x = (x & 0x33333333) + ((x >> 2) & 0x33333333); return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; # endif } #endif #ifndef cds_bitop_sbc64_DEFINED static inline int sbc64( uint64_t x ) { # ifdef cds_beans_zbc64_DEFINED return 64 - zbc64( x ); # else return sbc32( (uint32_t) (x >> 32)) + sbc32( (uint32_t) x ); # endif } #endif //****************************************************** // Zero bit count. Return count of zero bits in word //****************************************************** #ifndef cds_bitop_zbc32_DEFINED static inline int zbc32( uint32_t x ) { return 32 - sbc32( x ); } #endif #ifndef cds_bitop_zbc64_DEFINED static inline int zbc64( uint64_t x ) { return 64 - sbc64( x ); } #endif // Bit complement #ifndef cds_bitop_complement32_DEFINED static inline bool complement32( uint32_t * pArg, unsigned int nBit ) { assert( pArg ); uint32_t nVal = *pArg & (1 << nBit); *pArg ^= 1 << nBit; return nVal != 0; } #endif #ifndef cds_bitop_complement64_DEFINED static inline bool complement64( uint64_t * pArg, unsigned int nBit ) { assert( pArg ); uint64_t nVal = *pArg & (uint64_t(1) << nBit); *pArg ^= uint64_t(1) << nBit; return nVal != 0; } #endif /* Simple random number generator Source: [2003] George Marsaglia "Xorshift RNGs" */ static inline uint32_t RandXorShift32(uint32_t x) { //static uint32_t xRandom = 2463534242UL ; //rand() | 0x0100 ; // must be nonzero //uint32_t x = xRandom; if ( !x ) x = (( std::rand() + 1) << 16 ) + std::rand() + 1; x ^= x << 13; x ^= x >> 15; return x ^= x << 5; } static inline uint64_t RandXorShift64(uint64_t x) { //static uint64_t xRandom = 88172645463325252LL; //uint64_t x = xRandom; if ( !x ) x = 88172645463325252LL; x ^= x << 13; x ^= x >> 7; return x ^= x << 17; } }} // namespace bitop::platform } // namespace cds #endif // CDSLIB_DETAILS_BITOP_GENERIC_H libcds-2.3.3/cds/details/bounded_array.h000066400000000000000000000064271341244201700201530ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_IMPL_BOUNDED_ARRAY_H
#define CDSLIB_IMPL_BOUNDED_ARRAY_H

/*
    Dynamic non-growing array

    Editions:
        2008.03.08    Maxim.Khiszinsky    Created
*/

#include <cds/details/allocator.h>  // cds::details::Allocator
#include <assert.h>
#include

//@cond
namespace cds { namespace details {
    /// Bounded dynamic array
    /**
        The class template is intended for storing fixed-size sequences of objects.
        Array capacity is constant and cannot be changed after the object is created.
        It is suitable for managing objects of non-copyable type \p T.

        \par Template parameters
            - \p T type of elements
            - \p Allocator dynamic memory allocator class (std::allocator semantics)
    */
    template <typename T, class Allocator = CDS_DEFAULT_ALLOCATOR>
    class bounded_array
    {
    public:
        typedef T value_type;                       ///< value type stored in the array
        typedef Allocator allocator_type;           ///< allocator type
        typedef value_type * iterator;              ///< item iterator
        typedef value_type const * const_iterator;  ///< item const iterator

    private:
        typedef cds::details::Allocator< T, allocator_type> allocator_impl;

        value_type *    m_arr;
        const size_t    m_nCapacity;

    public:
        /// Ctor
        explicit bounded_array(
            size_t nCapacity    ///< capacity
        )
            : m_arr( allocator_impl().NewArray( nCapacity ))
            , m_nCapacity( nCapacity )
        {}

        ~bounded_array()
        {
            allocator_impl().Delete( m_arr, capacity());
        }

        const value_type& operator []( size_t nItem ) const
        {
            assert( nItem < capacity());
            return m_arr[nItem];
        }

        value_type& operator []( size_t nItem )
        {
            assert( nItem < capacity());
            return m_arr[nItem];
        }

        size_t size() const noexcept
        {
            return capacity();
        }

        size_t capacity() const noexcept
        {
            return m_nCapacity;
        }

        /// Returns pointer to the first item in the array
        value_type * top() noexcept
        {
            return m_arr;
        }

        /// Get begin iterator
        const_iterator begin() const noexcept
        {
            return m_arr;
        }
        iterator begin() noexcept
        {
            return m_arr;
        }

        /// Get end iterator
        const_iterator end() const noexcept
        {
            return begin() + capacity();
        }
        iterator end() noexcept
        {
            return begin() + capacity();
        }
    };
} // namespace details
} // namespace cds
//@endcond

#endif  // #ifndef CDSLIB_IMPL_BOUNDED_ARRAY_H
libcds-2.3.3/cds/details/bounded_container.h000066400000000000000000000011401341244201700210020ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_BOUNDED_CONTAINER_H
#define CDSLIB_BOUNDED_CONTAINER_H

namespace cds {
    /// Bounded container
    /**
        If a container has an upper limit on the number of items, it should be based on the bounded_container class.
        An example of such a container: the cyclic queue (\p cds::container::VyukovMPMCCycleQueue)
    */
    struct bounded_container {};
}   // namespace cds

#endif  // CDSLIB_BOUNDED_CONTAINER_H
libcds-2.3.3/cds/details/defs.h000066400000000000000000000334061341244201700162530ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_DEFS_H
#define CDSLIB_DEFS_H

#include #include #include #include #include #include #include #include #include

/** \mainpage CDS: Concurrent Data Structures library

    This library is a collection of lock-free and lock-based fine-grained algorithms of data structures
    like maps, queues, lists, etc. The library contains implementations of well-known data structures
    and memory reclamation schemas for modern processor architectures. The library is written in C++11.

    The main namespace for the library is \ref cds.
    To see the full list of container classes, go to the modules tab.

    Supported processor architectures and operating systems (OS) are:
        - x86 [32bit] Linux, Windows, FreeBSD, MinGW
        - amd64 (x86-64) [64bit] Linux, Windows, FreeBSD, MinGW
        - ia64 (itanium) [64bit] Linux, HP-UX 11.23, HP-UX 11.31
        - sparc [64bit] Sun Solaris
        - Mac OS X amd64
        - ppc64 Linux

    Supported compilers:
        - GCC 4.8+
        - Clang 3.6+
        - MS Visual C++ 2015 and above
        - Intel C++ Compiler 15

    For each lock-free data structure the \p CDS library presents several implementations based on published papers.
    For example, there are several implementations of queue, differing in the memory reclamation schema used.
    However, all implementations of a given data structure support a common interface.

    To use any lock-free data structure, the following are needed:
    - an atomic operation library conforming to the C++11 memory model. libcds can be built with \p std::atomic,
      \p boost::atomic or its own @ref cds_cxx11_atomic "atomic implementation"
    - a safe memory reclamation (SMR) or garbage collecting (GC) algorithm. SMR is the key part of lock-free
      data structures: it solves the safe memory reclamation problem, one of the main problems of lock-free programming.
      The library contains implementations of several light-weight \ref cds_garbage_collector "memory reclamation schemes":
        - M.Michael's Hazard Pointer - see \p cds::gc::HP, \p cds::gc::DHP for more explanation
        - User-space Read-Copy Update (RCU) - see \p cds::urcu namespace
        - there is an empty \p cds::gc::nogc "GC" for append-only containers that do not support item reclamation.

    Many GCs require support from the thread. The library does not dictate the threading model you must use;
    it is designed to support various ones. See \p cds::threading on how to incorporate the cds library
    into your threading model.

    \anchor cds_how_to_use
    \par How to use

    The main part of lock-free programming is SMR, a so-called garbage collector, for safe memory reclamation.
    The library provides several types of SMR schemes. One of the most widely used and well-tested is the
    Hazard Pointer memory reclamation schema discovered by M. Michael and implemented in the library as the
    \p cds::gc::HP class. Usually, the application is based on only one type of GC. In the following examples
    we assume that you use Hazard Pointer (\p cds::gc::HP) based containers.

    First, in your code you should initialize the \p cds library and Hazard Pointer in the \p main() function:
    \code
    #include <cds/init.h>       // for cds::Initialize and cds::Terminate
    #include <cds/gc/hp.h>      // for cds::HP (Hazard Pointer) SMR

    int main(int argc, char** argv)
    {
        // Initialize libcds
        cds::Initialize();

        {
            // Initialize Hazard Pointer singleton
            cds::gc::HP hpGC;

            // If main thread uses lock-free containers
            // the main thread should be attached to libcds infrastructure
            cds::threading::Manager::attachThread();

            // Now you can use HP-based containers in the main thread
            //...
        }

        // Terminate libcds
        cds::Terminate();
    }
    \endcode

    Second, every thread that uses lock-free containers should be attached to the \p cds infrastructure.
    \code
    #include <cds/threading/model.h>

    int myThreadEntryPoint(void *)
    {
        // Attach the thread to libcds infrastructure
        cds::threading::Manager::attachThread();

        // Now you can use HP-based containers in the thread
        //...

        // Detach thread when terminating
        cds::threading::Manager::detachThread();
    }
    \endcode

    After that, you can use \p cds lock-free containers safely without any external synchronization.

    In some cases, your code must run in an external thread.
    For example, your application is a plug-in for a server that calls your code in a thread that has been
    created by the server. In this case, you should use the persistent mode of garbage collecting. In this mode,
    the thread attaches to the GC singleton only if it is not attached yet, and never calls detaching:
    \code
    #include <cds/threading/model.h>

    int plugin_entry_point()
    {
        // Attach the thread if it is not attached yet
        if ( !cds::threading::Manager::isThreadAttached())
            cds::threading::Manager::attachThread();

        // Do some work with HP-related containers
        ...
    }
    \endcode

    \par How to build

    libcds is mostly a header-only library. Only a small part of the library related to GC core functionality
    must be compiled. The library depends only on the C++ standard library.

    Test suite depends on:
    - \p boost.thread (thread-local storage support), boost.system
    - \p google-test

    Some parts of libcds may depend on the DCAS (double-width compare-and-swap) atomic primitive if the target
    architecture supports it. For x86, the cmake build script enables the -mcx16 compiler flag that switches
    DCAS support on. You may manually disable DCAS support with the following command line flags in GCC/clang
    (for the MS VC++ compiler DCAS is not supported):
    - \p -DCDS_DISABLE_128BIT_ATOMIC - for 64bit build
    - \p -DCDS_DISABLE_64BIT_ATOMIC - for 32bit build

    @warning All your projects AND libcds MUST be compiled with the same flags - either with DCAS support or without it.

    \par Windows build

    Prerequisites: for building the cds library and test suite you need:
    - perl installed; the \p PATH environment variable should contain the full path to the Perl binary.
      Perl is used to generate a large dictionary for testing purposes;
    - boost library 1.51 and above. You should create an environment variable \p BOOST_PATH containing
      the full path to the \p boost root directory (for example, C:\\libs\\boost_1_57_0).

    Open the solution file cds\projects\vc141\cds.sln with Microsoft VisualStudio 2017. The solution contains
    the \p cds project and a number of test projects. Just build the library using the solution.

    Warning: the solution depends on the \p BOOST_PATH environment variable that specifies the full path to the
    \p boost library root directory. The test projects search for \p boost libraries in:
    - for 32bit: \$(BOOST_PATH)/stage/lib, \$(BOOST_PATH)/stage32/lib, and \$(BOOST_PATH)/bin.
    - for 64bit: \$(BOOST_PATH)/stage64/lib and \$(BOOST_PATH)/bin.

    If you use static libcds, you should compile your projects with the CDS_BUILD_STATIC_LIB preprocessor definition.

    All tests are based on the googletest framework. The following environment variables specify where to find
    gtest include and library directories:
    - \p GTEST_ROOT - gtest root directory. \$(GTEST_ROOT)/include specifies the full path to gtest include files;
    - \p GTEST_LIB64 - the path to the 64bit gtest library dir;
    - \p GTEST_LIB32 - the path to the 32bit gtest library dir.

    \par *NIX build

    For Unix-like systems the GCC and Clang compilers are supported. Use GCC 4.8+ or Clang 3.6+ to build the
    cds library with CMake. See the accompanying file /build/cmake/readme.md for more info.
*/

/// The main library namespace
namespace cds {}

/*
    \brief Basic typedefs and defines

    You do not need to include this header directly. All library header files depend on defs.h and include it.

    Defines macros:

    CDS_COMPILER        Compiler:
        - CDS_COMPILER_MSVC     Microsoft Visual C++
        - CDS_COMPILER_GCC      GNU C++
        - CDS_COMPILER_CLANG    clang
        - CDS_COMPILER_UNKNOWN  unknown compiler

    CDS_COMPILER__NAME      Compiler name (string)

    CDS_COMPILER_VERSION    Compiler version (number)

    CDS_BUILD_BITS      Resulting binary code:
        - 32        32bit
        - 64        64bit
        - -1        undefined

    CDS_POW2_BITS       CDS_BUILD_BITS == 2**CDS_POW2_BITS

    CDS_PROCESSOR_ARCH  The processor architecture:
        - CDS_PROCESSOR_X86         Intel x86 (32bit)
        - CDS_PROCESSOR_AMD64       Amd64, Intel x86-64 (64bit)
        - CDS_PROCESSOR_IA64        Intel IA64 (Itanium)
        - CDS_PROCESSOR_SPARC       Sparc
        - CDS_PROCESSOR_PPC64       PowerPC64
        - CDS_PROCESSOR_ARM7        ARM v7
        - CDS_PROCESSOR_ARM8        ARM v8
        - CDS_PROCESSOR_UNKNOWN     undefined processor architecture

    CDS_PROCESSOR__NAME The name (string) of processor architecture

    CDS_OS_TYPE         Operating system type:
        - CDS_OS_UNKNOWN        unknown OS
        - CDS_OS_PTHREAD        unknown OS with pthread
        - CDS_OS_WIN32          Windows 32bit
        - CDS_OS_WIN64          Windows 64bit
        - CDS_OS_LINUX          Linux
        - CDS_OS_SUN_SOLARIS    Sun Solaris
        - CDS_OS_HPUX           HP-UX
        - CDS_OS_AIX            IBM AIX
        - CDS_OS_BSD            FreeBSD, OpenBSD, NetBSD - common flag
        - CDS_OS_FREE_BSD       FreeBSD
        - CDS_OS_OPEN_BSD       OpenBSD
        - CDS_OS_NET_BSD        NetBSD
        - CDS_OS_MINGW          MinGW
        - CDS_OS_OSX            Apple OS X

    CDS_OS__NAME        The name (string) of operating system type

    CDS_OS_INTERFACE    OS interface:
        - CDS_OSI_UNIX      Unix (POSIX)
        - CDS_OSI_WINDOWS   Windows

    CDS_BUILD_TYPE      Build type: 'RELEASE' or 'DEBUG' string
*/

#if defined(_DEBUG) || !defined(NDEBUG)
#   define CDS_DEBUG
#   define CDS_BUILD_TYPE "DEBUG"
#else
#   define CDS_BUILD_TYPE "RELEASE"
#endif

/// Unused function argument
#define CDS_UNUSED(x) (void)(x)

// Supported compilers:
#define CDS_COMPILER_MSVC       1
#define CDS_COMPILER_GCC        2
#define CDS_COMPILER_INTEL      3
#define CDS_COMPILER_CLANG      4
#define CDS_COMPILER_UNKNOWN    -1

// Supported processor architectures:
#define CDS_PROCESSOR_X86       1
#define CDS_PROCESSOR_IA64      2
#define CDS_PROCESSOR_SPARC     3
#define CDS_PROCESSOR_AMD64     4
#define CDS_PROCESSOR_PPC64     5   // PowerPC 64bit
#define CDS_PROCESSOR_ARM7      7
#define CDS_PROCESSOR_ARM8      8
#define CDS_PROCESSOR_UNKNOWN   -1

// Supported OS interfaces
#define CDS_OSI_UNKNOWN     0
#define CDS_OSI_UNIX        1
#define CDS_OSI_WINDOWS     2

// Supported operating systems (value of CDS_OS_TYPE):
#define CDS_OS_UNKNOWN      -1
#define CDS_OS_WIN32        1
#define CDS_OS_WIN64        5
#define CDS_OS_LINUX        10
#define CDS_OS_SUN_SOLARIS  20
#define CDS_OS_HPUX         30
#define CDS_OS_AIX          50  // IBM AIX
#define CDS_OS_FREE_BSD     61
#define CDS_OS_OPEN_BSD     62
#define CDS_OS_NET_BSD      63
#define CDS_OS_MINGW        70
#define CDS_OS_OSX          80
#define CDS_OS_PTHREAD      100

#if defined(_MSC_VER)
#   if defined(__ICL) || defined(__INTEL_COMPILER)
#       define CDS_COMPILER CDS_COMPILER_INTEL
#   elif defined(__clang__)
#       define CDS_COMPILER CDS_COMPILER_CLANG
#   else
#       define CDS_COMPILER CDS_COMPILER_MSVC
#   endif
#elif defined(__clang__)    // Clang checking must be before GCC since Clang defines __GCC__ too
#   define CDS_COMPILER CDS_COMPILER_CLANG
#elif defined( __GCC__ ) || defined(__GNUC__)
#   if defined(__ICL) || defined(__INTEL_COMPILER)
#       define CDS_COMPILER CDS_COMPILER_INTEL
#   else
#       define CDS_COMPILER CDS_COMPILER_GCC
#   endif
#else
#    define CDS_COMPILER CDS_COMPILER_UNKNOWN
#endif  // Compiler choice

// CDS_VERIFY: Debug - assert(_expr); Release - _expr
#ifdef CDS_DEBUG
# define CDS_VERIFY( _expr ) assert( _expr )
# define CDS_VERIFY_FALSE( _expr ) assert( !( _expr ))
# define CDS_DEBUG_ONLY( _expr ) _expr
# define CDS_VERIFY_EQ( _expr, val ) assert( (_expr) == (val) )
#else
# define CDS_VERIFY( _expr ) _expr
# define
CDS_VERIFY_FALSE( _expr ) _expr # define CDS_DEBUG_ONLY( _expr ) # define CDS_VERIFY_EQ( _expr, val ) _expr #endif #ifdef CDS_STRICT # define CDS_STRICT_DO(_expr) _expr #else # define CDS_STRICT_DO( _expr ) #endif #ifdef CDS_DEBUG # define cds_assert( expr ) assert( expr ) #else static inline void cds_assert( bool expr ) { if ( !expr ) abort(); } #endif // Compiler-specific defines #include /************************************************************************* Common things **************************************************************************/ namespace cds { /// any_type is used as a placeholder for auto-calculated type (usually in \p rebind templates) struct any_type {}; } // namespace cds #endif // #ifndef CDSLIB_DEFS_H libcds-2.3.3/cds/details/is_aligned.h000066400000000000000000000021031341244201700174160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_IS_ALIGNED_H #define CDSLIB_DETAILS_IS_ALIGNED_H #include namespace cds { namespace details { /// Checks if the pointer \p p has \p ALIGN byte alignment /** \p ALIGN must be power of 2. The function is mostly intended for run-time assertion */ template static inline bool is_aligned(T const * p) { return (((uintptr_t)p) & uintptr_t(ALIGN - 1)) == 0; } /// Checks if the pointer \p p has \p nAlign byte alignment /** \p nAlign must be power of 2. The function is mostly intended for run-time assertion */ template static inline bool is_aligned(T const * p, size_t nAlign) { return (((uintptr_t)p) & uintptr_t(nAlign - 1)) == 0; } }} // namespace cds::details #endif // #ifndef CDSLIB_DETAILS_IS_ALIGNED_H libcds-2.3.3/cds/details/make_const_type.h000066400000000000000000000014541341244201700205140ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_MAKE_CONST_TYPE_H #define CDSLIB_DETAILS_MAKE_CONST_TYPE_H #include namespace cds { namespace details { //@cond template struct make_const_type { typedef T type; typedef T * pointer; typedef T & reference; }; template struct make_const_type { typedef T const type; typedef T const * pointer; typedef T const & reference; }; //@endcond }} // namespace cds::details #endif // #ifndef CDSLIB_DETAILS_MAKE_CONST_TYPE_H libcds-2.3.3/cds/details/marked_ptr.h000066400000000000000000000315621341244201700174630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_MARKED_PTR_H #define CDSLIB_DETAILS_MARKED_PTR_H #include namespace cds { namespace details { /// Marked pointer /** On the modern architectures, the default data alignment is 4 (for 32bit) or 8 byte for 64bit. Therefore, the least 2 or 3 bits of the pointer is always zero and can be used as a bitfield to store logical flags. This trick is widely used in lock-free programming to operate with the pointer and its flags atomically. 
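For example, a minimal sketch (here \p Foo is a hypothetical type with 4-byte alignment, so its two least significant address bits are free and may carry flags):
\code
struct Foo { int n; };
typedef cds::details::marked_ptr< Foo, 3 > ptr_type;  // low 2 bits are mark bits

Foo f;
ptr_type p( &f, 1 );        // pointer with mark bit 0 set
assert( p.ptr() == &f );    // real pointer, mark bits stripped
assert( p.bits() == 1 );    // current mark bits
p |= 2;                     // set the second mark bit
assert( p.bits() == 3 );
\endcode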
Template parameters: - T - type of pointer - Bitmask - bitmask of least unused bits */ template class marked_ptr { T * m_ptr ; ///< pointer and its mark bits public: typedef T value_type ; ///< type of value the class points to typedef T * pointer_type ; ///< type of pointer static constexpr const uintptr_t bitmask = Bitmask; ///< bitfield bitmask static constexpr const uintptr_t pointer_bitmask = ~bitmask; ///< pointer bitmask public: /// Constructs null marked pointer. The flag is cleared. constexpr marked_ptr() noexcept : m_ptr( nullptr ) {} /// Constructs marked pointer with \p ptr value. The least bit(s) of \p ptr is the flag. constexpr explicit marked_ptr( value_type * ptr ) noexcept : m_ptr( ptr ) {} /// Constructs marked pointer with \p ptr value and \p nMask flag. /** The \p nMask argument defines the OR-bits */ marked_ptr( value_type * ptr, int nMask ) noexcept : m_ptr( ptr ) { assert( bits() == 0 ); *this |= nMask; } /// Copy constructor marked_ptr( marked_ptr const& src ) noexcept = default; /// Copy-assignment operator marked_ptr& operator =( marked_ptr const& p ) noexcept = default; # if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR) //@cond marked_ptr( marked_ptr&& src ) noexcept = default; marked_ptr& operator =( marked_ptr&& p ) noexcept = default; //@endcond # endif //TODO: make move ctor private: //@cond union pointer_cast { T * ptr; uintptr_t n; pointer_cast(T * p) : ptr(p) {} pointer_cast(uintptr_t i) : n(i) {} }; static uintptr_t to_int( value_type * p ) noexcept { return pointer_cast(p).n; } static value_type * to_ptr( uintptr_t n ) noexcept { return pointer_cast(n).ptr; } uintptr_t to_int() const noexcept { return to_int( m_ptr ); } //@endcond public: /// Returns the pointer without mark bits (real pointer) const version value_type * ptr() const noexcept { return to_ptr( to_int() & ~bitmask ); } /// Returns the pointer and bits together value_type * all() const noexcept { return m_ptr; } /// Returns the least bits of pointer according to \p Bitmask template argument of the class uintptr_t bits() const noexcept { return to_int() & bitmask; } /// Analogue for \ref ptr value_type * operator ->() const noexcept { return ptr(); } /// Assignment operator sets markup bits to zero marked_ptr operator =( T * p ) noexcept { m_ptr = p; return *this; } /// Set LSB bits as bits() | nBits marked_ptr& operator |=( int nBits ) noexcept { assert( (nBits & pointer_bitmask) == 0 ); m_ptr = to_ptr( to_int() | nBits ); return *this; } /// Set LSB bits as bits() & nBits marked_ptr& operator &=( int nBits ) noexcept { assert( (nBits & pointer_bitmask) == 0 ); m_ptr = to_ptr( to_int() & (pointer_bitmask | nBits)); return *this; } /// Set LSB bits as bits() ^ nBits marked_ptr& operator ^=( int nBits ) noexcept { assert( (nBits & pointer_bitmask) == 0 ); m_ptr = to_ptr( to_int() ^ nBits ); return *this; } /// Returns p |= nBits friend marked_ptr operator |( marked_ptr p, int nBits) noexcept { p |= nBits; return p; } /// Returns p |= nBits friend marked_ptr operator |( int nBits, marked_ptr p ) noexcept { p |= nBits; return p; } /// Returns p &= nBits friend marked_ptr operator &( marked_ptr p, int nBits) noexcept { p &= nBits; return p; } /// Returns p &= nBits friend marked_ptr operator &( int nBits, marked_ptr p ) noexcept { p &= nBits; return p; } /// Returns p ^= nBits friend marked_ptr operator ^( marked_ptr p, int nBits) noexcept { p ^= nBits; return p; } /// Returns p ^= nBits friend marked_ptr operator ^( int nBits, marked_ptr p ) noexcept { p ^= nBits; return p; } /// Inverts LSBs of 
pointer \p p friend marked_ptr operator ~( marked_ptr p ) noexcept { return p ^ marked_ptr::bitmask; } /// Comparing two marked pointer including its mark bits friend bool operator ==( marked_ptr p1, marked_ptr p2 ) noexcept { return p1.all() == p2.all(); } /// Comparing marked pointer and raw pointer, mark bits of \p p1 is ignored friend bool operator ==( marked_ptr p1, value_type const * p2 ) noexcept { return p1.ptr() == p2; } /// Comparing marked pointer and raw pointer, mark bits of \p p2 is ignored friend bool operator ==( value_type const * p1, marked_ptr p2 ) noexcept { return p1 == p2.ptr(); } /// Comparing two marked pointer including its mark bits friend bool operator !=( marked_ptr p1, marked_ptr p2 ) noexcept { return p1.all() != p2.all(); } /// Comparing marked pointer and raw pointer, mark bits of \p p1 is ignored friend bool operator !=( marked_ptr p1, value_type const * p2 ) noexcept { return p1.ptr() != p2; } /// Comparing marked pointer and raw pointer, mark bits of \p p2 is ignored friend bool operator !=( value_type const * p1, marked_ptr p2 ) noexcept { return p1 != p2.ptr(); } //@cond /// atomic< marked_ptr< T, Bitmask > > support T *& impl_ref() noexcept { return m_ptr; } //@endcond }; } // namespace details } // namespace cds //@cond CDS_CXX11_ATOMIC_BEGIN_NAMESPACE template class atomic< cds::details::marked_ptr > { private: typedef cds::details::marked_ptr marked_ptr; typedef atomics::atomic atomic_impl; atomic_impl m_atomic; public: bool is_lock_free() const volatile noexcept { return m_atomic.is_lock_free(); } bool is_lock_free() const noexcept { return m_atomic.is_lock_free(); } void store(marked_ptr val, memory_order order = memory_order_seq_cst) volatile noexcept { m_atomic.store( val.all(), order ); } void store(marked_ptr val, memory_order order = memory_order_seq_cst) noexcept { m_atomic.store( val.all(), order ); } marked_ptr load(memory_order order = memory_order_seq_cst) const volatile noexcept { return marked_ptr( m_atomic.load( order )); } marked_ptr load(memory_order order = memory_order_seq_cst) const noexcept { return marked_ptr( m_atomic.load( order )); } operator marked_ptr() const volatile noexcept { return load(); } operator marked_ptr() const noexcept { return load(); } marked_ptr exchange(marked_ptr val, memory_order order = memory_order_seq_cst) volatile noexcept { return marked_ptr( m_atomic.exchange( val.all(), order )); } marked_ptr exchange(marked_ptr val, memory_order order = memory_order_seq_cst) noexcept { return marked_ptr( m_atomic.exchange( val.all(), order )); } bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) volatile noexcept { return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order, failure_order ); } bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) noexcept { return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order, failure_order ); } bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) volatile noexcept { return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order, failure_order ); } bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) noexcept { return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order, 
failure_order ); } bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) volatile noexcept { return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order ); } bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) noexcept { return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order ); } bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) volatile noexcept { return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order ); } bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) noexcept { return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order ); } constexpr atomic() noexcept : m_atomic( nullptr ) {} constexpr explicit atomic(marked_ptr val) noexcept : m_atomic( val.all()) {} constexpr explicit atomic(T * p) noexcept : m_atomic( p ) {} atomic(const atomic&) = delete; atomic& operator=(const atomic&) = delete; #if !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION < CDS_COMPILER_MSVC15) // MSVC12, MSVC14, MSVC14.1: warning C4522: multiple assignment operators specified atomic& operator=(const atomic&) volatile = delete; marked_ptr operator=(marked_ptr val) volatile noexcept { store( val ); return val; } #endif marked_ptr operator=(marked_ptr val) noexcept { store( val ); return val; } }; CDS_CXX11_ATOMIC_END_NAMESPACE //@endcond #endif // #ifndef CDSLIB_DETAILS_MARKED_PTR_H libcds-2.3.3/cds/details/size_t_cast.h000066400000000000000000000015111341244201700176310ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_SIZE_T_CAST_H #define CDSLIB_DETAILS_SIZE_T_CAST_H #include //@cond namespace cds { namespace details { template struct size_t_unsigned; template <> struct size_t_unsigned<4> { typedef uint32_t type; }; template <> struct size_t_unsigned<8> { typedef uint64_t type; }; static inline size_t_unsigned::type size_t_cast( size_t n ) { return static_cast< size_t_unsigned::type>( n ); } }} // namespace cds::details //@endcond #endif // #ifndef CDSLIB_DETAILS_SIZE_T_CAST_H libcds-2.3.3/cds/details/throw_exception.h000066400000000000000000000036241341244201700205520ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_DETAILS_THROW_EXCEPTION_H #define CDSLIB_DETAILS_THROW_EXCEPTION_H #include #if !defined( CDS_EXCEPTION_ENABLED ) && !defined( CDS_USER_DEFINED_THROW_HANDLER ) # include #endif namespace cds { #if !defined( CDS_USER_DEFINED_THROW_EXCEPTION ) #if defined( CDS_EXCEPTION_ENABLED ) /// Function to throw an exception /** If you compile your code with exception enabled, \p %throw_exception() function throws the \p exception. If exception is disabled, \p %throw_exception() prints an exception message to standard output and call \p abort(). You can supply your own \p %cds::throw_exception() function; for that you should specify \p -DCDS_USER_DEFINED_THROW_EXCEPTION in compiler command line. 
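For example, a user-defined handler might look like the following sketch (\p my_log_fatal is a hypothetical application logging function; the signature mirrors the library's default implementation shown below):
\code
// Translation unit compiled with -DCDS_USER_DEFINED_THROW_EXCEPTION
namespace cds {
    template <typename E>
    CDS_NORETURN void throw_exception( E&& exception, char const* file, int line )
    {
        my_log_fatal( file, line, exception.what());  // hypothetical logger
        abort();
    }
} // namespace cds
\endcode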
        @note \p %throw_exception() never returns.
        If the user-defined \p %throw_exception() returns, the behavior is undefined.
    */
    template <typename E>
    CDS_NORETURN static inline void throw_exception(
        E&& exception,      ///< Exception to throw
        char const* file,   ///< Source filename
        int line            ///< File line
    )
    {
        CDS_UNUSED( file );
        CDS_UNUSED( line );

        throw exception;
    }
#else
    template <typename E>
    CDS_NORETURN static inline void throw_exception( E&& exception, char const* file, int line )
    {
        printf( "file %s, line %d: %s\n", file, line, exception.what());
        abort();
    }
#endif
//#else
    // User-provided cds::throw_exception()
#endif

#define CDS_THROW_EXCEPTION( exception ) ::cds::throw_exception( exception, __FILE__, __LINE__ )

} // namespace cds

#endif // #ifndef CDSLIB_DETAILS_THROW_EXCEPTION_H
libcds-2.3.3/cds/details/tls_holder.h000066400000000000000000000017671341244201700174740ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_DETAILS_TLS_HOLDER_H
#define CDSLIB_DETAILS_TLS_HOLDER_H

#include <cds/details/defs.h>

//@cond
namespace cds { namespace details {

    //
    // The class emulates the following incorrect code:
    //  template <typename T>
    //  class Foo {
    //      static thread_local T tls_;
    //  };
    // Each instantiation of tls_holder has its own TLS
    //
    template <typename T>
    class tls_holder
    {
        static T* tls()
        {
            thread_local T s_tls;
            return &s_tls;
        }

    public:
        static void set( T val )
        {
            *tls() = val;
        }

        static T get()
        {
            return *tls();
        }
    };

}} // namespace cds::details
//@endcond

#endif // #ifndef CDSLIB_DETAILS_TLS_HOLDER_H
libcds-2.3.3/cds/details/type_padding.h000066400000000000000000000032431341244201700177750ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_DETAILS_TYPE_PADDING_H
#define CDSLIB_DETAILS_TYPE_PADDING_H

namespace cds { namespace details {

    //@cond none
    template <typename T, int Align, int Modulo>
    struct type_padding_helper: public T
    {
        enum { value = Modulo };
        char _[Align - Modulo]; // padding
        using T::T;
    };

    template <typename T, int Align>
    struct type_padding_helper<T, Align, 0>: public T
    {
        enum { value = 0 };
        using T::T;
    };
    //@endcond

    /// Automatic alignment of type \p T to \p AlignFactor
    /**
        The class adds padding bytes to type T so that the following condition is true:
        \code
        sizeof( type_padding<T, AlignFactor>::type ) % AlignFactor == 0
        \endcode
        It is guaranteed that the count of padding bytes is no more than AlignFactor - 1.

        \b Applicability: type \p T must not have constructors other than the default ctor.
        For example, \p T may be any POD type.
    */
    template <typename T, int AlignFactor>
    class type_padding
    {
    public:
        /// Align factor
        enum { align_factor = AlignFactor <= 0 ? 1 : AlignFactor };

        /// Result type
        typedef type_padding_helper<T, align_factor, sizeof(T) % align_factor> type;

        /// Padding constant
        enum { value = type::value };
    };

}} // namespace cds::details
#endif // #ifndef CDSLIB_DETAILS_TYPE_PADDING_H
libcds-2.3.3/cds/gc/000077500000000000000000000000001341244201700141175ustar00rootroot00000000000000libcds-2.3.3/cds/gc/default_gc.h000066400000000000000000000006761341244201700163750ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_GC_DEFAULT_GC_H #define CDSLIB_GC_DEFAULT_GC_H #include namespace cds { namespace gc { /// Default garbage collector typedef HP default_gc; }} // namespace cds::gc #endif // #ifndef CDSLIB_GC_DEFAULT_GC_H libcds-2.3.3/cds/gc/details/000077500000000000000000000000001341244201700155445ustar00rootroot00000000000000libcds-2.3.3/cds/gc/details/hp_common.h000066400000000000000000000066541341244201700177070ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_GC_DETAILS_HP_COMMON_H #define CDSLIB_GC_DETAILS_HP_COMMON_H #include #include #ifdef CDS_ENABLE_HPSTAT # define CDS_HPSTAT( expr ) expr #else # define CDS_HPSTAT( expr ) #endif //@cond namespace cds { namespace gc { namespace hp { namespace common { /// Hazard pointer type typedef void* hazard_ptr; /// Retired pointer using cds::gc::details::retired_ptr; using cds::gc::make_retired_ptr; /// Hazard pointer guard class guard { public: guard() noexcept : hp_( nullptr ) , next_( nullptr ) {} template T* operator=( T* ptr ) noexcept { set( ptr ); return ptr; } std::nullptr_t operator=( std::nullptr_t ) noexcept { clear(); return nullptr; } hazard_ptr get() const noexcept { return hp_.load( atomics::memory_order_acquire ); } hazard_ptr get( atomics::memory_order order ) const noexcept { return hp_.load( order ); } template T* get_as() const noexcept { return reinterpret_cast( get()); } template void set( T* ptr ) noexcept { hp_.store( reinterpret_cast( ptr ), atomics::memory_order_release ); } void clear( atomics::memory_order order ) noexcept { hp_.store( nullptr, order ); } void clear() noexcept { clear( atomics::memory_order_release ); } private: atomics::atomic hp_; public: guard* next_; // free guard list }; /// Array of guards template class guard_array { public: static size_t const c_nCapacity = Capacity; public: guard_array() : arr_{ nullptr } {} static constexpr size_t capacity() { return c_nCapacity; } guard* operator[]( size_t idx ) const noexcept { assert( idx < capacity()); return arr_[idx]; } template void set( size_t idx, T* ptr ) noexcept { assert( idx < capacity()); assert( arr_[idx] != nullptr ); arr_[idx]->set( ptr ); } void clear( size_t idx ) noexcept { assert( idx < capacity()); assert( arr_[idx] != nullptr ); arr_[idx]->clear(); } guard* release( size_t idx ) noexcept { assert( idx < capacity()); guard* g = arr_[idx]; arr_[idx] = nullptr; return g; } void reset( size_t idx, guard* g ) noexcept { assert( idx < capacity()); assert( arr_[idx] == nullptr ); arr_[idx] = g; } private: guard* arr_[c_nCapacity]; }; /// Retired pointer disposer typedef void ( *disposer_func )( void* ); }}}} // namespace cds::gc::hp::common //@endcond #endif // #ifndef CDSLIB_GC_DETAILS_HP_COMMON_H libcds-2.3.3/cds/gc/details/retired_ptr.h000066400000000000000000000067021341244201700202450ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_GC_DETAILS_RETIRED_PTR_H #define CDSLIB_GC_DETAILS_RETIRED_PTR_H #include //@cond namespace cds { namespace gc { /// Common implementation details for any GC namespace details { /// Pointer to function to free (destruct and deallocate) retired pointer of specific type typedef void (* free_retired_ptr_func )( void * ); /// Retired pointer /** Pointer to an object that is ready to delete. */ struct retired_ptr { /// Pointer type typedef void * pointer; union { pointer m_p; ///< retired pointer uintptr_t m_n; }; free_retired_ptr_func m_funcFree; ///< pointer to the destructor function /// Comparison of two retired pointers static bool less( const retired_ptr& p1, const retired_ptr& p2 ) noexcept { return p1.m_p < p2.m_p; } /// Default ctor initializes pointer to \p nullptr retired_ptr() noexcept : m_p( nullptr ) , m_funcFree( nullptr ) {} /// Ctor retired_ptr( pointer p, free_retired_ptr_func func ) noexcept : m_p( p ) , m_funcFree( func ) {} /// Typecasting ctor template retired_ptr( T* p, free_retired_ptr_func func) noexcept : m_p( reinterpret_cast(p)) , m_funcFree( func ) {} /// Assignment operator retired_ptr& operator =( retired_ptr const& s) noexcept { m_p = s.m_p; m_funcFree = s.m_funcFree; return *this; } /// Invokes destructor function for the pointer void free() { assert( m_funcFree ); assert( m_p ); m_funcFree( m_p ); CDS_STRICT_DO( clear()); } /// Checks if the retired pointer is not empty explicit operator bool() const noexcept { return m_p != nullptr; } /// Clears retired pointer without \p free() call void clear() { m_p = nullptr; m_funcFree = nullptr; } }; static inline bool operator <( const retired_ptr& p1, const retired_ptr& p2 ) noexcept { return retired_ptr::less( p1, p2 ); } static inline bool operator ==( const retired_ptr& p1, const retired_ptr& p2 ) noexcept { return p1.m_p == p2.m_p; } static inline bool operator !=( const retired_ptr& p1, const retired_ptr& p2 ) noexcept { return !(p1 == p2); } } // namespace details template static inline cds::gc::details::retired_ptr make_retired_ptr( T * p ) { return cds::gc::details::retired_ptr( p, +[]( void* p ) { Func()( static_cast( p )); }); } }} // namespace cds::gc //@endcond #endif // #ifndef CDSLIB_GC_DETAILS_RETIRED_PTR_H libcds-2.3.3/cds/gc/dhp.h000066400000000000000000001453161341244201700150550ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_GC_DHP_SMR_H #define CDSLIB_GC_DHP_SMR_H #include #include #include #include #include #include #include namespace cds { namespace gc { /// Dynamic (adaptive) Hazard Pointer implementation details namespace dhp { using namespace cds::gc::hp::common; /// Exception "Dynamic Hazard Pointer SMR is not initialized" class not_initialized: public std::runtime_error { public: //@cond not_initialized() : std::runtime_error( "Global DHP SMR object is not initialized" ) {} //@endcond }; //@cond struct guard_block: public cds::intrusive::FreeListImpl::node { guard_block* next_block_ = nullptr; // next block in the thread list guard* first() { return reinterpret_cast( this + 1 ); } }; //@endcond //@cond /// \p guard_block allocator (global object) class hp_allocator { friend class smr; public: static hp_allocator& instance(); CDS_EXPORT_API guard_block* alloc(); void free( guard_block* block ) { free_list_.put( block ); } private: hp_allocator() #ifdef CDS_ENABLE_HPSTAT : block_allocated_(0) #endif {} CDS_EXPORT_API ~hp_allocator(); private: cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block #ifdef CDS_ENABLE_HPSTAT public: atomics::atomic block_allocated_; ///< count of allocated blocks #endif }; //@endcond //@cond /// Per-thread hazard pointer storage class thread_hp_storage { friend class smr; public: thread_hp_storage( guard* arr, size_t nSize ) noexcept : free_head_( arr ) , array_( arr ) , initial_capacity_( nSize ) # ifdef CDS_ENABLE_HPSTAT , alloc_guard_count_( 0 ) , free_guard_count_( 0 ) , extend_call_count_( 0 ) # endif { // Initialize guards new( arr ) guard[nSize]; extended_list_.store( nullptr, atomics::memory_order_release ); } thread_hp_storage() = delete; thread_hp_storage( thread_hp_storage const& ) = delete; thread_hp_storage( thread_hp_storage&& ) = delete; ~thread_hp_storage() { clear(); } guard* alloc() { if ( cds_unlikely( free_head_ == nullptr )) { extend(); assert( free_head_ != nullptr ); } guard* g = free_head_; free_head_ = g->next_; CDS_HPSTAT( ++alloc_guard_count_ ); return g; } void free( guard* g ) noexcept { if ( g ) { g->clear(); g->next_ = free_head_; free_head_ = g; CDS_HPSTAT( ++free_guard_count_ ); } } template< size_t Capacity> size_t alloc( guard_array& arr ) { for ( size_t i = 0; i < Capacity; ++i ) { if ( cds_unlikely( free_head_ == nullptr )) extend(); arr.reset( i, free_head_ ); free_head_ = free_head_->next_; } CDS_HPSTAT( alloc_guard_count_ += Capacity ); return Capacity; } template void free( guard_array& arr ) noexcept { guard* gList = free_head_; for ( size_t i = 0; i < Capacity; ++i ) { guard* g = arr[i]; if ( g ) { g->clear(); g->next_ = gList; gList = g; CDS_HPSTAT( ++free_guard_count_ ); } } free_head_ = gList; } void clear() { // clear array_ for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur ) cur->clear(); // free all extended blocks hp_allocator& a = hp_allocator::instance(); for ( guard_block* p = extended_list_.load( atomics::memory_order_relaxed ); p; ) { guard_block* next = p->next_block_; a.free( p ); p = next; } extended_list_.store( nullptr, atomics::memory_order_release ); } void init() { assert( extended_list_.load(atomics::memory_order_relaxed) == nullptr ); guard* p = array_; for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p ) p->next_ = p + 1; p->next_ = nullptr; free_head_ = array_; } private: void extend() { assert( free_head_ == nullptr ); guard_block* block = 
hp_allocator::instance().alloc(); block->next_block_ = extended_list_.load( atomics::memory_order_relaxed ); extended_list_.store( block, atomics::memory_order_release ); free_head_ = block->first(); CDS_HPSTAT( ++extend_call_count_ ); } private: guard* free_head_; ///< Head of free guard list atomics::atomic extended_list_; ///< Head of extended guard blocks allocated for the thread guard* const array_; ///< initial HP array size_t const initial_capacity_; ///< Capacity of \p array_ # ifdef CDS_ENABLE_HPSTAT public: size_t alloc_guard_count_; size_t free_guard_count_; size_t extend_call_count_; # endif }; //@endcond //@cond struct retired_block: public cds::intrusive::FreeListImpl::node { retired_block* next_; ///< Next block in thread-private retired array static size_t const c_capacity = 256; retired_block() : next_( nullptr ) {} retired_ptr* first() const { return reinterpret_cast( const_cast( this ) + 1 ); } retired_ptr* last() const { return first() + c_capacity; } }; //@endcond //@cond class retired_allocator { friend class smr; public: static retired_allocator& instance(); CDS_EXPORT_API retired_block* alloc(); void free( retired_block* block ) { block->next_ = nullptr; free_list_.put( block ); } private: retired_allocator() #ifdef CDS_ENABLE_HPSTAT : block_allocated_(0) #endif {} CDS_EXPORT_API ~retired_allocator(); private: cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block #ifdef CDS_ENABLE_HPSTAT public: atomics::atomic block_allocated_; ///< Count of allocated blocks #endif }; //@endcond //@cond /// Per-thread retired array class retired_array { friend class smr; public: retired_array() noexcept : current_block_( nullptr ) , current_cell_( nullptr ) , list_head_( nullptr ) , list_tail_( nullptr ) , block_count_(0) # ifdef CDS_ENABLE_HPSTAT , retire_call_count_( 0 ) , extend_call_count_( 0 ) # endif {} retired_array( retired_array const& ) = delete; retired_array( retired_array&& ) = delete; ~retired_array() { assert( empty()); fini(); } bool push( retired_ptr const& p ) noexcept { assert( current_block_ != nullptr ); assert( current_block_->first() <= current_cell_ ); assert( current_cell_ < current_block_->last()); //assert( &p != current_cell_ ); *current_cell_ = p; CDS_HPSTAT( ++retire_call_count_ ); if ( ++current_cell_ == current_block_->last()) { // goto next block if exists if ( current_block_->next_ ) { current_block_ = current_block_->next_; current_cell_ = current_block_->first(); return true; } // no free block // smr::scan() extend retired_array if needed return false; } return true; } bool repush( retired_ptr* p ) noexcept { bool ret = push( *p ); CDS_HPSTAT( --retire_call_count_ ); assert( ret ); return ret; } private: // called by smr void init() { if ( list_head_ == nullptr ) { retired_block* block = retired_allocator::instance().alloc(); assert( block->next_ == nullptr ); current_block_ = list_head_ = list_tail_ = block; current_cell_ = block->first(); block_count_ = 1; } } void fini() { retired_allocator& alloc = retired_allocator::instance(); for ( retired_block* p = list_head_; p; ) { retired_block* next = p->next_; alloc.free( p ); p = next; } current_block_ = list_head_ = list_tail_ = nullptr; current_cell_ = nullptr; block_count_ = 0; } void extend() { assert( list_head_ != nullptr ); assert( current_block_ == list_tail_ ); assert( current_cell_ == current_block_->last()); retired_block* block = retired_allocator::instance().alloc(); assert( block->next_ == nullptr ); current_block_ = list_tail_ = list_tail_->next_ = block; 
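// the fresh block becomes the new list tail; filling resumes from its first cell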
current_cell_ = block->first(); ++block_count_; CDS_HPSTAT( ++extend_call_count_ ); } bool empty() const { return current_block_ == nullptr || ( current_block_ == list_head_ && current_cell_ == current_block_->first()); } private: retired_block* current_block_; retired_ptr* current_cell_; // in current_block_ retired_block* list_head_; retired_block* list_tail_; size_t block_count_; # ifdef CDS_ENABLE_HPSTAT public: size_t retire_call_count_; size_t extend_call_count_; # endif }; //@endcond /// Internal statistics struct stat { size_t guard_allocated; ///< Count of allocated HP guards size_t guard_freed; ///< Count of freed HP guards size_t retired_count; ///< Count of retired pointers size_t free_count; ///< Count of free pointers size_t scan_count; ///< Count of \p scan() call size_t help_scan_count; ///< Count of \p help_scan() call size_t thread_rec_count; ///< Count of thread records size_t hp_block_count; ///< Count of extended HP blocks allocated size_t retired_block_count; ///< Count of retired blocks allocated size_t hp_extend_count; ///< Count of hp array \p extend() call size_t retired_extend_count; ///< Count of retired array \p extend() call /// Default ctor stat() { clear(); } /// Clears all counters void clear() { guard_allocated = guard_freed = retired_count = free_count = scan_count = help_scan_count = thread_rec_count = hp_block_count = retired_block_count = hp_extend_count = retired_extend_count = 0; } }; //@cond /// Per-thread data struct thread_data { thread_hp_storage hazards_; ///< Hazard pointers private to the thread retired_array retired_; ///< Retired data private to the thread char pad1_[cds::c_nCacheLineSize]; atomics::atomic sync_; ///< dummy var to introduce synchronizes-with relationship between threads char pad2_[cds::c_nCacheLineSize]; # ifdef CDS_ENABLE_HPSTAT size_t free_call_count_; size_t scan_call_count_; size_t help_scan_call_count_; # endif // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor // cppcheck-suppress uninitMemberVar thread_data( guard* guards, size_t guard_count ) : hazards_( guards, guard_count ) , sync_( 0 ) # ifdef CDS_ENABLE_HPSTAT , free_call_count_(0) , scan_call_count_(0) , help_scan_call_count_(0) # endif {} thread_data() = delete; thread_data( thread_data const& ) = delete; thread_data( thread_data&& ) = delete; void sync() { sync_.fetch_add( 1, atomics::memory_order_acq_rel ); } }; //@endcond //@cond // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation) class smr { struct thread_record; public: /// Returns the instance of Hazard Pointer \ref smr static smr& instance() { # ifdef CDS_DISABLE_SMR_EXCEPTION assert( instance_ != nullptr ); # else if ( !instance_ ) CDS_THROW_EXCEPTION( not_initialized()); # endif return *instance_; } /// Creates Dynamic Hazard Pointer SMR singleton /** Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance. Otherwise it does nothing. The Michael's HP reclamation schema depends of three parameters: - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from the data structure algorithms. By default, if \p nHazardPtrCount = 0, the function uses maximum of HP count for CDS library - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100. - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. 
            Must be greater than nHazardPtrCount * nMaxThreadCount.
            Default is 2 * nHazardPtrCount * nMaxThreadCount.
        */
        static CDS_EXPORT_API void construct(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
        );

        // for back-compatibility
        static void Construct(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
        )
        {
            construct( nInitialHazardPtrCount );
        }

        /// Destroys global instance of \ref smr
        /**
            The parameter \p bDetachAll should be used carefully: if its value is \p true,
            then the destroyed object automatically detaches all attached threads. This feature
            can be useful when you have no control over the thread termination, for example,
            when \p libcds is injected into an existing external thread.
        */
        static CDS_EXPORT_API void destruct(
            bool bDetachAll = false     ///< Detach all threads
        );

        // for back-compatibility
        static void Destruct(
            bool bDetachAll = false     ///< Detach all threads
        )
        {
            destruct( bDetachAll );
        }

        /// Checks if global SMR object is constructed and may be used
        static bool isUsed() noexcept
        {
            return instance_ != nullptr;
        }

        /// Set memory management functions
        /**
            @note This function may be called BEFORE creating an instance
            of Dynamic Hazard Pointer SMR.

            The SMR object allocates some memory for thread-specific data and for creating
            the SMR object. By default, the standard \p new and \p delete operators are used for this.
        */
        static CDS_EXPORT_API void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),
            void( *free_func )( void * p )
        );

        /// Returns thread-local data for the current thread
        static CDS_EXPORT_API thread_data* tls();

        static CDS_EXPORT_API void attach_thread();
        static CDS_EXPORT_API void detach_thread();

        /// Get internal statistics
        CDS_EXPORT_API void statistics( stat& st );

    public: // for internal use only
        /// The main garbage collecting function
        CDS_EXPORT_API void scan( thread_data* pRec );

        /// Helper scan routine
        /**
            The function guarantees that every node that is eligible for reuse is eventually freed,
            barring thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
            where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed
            pointers to the thread's list of reclaimed pointers.

            The function is called internally by \p scan().
        */
        CDS_EXPORT_API void help_scan( thread_data* pThis );

        hp_allocator& get_hp_allocator()
        {
            return hp_allocator_;
        }

        retired_allocator& get_retired_allocator()
        {
            return retired_allocator_;
        }

    private:
        CDS_EXPORT_API explicit smr( size_t nInitialHazardPtrCount );
        CDS_EXPORT_API ~smr();

        CDS_EXPORT_API void detach_all_thread();

    private:
        CDS_EXPORT_API thread_record* create_thread_data();
        static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

        /// Allocates Hazard Pointer SMR thread private data
        CDS_EXPORT_API thread_record* alloc_thread_data();

        /// Free HP SMR thread-private data
        CDS_EXPORT_API void free_thread_data( thread_record* pRec, bool callHelpScan );

    private:
        static CDS_EXPORT_API smr* instance_;

        atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
        size_t const        initial_hazard_count_;          ///< initial number of hazard pointers per thread
        hp_allocator        hp_allocator_;
        retired_allocator   retired_allocator_;

        // temporaries
        std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
    };
    //@endcond

    //@cond
    // for backward compatibility
    typedef smr GarbageCollector;

    // inlines
    inline hp_allocator& hp_allocator::instance()
    {
        return smr::instance().get_hp_allocator();
    }

    inline retired_allocator& retired_allocator::instance()
    {
        return smr::instance().get_retired_allocator();
    }
    //@endcond

} // namespace dhp

/// Dynamic (adaptive) Hazard Pointer SMR
/** @ingroup cds_garbage_collector

    Implementation of Dynamic (adaptive) Hazard Pointer SMR

    Sources:
        - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
        - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
        - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"

    %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"

    @note Internally, %DHP depends on a free-list implementation. There are a DCAS-based free-list
    \p cds::intrusive::TaggedFreeList and a more complicated CAS-based free-list \p cds::intrusive::FreeList.
    For the x86 architecture and GCC/clang, libcds selects the appropriate free-list based on the \p -mcx16 compiler flag.
    You may manually disable DCAS support specifying \p -DCDS_DISABLE_128BIT_ATOMIC for 64bit build or
    \p -DCDS_DISABLE_64BIT_ATOMIC for 32bit build in the compiler command line.
    All your projects and libcds MUST be compiled with the same flags - either with DCAS support or without it.
    For the MS VC++ compiler DCAS is not supported.

    See \ref cds_how_to_use "How to use" section for details on how to apply SMR.
*/
class DHP
{
public:
    /// Native guarded pointer type
    typedef void* guarded_pointer;

    /// Atomic reference
    template <typename T> using atomic_ref = atomics::atomic<T *>;

    /// Atomic type
    /**
        @headerfile cds/gc/dhp.h
    */
    template <typename T> using atomic_type = atomics::atomic<T>;

    /// Atomic marked pointer
    template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;

    /// Internal statistics
    typedef dhp::stat stat;

    /// Dynamic Hazard Pointer guard
    /**
        A guard is a hazard pointer. Additionally, the \p %Guard class manages allocation
        and deallocation of the hazard pointer. A \p %Guard object is movable but not copyable.

        The guard object can be in two states:
        - unlinked - the guard is not linked with any internal hazard pointer.
          In this state no operation except \p link() and move assignment is supported.
        - linked (default) - the guard allocates an internal hazard pointer and is fully operable.
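A minimal usage sketch (\p atomic_src is a hypothetical shared atomic pointer that is filled elsewhere in the program):
\code
atomics::atomic< foo* > atomic_src;   // assumption: published by another thread

{
    cds::gc::DHP::Guard g;
    foo* p = g.protect( atomic_src ); // p cannot be reclaimed while g holds it
    if ( p ) {
        // safe to dereference p here
    }
}   // guard destroyed - p may be reclaimed again
\endcode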
Due to performance reason the implementation does not check state of the guard in runtime. @warning Move assignment can transfer the guard in unlinked state, use with care. */ class Guard { public: /// Default ctor allocates a guard (hazard pointer) from thread-private storage Guard() noexcept : guard_( dhp::smr::tls()->hazards_.alloc()) {} /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support explicit Guard( std::nullptr_t ) noexcept : guard_( nullptr ) {} /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership) Guard( Guard&& src ) noexcept : guard_( src.guard_ ) { src.guard_ = nullptr; } /// Move assignment: the internal guards are swapped between \p src and \p this /** @warning \p src will become in unlinked state if \p this was unlinked on entry. */ Guard& operator=( Guard&& src ) noexcept { std::swap( guard_, src.guard_ ); return *this; } /// Copy ctor is prohibited - the guard is not copyable Guard( Guard const& ) = delete; /// Copy assignment is prohibited Guard& operator=( Guard const& ) = delete; /// Frees the internal hazard pointer if the guard is in linked state ~Guard() { unlink(); } /// Checks if the guard object linked with any internal hazard pointer bool is_linked() const { return guard_ != nullptr; } /// Links the guard with internal hazard pointer if the guard is in unlinked state void link() { if ( !guard_ ) guard_ = dhp::smr::tls()->hazards_.alloc(); } /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state void unlink() { if ( guard_ ) { dhp::smr::tls()->hazards_.free( guard_ ); guard_ = nullptr; } } /// Protects a pointer of type atomic /** Return the value of \p toGuard The function tries to load \p toGuard and to store it to the HP slot repeatedly until the guard's value equals \p toGuard */ template T protect( atomics::atomic const& toGuard ) { return protect(toGuard, [](T p) { return p; }); } /// Protects a converted pointer of type atomic /** Return the value of \p toGuard The function tries to load \p toGuard and to store result of \p f functor to the HP slot repeatedly until the guard's value equals \p toGuard. The function is useful for intrusive containers when \p toGuard is a node pointer that should be converted to a pointer to the value type before guarding. The parameter \p f of type Func is a functor that makes this conversion: \code struct functor { value_type * operator()( T * p ); }; \endcode Really, the result of f( toGuard.load()) is assigned to the hazard pointer. */ template T protect( atomics::atomic const& toGuard, Func f ) { assert( guard_ != nullptr ); T pCur = toGuard.load(atomics::memory_order_relaxed); T pRet; do { pRet = pCur; assign( f( pCur )); pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } /// Store \p p to the guard /** The function is just an assignment, no loop is performed. Can be used for a pointer that cannot be changed concurrently or for already guarded pointer. */ template T* assign( T* p ) { assert( guard_ != nullptr ); guard_->set( p ); dhp::smr::tls()->sync(); return p; } //@cond std::nullptr_t assign( std::nullptr_t ) { assert( guard_ != nullptr ); clear(); return nullptr; } //@endcond /// Store marked pointer \p p to the guard /** The function is just an assignment of p.ptr(), no loop is performed. Can be used for a marked pointer that cannot be changed concurrently or for already guarded pointer. 
*/ template T* assign( cds::details::marked_ptr p ) { return assign( p.ptr()); } /// Copy from \p src guard to \p this guard void copy( Guard const& src ) { assign( src.get_native()); } /// Clears value of the guard void clear() { assert( guard_ != nullptr ); guard_->clear(); } /// Gets the value currently protected (relaxed read) template T * get() const { assert( guard_ != nullptr ); return guard_->get_as(); } /// Gets native guarded pointer stored void* get_native() const { assert( guard_ != nullptr ); return guard_->get(); } //@cond dhp::guard* release() { dhp::guard* g = guard_; guard_ = nullptr; return g; } dhp::guard*& guard_ref() { return guard_; } //@endcond private: //@cond dhp::guard* guard_; //@endcond }; /// Array of Dynamic Hazard Pointer guards /** The class is intended for allocating an array of hazard pointer guards. Template parameter \p Count defines the size of the array. A \p %GuardArray object is not copy- and move-constructible and not copy- and move-assignable. */ template class GuardArray { public: /// Rebind array for other size \p OtherCount template struct rebind { typedef GuardArray other ; ///< rebinding result }; /// Array capacity static constexpr const size_t c_nCapacity = Count; public: /// Default ctor allocates \p Count hazard pointers GuardArray() { dhp::smr::tls()->hazards_.alloc( guards_ ); } /// Move ctor is prohibited GuardArray( GuardArray&& ) = delete; /// Move assignment is prohibited GuardArray& operator=( GuardArray&& ) = delete; /// Copy ctor is prohibited GuardArray( GuardArray const& ) = delete; /// Copy assignment is prohibited GuardArray& operator=( GuardArray const& ) = delete; /// Frees allocated hazard pointers ~GuardArray() { dhp::smr::tls()->hazards_.free( guards_ ); } /// Protects a pointer of type \p atomic /** Return the value of \p toGuard The function tries to load \p toGuard and to store it to the slot \p nIndex repeatedly until the guard's value equals \p toGuard */ template T protect( size_t nIndex, atomics::atomic const& toGuard ) { return protect(nIndex, toGuard, [](T p) { return p; }); } /// Protects a pointer of type \p atomic /** Return the value of \p toGuard The function tries to load \p toGuard and to store it to the slot \p nIndex repeatedly until the guard's value equals \p toGuard The function is useful for intrusive containers when \p toGuard is a node pointer that should be converted to a pointer to the value type before guarding. The parameter \p f of type Func is a functor to make that conversion: \code struct functor { value_type * operator()( T * p ); }; \endcode Actually, the result of f( toGuard.load()) is assigned to the hazard pointer. */ template T protect( size_t nIndex, atomics::atomic const& toGuard, Func f ) { assert( nIndex < capacity()); T pRet; do { assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed))); } while ( pRet != toGuard.load(atomics::memory_order_acquire)); return pRet; } /// Store \p p to the slot \p nIndex /** The function is just an assignment, no loop is performed. */ template T * assign( size_t nIndex, T * p ) { assert( nIndex < capacity()); guards_.set( nIndex, p ); dhp::smr::tls()->sync(); return p; } /// Store marked pointer \p p to the guard /** The function is just an assignment of p.ptr(), no loop is performed. Can be used for a marked pointer that cannot be changed concurrently or for already guarded pointer. 
*/ template T * assign( size_t nIndex, cds::details::marked_ptr p ) { return assign( nIndex, p.ptr()); } /// Copy guarded value from \p src guard to slot at index \p nIndex void copy( size_t nIndex, Guard const& src ) { assign( nIndex, src.get_native()); } /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex void copy( size_t nDestIndex, size_t nSrcIndex ) { assign( nDestIndex, get_native( nSrcIndex )); } /// Clear value of the slot \p nIndex void clear( size_t nIndex ) { guards_.clear( nIndex ); } /// Get current value of slot \p nIndex template T * get( size_t nIndex ) const { assert( nIndex < capacity()); return guards_[nIndex]->template get_as(); } /// Get native guarded pointer stored guarded_pointer get_native( size_t nIndex ) const { assert( nIndex < capacity()); return guards_[nIndex]->get(); } //@cond dhp::guard* release( size_t nIndex ) noexcept { return guards_.release( nIndex ); } //@endcond /// Capacity of the guard array static constexpr size_t capacity() { return Count; } private: //@cond dhp::guard_array guards_; //@endcond }; /// Guarded pointer /** A guarded pointer is a pair of a pointer and GC's guard. Usually, it is used for returning a pointer to the item from an lock-free container. The guard prevents the pointer to be early disposed (freed) by GC. After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time. Template arguments: - \p GuardedType - a type which the guard stores - \p ValueType - a value type - \p Cast - a functor for converting GuardedType* to ValueType*. Default is \p void (no casting). For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed. In such case the \p %guarded_ptr is: @code typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr; @endcode For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed. For example: @code struct foo { int const key; std::string value; }; struct value_accessor { std::string* operator()( foo* pFoo ) const { return &(pFoo->value); } }; // Guarded ptr typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr; @endcode You don't need use this class directly. All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor. 
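A short usage sketch (\p my_map is a hypothetical %DHP-based map whose \p extract() returns a guarded pointer):
@code
typedef cds::gc::DHP::guarded_ptr< foo > guarded_foo;

guarded_foo gp( my_map.extract( 5 ));
if ( gp ) {
    // the item is protected from reclamation while gp is alive
    process( gp->value );   // process() is a hypothetical consumer
}
// when gp is destroyed, the item may be disposed by the GC
@endcode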
*/ template class guarded_ptr { //@cond struct trivial_cast { ValueType * operator()( GuardedType * p ) const { return p; } }; template friend class guarded_ptr; //@endcond public: typedef GuardedType guarded_type; ///< Guarded type typedef ValueType value_type; ///< Value type /// Functor for casting \p guarded_type to \p value_type typedef typename std::conditional< std::is_same::value, trivial_cast, Cast >::type value_cast; public: /// Creates empty guarded pointer guarded_ptr() noexcept : guard_( nullptr ) {} //@cond explicit guarded_ptr( dhp::guard* g ) noexcept : guard_( g ) {} /// Initializes guarded pointer with \p p explicit guarded_ptr( guarded_type * p ) noexcept : guard_( nullptr ) { reset( p ); } explicit guarded_ptr( std::nullptr_t ) noexcept : guard_( nullptr ) {} //@endcond /// Move ctor guarded_ptr( guarded_ptr&& gp ) noexcept : guard_( gp.guard_ ) { gp.guard_ = nullptr; } /// Move ctor template guarded_ptr( guarded_ptr&& gp ) noexcept : guard_( gp.guard_ ) { gp.guard_ = nullptr; } /// Ctor from \p Guard explicit guarded_ptr( Guard&& g ) noexcept : guard_( g.release()) {} /// The guarded pointer is not copy-constructible guarded_ptr( guarded_ptr const& gp ) = delete; /// Clears the guarded pointer /** \ref release is called if the guarded pointer is not \ref empty */ ~guarded_ptr() noexcept { release(); } /// Move-assignment operator guarded_ptr& operator=( guarded_ptr&& gp ) noexcept { std::swap( guard_, gp.guard_ ); return *this; } /// Move-assignment from \p Guard guarded_ptr& operator=( Guard&& g ) noexcept { std::swap( guard_, g.guard_ref()); return *this; } /// The guarded pointer is not copy-assignable guarded_ptr& operator=(guarded_ptr const& gp) = delete; /// Returns a pointer to guarded value value_type * operator ->() const noexcept { assert( !empty()); return value_cast()( guard_->get_as()); } /// Returns a reference to guarded value value_type& operator *() noexcept { assert( !empty()); return *value_cast()( guard_->get_as()); } /// Returns const reference to guarded value value_type const& operator *() const noexcept { assert( !empty()); return *value_cast()(reinterpret_cast(guard_->get())); } /// Checks if the guarded pointer is \p nullptr bool empty() const noexcept { return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr; } /// \p bool operator returns !empty() explicit operator bool() const noexcept { return !empty(); } /// Clears guarded pointer /** If the guarded pointer has been released, the pointer can be disposed (freed) at any time. Dereferencing the guarded pointer after \p release() is dangerous. */ void release() noexcept { free_guard(); } //@cond // For internal use only!!! void reset(guarded_type * p) noexcept { alloc_guard(); assert( guard_ ); guard_->set( p ); } //@endcond private: //@cond void alloc_guard() { if ( !guard_ ) guard_ = dhp::smr::tls()->hazards_.alloc(); } void free_guard() { if ( guard_ ) { dhp::smr::tls()->hazards_.free( guard_ ); guard_ = nullptr; } } //@endcond private: //@cond dhp::guard* guard_; //@endcond }; public: /// Initializes %DHP memory manager singleton /** The constructor creates and initializes the %DHP global object. The %DHP object should be created before using CDS data structures based on \p %cds::gc::DHP. Usually, it is created at the beginning of the \p main() function. After creating the global object you may use CDS data structures based on \p %cds::gc::DHP. \p nInitialHazardPtrCount - initial count of guards allocated for each thread.
When a thread is initialized, the GC allocates a local guard pool for the thread from the common guard pool. The local thread's guard pool grows automatically from the common pool when needed. When the thread terminates, its guard pool is returned to the common GC pool. */ explicit DHP( size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointers per thread ) { dhp::smr::construct( nInitialHazardPtrCount ); } /// Destroys %DHP memory manager /** The destructor destroys the %DHP global object. After calling this function you may \b NOT use CDS data structures based on \p %cds::gc::DHP. Usually, the %DHP object is destroyed at the end of your \p main(). */ ~DHP() { dhp::GarbageCollector::destruct( true ); } /// Checks if count of hazard pointers is no less than \p nCountNeeded /** The function always returns \p true since the guard count is unlimited for \p %gc::DHP garbage collector. */ static constexpr bool check_available_guards( #ifdef CDS_DOXYGEN_INVOKED size_t nCountNeeded, #else size_t #endif ) { return true; } /// Set memory management functions /** @note This function may be called BEFORE creating an instance of Dynamic Hazard Pointer SMR. The SMR object allocates some memory for thread-specific data and for creating the SMR object. By default, the standard \p new and \p delete operators are used for this. */ static void set_memory_allocator( void* ( *alloc_func )( size_t size ), ///< \p malloc() function void( *free_func )( void * p ) ///< \p free() function ) { dhp::smr::set_memory_allocator( alloc_func, free_func ); } /// Retire pointer \p p with function \p func /** The function places pointer \p p to the array of pointers ready for removing (so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. \p func is a disposer: when \p p can be safely removed, \p func is called. */ template static void retire( T * p, void (* func)(void *)) { dhp::thread_data* rec = dhp::smr::tls(); if ( !rec->retired_.push( dhp::retired_ptr( p, func ))) dhp::smr::instance().scan( rec ); } /// Retire pointer \p p with functor of type \p Disposer /** The function places pointer \p p to the array of pointers ready for removing (so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is: \code template struct disposer { void operator()( T * p ) ; // disposing operator }; \endcode Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type: - it should be a stateless functor - it should be default-constructible - the result of the functor call with argument \p p should not depend on where the functor will be called. \par Examples: Operator \p delete functor: \code template struct disposer { void operator ()( T * p ) { delete p; } }; // How to call DHP::retire method int * p = new int; // ...
use p in lock-free manner cds::gc::DHP::retire( p ) ; // place p to retired pointer array of DHP SMR \endcode Functor based on \p std::allocator: \code template > struct disposer { template void operator()( T * p ) { typedef typename Alloc::template rebind::other alloc_t; alloc_t a; a.destroy( p ); a.deallocate( p, 1 ); } }; \endcode */ template static void retire( T* p ) { if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, +[]( void* p ) { Disposer()( static_cast( p )); }))) scan(); } /// Checks if Dynamic Hazard Pointer GC is constructed and may be used static bool isUsed() { return dhp::smr::isUsed(); } /// Forced GC cycle call for current thread /** Usually, this function should not be called directly. */ static void scan() { dhp::smr::instance().scan( dhp::smr::tls()); } /// Synonym for \p scan() static void force_dispose() { scan(); } /// Returns internal statistics /** The function clears \p st before gathering statistics. @note Internal statistics is available only if you compile \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. */ static void statistics( stat& st ) { dhp::smr::instance().statistics( st ); } /// Returns post-mortem statistics /** Post-mortem statistics is gathered in the \p %DHP object destructor and is accessible after destructing the global \p %DHP object. @note Internal statistics is available only if you compile \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. Usage: \code int main() { cds::Initialize(); { // Initialize DHP SMR cds::gc::DHP dhp; // deal with DHP-based data structures // ... } // DHP object destroyed // Get total post-mortem statistics cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics(); printf( "DHP statistics:\n" " thread count = %llu\n" " guard allocated = %llu\n" " guard freed = %llu\n" " retired data count = %llu\n" " free data count = %llu\n" " scan() call count = %llu\n" " help_scan() call count = %llu\n", st.thread_rec_count, st.guard_allocated, st.guard_freed, st.retired_count, st.free_count, st.scan_count, st.help_scan_count ); cds::Terminate(); } \endcode */ CDS_EXPORT_API static stat const& postmortem_statistics(); }; }} // namespace cds::gc #endif // #ifndef CDSLIB_GC_DHP_SMR_H libcds-2.3.3/cds/gc/hp.h000066400000000000000000001517761341244201700147160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_GC_HP_SMR_H #define CDSLIB_GC_HP_SMR_H #include #include #include #include #include #include /** @page cds_garbage_collectors_comparison Hazard Pointer SMR implementations @ingroup cds_garbage_collector
<table>
<tr><th>Feature</th><th>%cds::gc::HP</th><th>%cds::gc::DHP</th></tr>
<tr><td>Max number of guarded (hazard) pointers per thread</td><td>limited (specified at construction time)</td><td>unlimited (dynamically allocated when needed)</td></tr>
<tr><td>Max number of retired pointers<sup>1</sup></td><td>bounded, specified at construction time</td><td>bounded, adaptive, depends on current thread count and number of hazard pointers for each thread</td></tr>
<tr><td>Thread count</td><td>bounded, upper bound is specified at construction time</td><td>unbounded</td></tr>
</table>
1Unbounded count of retired pointers means a possibility of memory exhaustion. */ namespace cds { /// @defgroup cds_garbage_collector Garbage collectors /// Different safe memory reclamation schemas (garbage collectors) /** @ingroup cds_garbage_collector This namespace specifies different safe memory reclamation (SMR) algorithms. See \ref cds_garbage_collector "Garbage collectors" */ namespace gc { } // namespace gc } // namespace cds namespace cds { namespace gc { /// Hazard pointer implementation details namespace hp { using namespace cds::gc::hp::common; /// Exception "Not enough Hazard Pointer" class not_enough_hazard_ptr: public std::length_error { //@cond public: not_enough_hazard_ptr() : std::length_error( "Not enough Hazard Pointer" ) {} //@endcond }; /// Exception "Hazard Pointer SMR is not initialized" class not_initialized: public std::runtime_error { //@cond public: not_initialized() : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" ) {} //@endcond }; //@cond /// Per-thread hazard pointer storage class thread_hp_storage { public: thread_hp_storage( guard* arr, size_t nSize ) noexcept : free_head_( arr ) , array_( arr ) , capacity_( nSize ) # ifdef CDS_ENABLE_HPSTAT , alloc_guard_count_(0) , free_guard_count_(0) # endif { // Initialize guards new( arr ) guard[nSize]; for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr ) arr->next_ = arr + 1; arr->next_ = nullptr; } thread_hp_storage() = delete; thread_hp_storage( thread_hp_storage const& ) = delete; thread_hp_storage( thread_hp_storage&& ) = delete; size_t capacity() const noexcept { return capacity_; } bool full() const noexcept { return free_head_ == nullptr; } guard* alloc() { # ifdef CDS_DISABLE_SMR_EXCEPTION assert( !full()); # else if ( full()) CDS_THROW_EXCEPTION( not_enough_hazard_ptr()); # endif guard* g = free_head_; free_head_ = g->next_; CDS_HPSTAT( ++alloc_guard_count_ ); return g; } void free( guard* g ) noexcept { assert( g >= array_ && g < array_ + capacity()); if ( g ) { g->clear(); g->next_ = free_head_; free_head_ = g; CDS_HPSTAT( ++free_guard_count_ ); } } template< size_t Capacity> size_t alloc( guard_array& arr ) { size_t i; guard* g = free_head_; for ( i = 0; i < Capacity && g; ++i ) { arr.reset( i, g ); g = g->next_; } # ifdef CDS_DISABLE_SMR_EXCEPTION assert( i == Capacity ); # else if ( i != Capacity ) CDS_THROW_EXCEPTION( not_enough_hazard_ptr()); # endif free_head_ = g; CDS_HPSTAT( alloc_guard_count_ += Capacity ); return i; } template void free( guard_array& arr ) noexcept { guard* gList = free_head_; for ( size_t i = 0; i < Capacity; ++i ) { guard* g = arr[i]; if ( g ) { g->clear(); g->next_ = gList; gList = g; CDS_HPSTAT( ++free_guard_count_ ); } } free_head_ = gList; } // cppcheck-suppress functionConst void clear() { for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur ) cur->clear(); } guard& operator[]( size_t idx ) { assert( idx < capacity()); return array_[idx]; } static size_t calc_array_size( size_t capacity ) { return sizeof( guard ) * capacity; } guard* begin() const { return array_; } guard* end() const { return &array_[capacity_]; } private: guard* free_head_; ///< Head of free guard list guard* const array_; ///< HP array size_t const capacity_; ///< HP array capacity # ifdef CDS_ENABLE_HPSTAT public: size_t alloc_guard_count_; size_t free_guard_count_; # endif }; //@endcond //@cond /// Per-thread retired array class retired_array { public: retired_array( retired_ptr* arr, size_t capacity ) noexcept : current_( arr ) , last_( arr + 
capacity ) , retired_( arr ) # ifdef CDS_ENABLE_HPSTAT , retire_call_count_(0) # endif {} retired_array() = delete; retired_array( retired_array const& ) = delete; retired_array( retired_array&& ) = delete; size_t capacity() const noexcept { return last_ - retired_; } size_t size() const noexcept { return current_.load(atomics::memory_order_relaxed) - retired_; } bool push( retired_ptr&& p ) noexcept { retired_ptr* cur = current_.load( atomics::memory_order_relaxed ); *cur = p; CDS_HPSTAT( ++retire_call_count_ ); current_.store( cur + 1, atomics::memory_order_relaxed ); return cur + 1 < last_; } retired_ptr* first() const noexcept { return retired_; } retired_ptr* last() const noexcept { return current_.load( atomics::memory_order_relaxed ); } void reset( size_t nSize ) noexcept { current_.store( first() + nSize, atomics::memory_order_relaxed ); } void interthread_clear() { current_.exchange( first(), atomics::memory_order_acq_rel ); } bool full() const noexcept { return current_.load( atomics::memory_order_relaxed ) == last_; } static size_t calc_array_size( size_t capacity ) { return sizeof( retired_ptr ) * capacity; } private: atomics::atomic current_; retired_ptr* const last_; retired_ptr* const retired_; # ifdef CDS_ENABLE_HPSTAT public: size_t retire_call_count_; # endif }; //@endcond /// Internal statistics struct stat { size_t guard_allocated; ///< Count of allocated HP guards size_t guard_freed; ///< Count of freed HP guards size_t retired_count; ///< Count of retired pointers size_t free_count; ///< Count of free pointers size_t scan_count; ///< Count of \p scan() call size_t help_scan_count; ///< Count of \p help_scan() call size_t thread_rec_count; ///< Count of thread records /// Default ctor stat() { clear(); } /// Clears all counters void clear() { guard_allocated = guard_freed = retired_count = free_count = scan_count = help_scan_count = thread_rec_count = 0; } }; //@cond /// Per-thread data struct thread_data { thread_hp_storage hazards_; ///< Hazard pointers private to the thread retired_array retired_; ///< Retired data private to the thread char pad1_[cds::c_nCacheLineSize]; atomics::atomic sync_; ///< dummy var to introduce synchronizes-with relationship between threads char pad2_[cds::c_nCacheLineSize]; # ifdef CDS_ENABLE_HPSTAT // Internal statistics: size_t free_count_; size_t scan_count_; size_t help_scan_count_; # endif // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor // cppcheck-suppress uninitMemberVar thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity ) : hazards_( guards, guard_count ) , retired_( retired_arr, retired_capacity ) , sync_(0) # ifdef CDS_ENABLE_HPSTAT , free_count_(0) , scan_count_(0) , help_scan_count_(0) # endif {} thread_data() = delete; thread_data( thread_data const& ) = delete; thread_data( thread_data&& ) = delete; void sync() { sync_.fetch_add( 1, atomics::memory_order_acq_rel ); } }; //@endcond /// \p smr::scan() strategy enum scan_type { classic, ///< classic scan as described in Michael's works (see smr::classic_scan()) inplace ///< inplace scan without allocation (see smr::inplace_scan()) }; //@cond /// Hazard Pointer SMR (Safe Memory Reclamation) class smr { struct thread_record; public: /// Returns the instance of Hazard Pointer \ref smr static smr& instance() { # ifdef CDS_DISABLE_SMR_EXCEPTION assert( instance_ != nullptr ); # else if ( !instance_ ) CDS_THROW_EXCEPTION( not_initialized()); # endif return *instance_; } /// Creates Hazard Pointer SMR singleton /** Hazard 
Pointer SMR is a singleton. If the HP instance is not initialized then the function creates the instance. Otherwise it does nothing. Michael's HP reclamation schema depends on three parameters: - \p nHazardPtrCount - HP pointer count per thread. Usually it is a small number (2-4) depending on the data structure algorithms. By default, if \p nHazardPtrCount = 0, the function uses the maximum HP count required by the CDS library - \p nMaxThreadCount - max count of threads using HP GC in your application. Default is 100. - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than nHazardPtrCount * nMaxThreadCount. Default is 2 * nHazardPtrCount * nMaxThreadCount */ static CDS_EXPORT_API void construct( size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread size_t nMaxThreadCount = 0, ///< Max count of simultaneously working threads in your application size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum) ); // for back-compatibility static void Construct( size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread size_t nMaxThreadCount = 0, ///< Max count of simultaneously working threads in your application size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum) ) { construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType ); } /// Destroys global instance of \ref smr /** The parameter \p bDetachAll should be used carefully: if its value is \p true, then the object being destroyed automatically detaches all attached threads. This feature can be useful when you have no control over the thread termination, for example, when \p libcds is injected into an existing external thread. */ static CDS_EXPORT_API void destruct( bool bDetachAll = false ///< Detach all threads ); // for back-compatibility static void Destruct( bool bDetachAll = false ///< Detach all threads ) { destruct( bDetachAll ); } /// Checks if global SMR object is constructed and may be used static bool isUsed() noexcept { return instance_ != nullptr; } /// Set memory management functions /** @note This function may be called BEFORE creating an instance of Hazard Pointer SMR. The SMR object allocates some memory for thread-specific data and for creating the SMR object. By default, the standard \p new and \p delete operators are used for this.
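For instance, a minimal sketch: the C heap functions already match the required signatures, and the call must happen before the SMR singleton is constructed:
\code
cds::gc::hp::smr::set_memory_allocator( std::malloc, std::free );
\endcode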
*/ static CDS_EXPORT_API void set_memory_allocator( void* ( *alloc_func )( size_t size ), void (*free_func )( void * p ) ); /// Returns max Hazard Pointer count per thread size_t get_hazard_ptr_count() const noexcept { return hazard_ptr_count_; } /// Returns max thread count size_t get_max_thread_count() const noexcept { return max_thread_count_; } /// Returns max size of retired objects array size_t get_max_retired_ptr_count() const noexcept { return max_retired_ptr_count_; } /// Get current scan strategy scan_type get_scan_type() const { return scan_type_; } /// Checks that the required hazard pointer count \p nRequiredCount is less than or equal to the max hazard pointer count /** If nRequiredCount > get_hazard_ptr_count() then the exception \p not_enough_hazard_ptr is thrown */ static void check_hazard_ptr_count( size_t nRequiredCount ) { if ( instance().get_hazard_ptr_count() < nRequiredCount ) { # ifdef CDS_DISABLE_SMR_EXCEPTION assert( false ); // not enough hazard ptr # else CDS_THROW_EXCEPTION( not_enough_hazard_ptr()); # endif } } /// Returns thread-local data for the current thread static CDS_EXPORT_API thread_data* tls(); static CDS_EXPORT_API void attach_thread(); static CDS_EXPORT_API void detach_thread(); /// Get internal statistics CDS_EXPORT_API void statistics( stat& st ); public: // for internal use only /// The main garbage collecting function /** This function is called internally when the upper bound of the thread's list of reclaimed pointers is reached. There are the following scan algorithms: - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory Use the \p set_scan_type() member function to set up the appropriate scan algorithm. */ void scan( thread_data* pRec ) { pRec->sync(); ( this->*scan_func_ )( pRec ); } /// Helper scan routine /** The function guarantees that every node that is eligible for reuse is eventually freed, barring thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(), where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers to the thread's list of reclaimed pointers. The function is called internally by \p scan(). */ CDS_EXPORT_API void help_scan( thread_data* pThis ); private: CDS_EXPORT_API smr( size_t nHazardPtrCount, ///< Hazard pointer count per thread size_t nMaxThreadCount, ///< Max count of simultaneously working threads in your application size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread scan_type nScanType ///< Scan type (see \ref scan_type enum) ); CDS_EXPORT_API ~smr(); CDS_EXPORT_API void detach_all_thread(); /// Classic scan algorithm /** @anchor hzp_gc_classic_scan Classical scan algorithm as described in Michael's paper. A scan includes four stages. The first stage involves scanning the HP array for non-null values. Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointers. Only stage 1 accesses shared variables. The following stages operate only on private variables. The second stage of a scan involves sorting the local list of protected pointers to allow binary search in the third stage. The third stage of a scan involves checking each reclaimed node against the pointers in the local list of protected pointers. If the binary search yields no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in the thread's list of reclaimed pointers.
The fourth stage prepares the thread's new private list of reclaimed pointers that could not be freed during the current scan, where they remain until the next scan. This algorithm allocates memory for an internal HP array. This function is called internally by the ThreadGC object when the upper bound of the thread's list of reclaimed pointers is reached. */ CDS_EXPORT_API void classic_scan( thread_data* pRec ); /// In-place scan algorithm /** @anchor hzp_gc_inplace_scan Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory. All operations are performed in-place. */ CDS_EXPORT_API void inplace_scan( thread_data* pRec ); private: CDS_EXPORT_API thread_record* create_thread_data(); static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec ); /// Allocates Hazard Pointer SMR thread private data CDS_EXPORT_API thread_record* alloc_thread_data(); /// Free HP SMR thread-private data CDS_EXPORT_API void free_thread_data( thread_record* pRec, bool callHelpScan ); private: static CDS_EXPORT_API smr* instance_; atomics::atomic< thread_record*> thread_list_; ///< Head of thread list size_t const hazard_ptr_count_; ///< max count of thread's hazard pointers size_t const max_thread_count_; ///< max count of threads size_t const max_retired_ptr_count_; ///< max count of retired ptr per thread scan_type const scan_type_; ///< scan type (see \ref scan_type enum) void ( smr::*scan_func_ )( thread_data* pRec ); }; //@endcond //@cond // for backward compatibility typedef smr GarbageCollector; //@endcond } // namespace hp /// Hazard Pointer SMR (Safe Memory Reclamation) /** @ingroup cds_garbage_collector Implementation of classic Hazard Pointer SMR. Sources: - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes" - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects" - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers" Hazard Pointer SMR is a singleton. The main user-level part of the Hazard Pointer schema is the \p %cds::gc::HP class and its nested classes. Before using any HP-related class you must initialize \p %HP by constructing a \p %cds::gc::HP object at the beginning of your \p main(). See the \ref cds_how_to_use "How to use" section for details on how to apply the SMR schema. */ class HP { public: /// Native guarded pointer type typedef hp::hazard_ptr guarded_pointer; /// Atomic reference template using atomic_ref = atomics::atomic; /// Atomic marked pointer template using atomic_marked_ptr = atomics::atomic; /// Atomic type template using atomic_type = atomics::atomic; /// Exception "Not enough Hazard Pointer" typedef hp::not_enough_hazard_ptr not_enough_hazard_ptr_exception; /// Internal statistics typedef hp::stat stat; /// Hazard Pointer guard /** A guard is a hazard pointer. Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer. A \p %Guard object is movable but not copyable. The guard object can be in two states: - unlinked - the guard is not linked with any internal hazard pointer. In this state no operation except \p link() and move assignment is supported. - linked (default) - the guard allocates an internal hazard pointer and is fully operable. For performance reasons the implementation does not check the state of the guard at runtime. @warning Move assignment transfers the guard into the unlinked state, use with care.
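Typical usage, as a sketch (\p item and \p atomic_item are illustrative names):
\code
cds::gc::HP::Guard g;
item * p = g.protect( atomic_item ); // loops until the hazard pointer equals atomic_item
// p can now be dereferenced safely: it cannot be reclaimed while g protects it
\endcode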
*/ class Guard { public: /// Default ctor allocates a guard (hazard pointer) from thread-private storage /** @warning Can throw \p not_enough_hazard_ptr if internal hazard pointer objects are exhausted. */ Guard() : guard_( hp::smr::tls()->hazards_.alloc()) {} /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support explicit Guard( std::nullptr_t ) noexcept : guard_( nullptr ) {} /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership) Guard( Guard&& src ) noexcept : guard_( src.guard_ ) { src.guard_ = nullptr; } /// Move assignment: the internal guards are swapped between \p src and \p this /** @warning \p src will become unlinked if \p this was unlinked on entry. */ Guard& operator=( Guard&& src ) noexcept { std::swap( guard_, src.guard_ ); return *this; } /// Copy ctor is prohibited - the guard is not copyable Guard( Guard const& ) = delete; /// Copy assignment is prohibited Guard& operator=( Guard const& ) = delete; /// Frees the internal hazard pointer if the guard is in linked state ~Guard() { unlink(); } /// Checks if the guard object is linked with any internal hazard pointer bool is_linked() const { return guard_ != nullptr; } /// Links the guard with an internal hazard pointer if the guard is in unlinked state /** @warning Can throw \p not_enough_hazard_ptr_exception if the internal hazard pointer array is exhausted. */ void link() { if ( !guard_ ) guard_ = hp::smr::tls()->hazards_.alloc(); } /// Unlinks the guard from the internal hazard pointer; the guard becomes unlinked void unlink() { if ( guard_ ) { hp::smr::tls()->hazards_.free( guard_ ); guard_ = nullptr; } } /// Protects a pointer of type \p atomic /** Returns the value of \p toGuard. The function tries to load \p toGuard and to store it to the HP slot repeatedly until the guard's value equals \p toGuard. @warning The guard object should be in linked state, otherwise the result is undefined */ template T protect( atomics::atomic const& toGuard ) { return protect(toGuard, [](T p) { return p; }); } /// Protects a converted pointer of type \p atomic /** Returns the value of \p toGuard. The function tries to load \p toGuard and to store the result of the \p f functor to the HP slot repeatedly until the guard's value equals \p toGuard. The function is useful for intrusive containers when \p toGuard is a node pointer that should be converted to a pointer to the value before protecting. The parameter \p f of type Func is a functor that makes this conversion: \code struct functor { value_type * operator()( T * p ); }; \endcode Actually, the result of f( toGuard.load()) is assigned to the hazard pointer. @warning The guard object should be in linked state, otherwise the result is undefined */ template T protect( atomics::atomic const& toGuard, Func f ) { assert( guard_ != nullptr ); T pCur = toGuard.load(atomics::memory_order_relaxed); T pRet; do { pRet = pCur; assign( f( pCur )); pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } /// Store \p p to the guard /** The function is just an assignment of the value \p p to the guard; no loop is performed. Can be used for a pointer that cannot be changed concurrently or if the pointer is already guarded by another guard.
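For example (a sketch; \p g1 and \p g2 are two linked guards, \p atomic_ptr is an illustrative atomic source):
\code
foo * p = g1.protect( atomic_ptr ); // protection loop
g2.assign( p ); // no loop needed: p cannot be reclaimed while g1 still protects it
\endcode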
@warning The guard object should be in linked state, otherwise the result is undefined */ template T * assign( T* p ) { assert( guard_ != nullptr ); guard_->set( p ); hp::smr::tls()->sync(); return p; } //@cond std::nullptr_t assign( std::nullptr_t ) { assert( guard_ != nullptr ); guard_->clear(); return nullptr; } //@endcond /// Copy the guarded value from \p src guard to \p this guard (valid only in linked state) void copy( Guard const& src ) { assign( src.get_native()); } /// Store marked pointer \p p to the guard /** The function is just an assignment of p.ptr(); no loop is performed. Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer is already guarded by another guard. @warning The guard object should be in linked state, otherwise the result is undefined */ template T * assign( cds::details::marked_ptr p ) { return assign( p.ptr()); } /// Clear value of the guard (valid only in linked state) void clear() { assign( nullptr ); } /// Get the value currently protected (valid only in linked state) template T * get() const { assert( guard_ != nullptr ); return guard_->get_as(); } /// Get native hazard pointer stored (valid only in linked state) guarded_pointer get_native() const { assert( guard_ != nullptr ); return guard_->get(); } //@cond hp::guard* release() { hp::guard* g = guard_; guard_ = nullptr; return g; } hp::guard*& guard_ref() { return guard_; } //@endcond private: //@cond hp::guard* guard_; //@endcond }; /// Array of Hazard Pointer guards /** The class is intended for allocating an array of hazard pointer guards. Template parameter \p Count defines the size of the array. */ template class GuardArray { public: /// Rebind array for other size \p Count2 template struct rebind { typedef GuardArray other; ///< rebinding result }; /// Array capacity static constexpr const size_t c_nCapacity = Count; public: /// Default ctor allocates \p Count hazard pointers GuardArray() { hp::smr::tls()->hazards_.alloc( guards_ ); } /// Move ctor is prohibited GuardArray( GuardArray&& ) = delete; /// Move assignment is prohibited GuardArray& operator=( GuardArray&& ) = delete; /// Copy ctor is prohibited GuardArray( GuardArray const& ) = delete; /// Copy assignment is prohibited GuardArray& operator=( GuardArray const& ) = delete; /// Frees allocated hazard pointers ~GuardArray() { hp::smr::tls()->hazards_.free( guards_ ); } /// Protects a pointer of type \p atomic /** Returns the value of \p toGuard. The function tries to load \p toGuard and to store it to the slot \p nIndex repeatedly until the guard's value equals \p toGuard. */ template T protect( size_t nIndex, atomics::atomic const& toGuard ) { return protect(nIndex, toGuard, [](T p) { return p; }); } /// Protects a pointer of type \p atomic /** Returns the value of \p toGuard. The function tries to load \p toGuard and to store it to the slot \p nIndex repeatedly until the guard's value equals \p toGuard. The function is useful for intrusive containers when \p toGuard is a node pointer that should be converted to a pointer to the value type before guarding. The parameter \p f of type Func is a functor that makes this conversion: \code struct functor { value_type * operator()( T * p ); }; \endcode Actually, the result of f( toGuard.load()) is assigned to the hazard pointer.
*/ template T protect( size_t nIndex, atomics::atomic const& toGuard, Func f ) { assert( nIndex < capacity()); T pRet; do { assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed))); } while ( pRet != toGuard.load(atomics::memory_order_acquire)); return pRet; } /// Store \p p to the slot \p nIndex /** The function is just an assignment; no loop is performed. */ template T * assign( size_t nIndex, T * p ) { assert( nIndex < capacity()); guards_.set( nIndex, p ); hp::smr::tls()->sync(); return p; } /// Store marked pointer \p p to the guard /** The function is just an assignment of p.ptr(); no loop is performed. Can be used for a marked pointer that cannot be changed concurrently. */ template T * assign( size_t nIndex, cds::details::marked_ptr p ) { return assign( nIndex, p.ptr()); } /// Copy guarded value from \p src guard to slot at index \p nIndex void copy( size_t nIndex, Guard const& src ) { assign( nIndex, src.get_native()); } /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex void copy( size_t nDestIndex, size_t nSrcIndex ) { assign( nDestIndex, get_native( nSrcIndex )); } /// Clear value of the slot \p nIndex void clear( size_t nIndex ) { guards_.clear( nIndex ); } /// Get current value of slot \p nIndex template T * get( size_t nIndex ) const { assert( nIndex < capacity()); return guards_[nIndex]->template get_as(); } /// Get native hazard pointer stored guarded_pointer get_native( size_t nIndex ) const { assert( nIndex < capacity()); return guards_[nIndex]->get(); } //@cond hp::guard* release( size_t nIndex ) noexcept { return guards_.release( nIndex ); } //@endcond /// Capacity of the guard array static constexpr size_t capacity() { return c_nCapacity; } private: //@cond hp::guard_array guards_; //@endcond }; /// Guarded pointer /** A guarded pointer is a pair of a pointer and GC's guard. Usually, it is used for returning a pointer to an element of a lock-free container. The guard prevents the pointer from being disposed (freed) too early by the SMR. After destructing the \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time. Template arguments: - \p GuardedType - a type which the guard stores - \p ValueType - a value type - \p Cast - a functor for converting GuardedType* to ValueType*. Default is \p void (no casting). For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed. In such case the \p %guarded_ptr is: @code typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr; @endcode For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed. For example: @code struct foo { int const key; std::string value; }; struct value_accessor { std::string* operator()( foo* pFoo ) const { return &(pFoo->value); } }; // Guarded ptr typedef cds::gc::HP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr; @endcode You don't need to use this class directly. All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
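Ownership of the underlying hazard pointer can also be transferred from a \p Guard, as a sketch (\p foo and \p atomic_foo are illustrative names):
@code
cds::gc::HP::Guard g;
foo * p = g.protect( atomic_foo );
cds::gc::HP::guarded_ptr< foo > gp( std::move( g )); // g becomes unlinked
// *gp stays protected until gp is released or destroyed
@endcode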
*/ template class guarded_ptr { //@cond struct trivial_cast { ValueType * operator()( GuardedType * p ) const { return p; } }; template friend class guarded_ptr; //@endcond public: typedef GuardedType guarded_type; ///< Guarded type typedef ValueType value_type; ///< Value type /// Functor for casting \p guarded_type to \p value_type typedef typename std::conditional< std::is_same::value, trivial_cast, Cast >::type value_cast; public: /// Creates empty guarded pointer guarded_ptr() noexcept : guard_(nullptr) {} //@cond explicit guarded_ptr( hp::guard* g ) noexcept : guard_( g ) {} /// Initializes guarded pointer with \p p explicit guarded_ptr( guarded_type* p ) noexcept : guard_( nullptr ) { reset(p); } explicit guarded_ptr( std::nullptr_t ) noexcept : guard_( nullptr ) {} //@endcond /// Move ctor guarded_ptr( guarded_ptr&& gp ) noexcept : guard_( gp.guard_ ) { gp.guard_ = nullptr; } /// Move ctor template guarded_ptr( guarded_ptr&& gp ) noexcept : guard_( gp.guard_ ) { gp.guard_ = nullptr; } /// Ctor from \p Guard explicit guarded_ptr( Guard&& g ) noexcept : guard_( g.release()) {} /// The guarded pointer is not copy-constructible guarded_ptr( guarded_ptr const& gp ) = delete; /// Clears the guarded pointer /** \ref release() is called if the guarded pointer is not \ref empty() */ ~guarded_ptr() noexcept { release(); } /// Move-assignment operator guarded_ptr& operator=( guarded_ptr&& gp ) noexcept { std::swap( guard_, gp.guard_ ); return *this; } /// Move-assignment from \p Guard guarded_ptr& operator=( Guard&& g ) noexcept { std::swap( guard_, g.guard_ref()); return *this; } /// The guarded pointer is not copy-assignable guarded_ptr& operator=(guarded_ptr const& gp) = delete; /// Returns a pointer to guarded value value_type * operator ->() const noexcept { assert( !empty()); return value_cast()( guard_->get_as()); } /// Returns a reference to guarded value value_type& operator *() noexcept { assert( !empty()); return *value_cast()( guard_->get_as()); } /// Returns const reference to guarded value value_type const& operator *() const noexcept { assert( !empty()); return *value_cast()( guard_->get_as()); } /// Checks if the guarded pointer is \p nullptr bool empty() const noexcept { return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr; } /// \p bool operator returns !empty() explicit operator bool() const noexcept { return !empty(); } /// Clears guarded pointer /** If the guarded pointer has been released, the pointer can be disposed (freed) at any time. Dereferencing the guarded pointer after \p release() is dangerous. */ void release() noexcept { free_guard(); } //@cond // For internal use only!!! void reset(guarded_type * p) noexcept { alloc_guard(); assert( guard_ ); guard_->set(p); } //@endcond private: //@cond void alloc_guard() { if ( !guard_ ) guard_ = hp::smr::tls()->hazards_.alloc(); } void free_guard() { if ( guard_ ) { hp::smr::tls()->hazards_.free( guard_ ); guard_ = nullptr; } } //@endcond private: //@cond hp::guard* guard_; //@endcond }; public: /// \p scan() type enum class scan_type { classic = hp::classic, ///< classic scan as described in Michael's papers inplace = hp::inplace ///< inplace scan without allocation }; /// Initializes %HP singleton /** The constructor initializes the Hazard Pointer SMR singleton with passed parameters. If the instance does not yet exist then the function creates the instance. Otherwise it does nothing. Michael's %HP reclamation schema depends on three parameters: - \p nHazardPtrCount - hazard pointer count per thread.
Usually it is a small number (up to 10) depending on the data structure algorithms. If \p nHazardPtrCount = 0, the default value 8 is used - \p nMaxThreadCount - max count of threads using Hazard Pointer GC in your application. Default is 100. - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than nHazardPtrCount * nMaxThreadCount . Default is 2 * nHazardPtrCount * nMaxThreadCount . */ HP( size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread size_t nMaxThreadCount = 0, ///< Max count of simultaneously working threads in your application size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread scan_type nScanType = scan_type::inplace ///< Scan type (see \p scan_type enum) ) { hp::smr::construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, static_cast(nScanType) ); } /// Terminates GC singleton /** The destructor destroys the %HP global object. After calling this function you may \b NOT use CDS data structures based on \p %cds::gc::HP. Usually, the %HP object is destroyed at the end of your \p main(). */ ~HP() { hp::smr::destruct( true ); } /// Checks that the required hazard pointer count \p nCountNeeded is less than or equal to the max hazard pointer count /** If nRequiredCount > get_hazard_ptr_count() then the exception \p not_enough_hazard_ptr is thrown */ static void check_available_guards( size_t nCountNeeded ) { hp::smr::check_hazard_ptr_count( nCountNeeded ); } /// Set memory management functions /** @note This function may be called BEFORE creating an instance of Hazard Pointer SMR. The SMR object allocates some memory for thread-specific data and for creating the SMR object. By default, the standard \p new and \p delete operators are used for this. */ static void set_memory_allocator( void* ( *alloc_func )( size_t size ), ///< \p malloc() function void( *free_func )( void * p ) ///< \p free() function ) { hp::smr::set_memory_allocator( alloc_func, free_func ); } /// Returns max Hazard Pointer count static size_t max_hazard_count() { return hp::smr::instance().get_hazard_ptr_count(); } /// Returns max count of threads static size_t max_thread_count() { return hp::smr::instance().get_max_thread_count(); } /// Returns capacity of retired pointer array static size_t retired_array_capacity() { return hp::smr::instance().get_max_retired_ptr_count(); } /// Retire pointer \p p with function \p func /** The function places pointer \p p to the array of pointers ready for removing (so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. \p func is a disposer: when \p p can be safely removed, \p func is called. */ template static void retire( T * p, void( *func )( void * )) { hp::thread_data* rec = hp::smr::tls(); if ( !rec->retired_.push( hp::retired_ptr( p, func ))) hp::smr::instance().scan( rec ); } /// Retire pointer \p p with functor of type \p Disposer /** The function places pointer \p p to the array of pointers ready for removing (so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is: \code template struct disposer { void operator()( T * p ) ; // disposing operator }; \endcode Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type: - it should be a stateless functor - it should be default-constructible - the result of the functor call with argument \p p should not depend on where the functor will be called. \par Examples: Operator \p delete functor: \code template struct disposer { void operator ()( T * p ) { delete p; } }; // How to call HP::retire method int * p = new int; // ... use p in lock-free manner cds::gc::HP::retire( p ) ; // place p to retired pointer array of HP GC \endcode Functor based on \p std::allocator: \code template > struct disposer { template void operator()( T * p ) { typedef typename Alloc::template rebind::other alloc_t; alloc_t a; a.destroy( p ); a.deallocate( p, 1 ); } }; \endcode */ template static void retire( T * p ) { if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, +[]( void* p ) { Disposer()( static_cast( p )); }))) scan(); } /// Get current scan strategy static scan_type getScanType() { return static_cast( hp::smr::instance().get_scan_type()); } /// Checks if Hazard Pointer GC is constructed and may be used static bool isUsed() { return hp::smr::isUsed(); } /// Forces SMR call for current thread /** Usually, this function should not be called directly. */ static void scan() { hp::smr::instance().scan( hp::smr::tls()); } /// Synonym for \p scan() static void force_dispose() { scan(); } /// Returns internal statistics /** The function clears \p st before gathering statistics. @note Internal statistics is available only if you compile \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. */ static void statistics( stat& st ) { hp::smr::instance().statistics( st ); } /// Returns post-mortem statistics /** Post-mortem statistics is gathered in the \p %HP object destructor and is accessible after destructing the global \p %HP object. @note Internal statistics is available only if you compile \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. Usage: \code int main() { cds::Initialize(); { // Initialize HP SMR cds::gc::HP hp; // deal with HP-based data structures // ... } // HP object destroyed // Get total post-mortem statistics cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics(); printf( "HP statistics:\n" " thread count = %llu\n" " guard allocated = %llu\n" " guard freed = %llu\n" " retired data count = %llu\n" " free data count = %llu\n" " scan() call count = %llu\n" " help_scan() call count = %llu\n", st.thread_rec_count, st.guard_allocated, st.guard_freed, st.retired_count, st.free_count, st.scan_count, st.help_scan_count ); cds::Terminate(); } \endcode */ CDS_EXPORT_API static stat const& postmortem_statistics(); }; }} // namespace cds::gc #endif // #ifndef CDSLIB_GC_HP_SMR_H libcds-2.3.3/cds/gc/hp_membar.h000066400000000000000000000051711341244201700162260ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #include #include namespace cds { namespace gc { namespace hp { class seq_qst_membar { public: static void sync_fast_path() { std::atomic_thread_fence( std::memory_order_seq_cst ); } static void sync_slow_path() { sync_fast_path(); } //@cond static void init() {} //@endcond }; class shared_var_membar { private: static std::atomic shared_var_; public: static void sync_fast_path() { shared_var_.fetch_add( 1, std::memory_order_acq_rel ); } static void sync_slow_path() { sync_fast_path(); } //@cond static void init() {} //@endcond }; typedef seq_qst_membar default_membar; # if CDS_OS_TYPE == CDS_OS_LINUX class asymmetric_membar { static bool membarrier_available_; static void call_membarrier(); static void check_membarrier_available(); public: static void sync_fast_path() { if ( membarrier_available_ ) CDS_COMPILER_RW_BARRIER; else default_membar::sync_fast_path(); } static void sync_slow_path() { if ( membarrier_available_ ) call_membarrier(); else default_membar::sync_fast_path(); } //@cond static void init() { check_membarrier_available(); } //@endcond }; class asymmetric_global_membar { static bool membarrier_available_; static void call_membarrier(); static void check_membarrier_available(); public: static void sync_fast_path() { if ( membarrier_available_ ) CDS_COMPILER_RW_BARRIER; else default_membar::sync_fast_path(); } static void sync_slow_path() { if ( membarrier_available_ ) call_membarrier(); else default_membar::sync_fast_path(); } //@cond static void init() { check_membarrier_available(); } //@endcond }; #else typedef default_membar asymmetric_membar; typedef default_membar asymmetric_global_membar; #endif }}} // namespace cds::gc::hp libcds-2.3.3/cds/gc/nogc.h000066400000000000000000000015041341244201700152160ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_GC_NOGC_H #define CDSLIB_GC_NOGC_H namespace cds { namespace gc { /// No garbage collecting /** @ingroup cds_garbage_collector This empty class is used in \p libcds to mark that a template specialization implements the container without any garbage collector schema. Usually, the container with this "GC" does not support item removal. */ class nogc { public: //@cond /// Faked scan static void scan() {} static void force_dispose() {} //@endcond }; }} // namespace cds::gc #endif // #define CDSLIB_GC_NOGC_H libcds-2.3.3/cds/init.h000066400000000000000000000050101341244201700146360ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INIT_H #define CDSLIB_INIT_H #include #include #include namespace cds { //@cond namespace details { bool CDS_EXPORT_API init_first_call(); bool CDS_EXPORT_API fini_last_call(); void CDS_EXPORT_API check_hpstat_enabled( bool enabled ); } // namespace details //@endcond /// Initialize CDS library /** The function initializes the \p CDS library framework. Before usage of \p CDS library features your application must initialize it by calling the \p %Initialize() function, see \ref cds_how_to_use "how to use the library". You can call \p Initialize several times; only the first call is significant, subsequent calls are ignored.
To terminate the \p CDS library correctly, each call to \p %Initialize() must be balanced by a corresponding \p Terminate() call. Note that this function does not initialize garbage collectors. To use a GC you should call the GC-specific constructor function to initialize the internal structures of the GC. See \p cds::gc for details. */ static inline void Initialize( unsigned int nFeatureFlags = 0 ///< for future use, must be zero. ) { CDS_UNUSED( nFeatureFlags ); details::check_hpstat_enabled( #ifdef CDS_ENABLE_HPSTAT true #else false #endif ); if ( cds::details::init_first_call()) { cds::OS::topology::init(); cds::threading::ThreadData::s_nProcCount = cds::OS::topology::processor_count(); if ( cds::threading::ThreadData::s_nProcCount == 0 ) cds::threading::ThreadData::s_nProcCount = 1; cds::threading::Manager::init(); } } /// Terminate CDS library /** This function terminates the \p CDS library. After calling \p %Terminate(), many features of the library are unavailable. This call should be the last call of the \p CDS library in your application, see \ref cds_how_to_use "how to use the library". */ static inline void Terminate() { if ( cds::details::fini_last_call()) { cds::threading::Manager::fini(); cds::OS::topology::fini(); } } } // namespace cds #endif // CDSLIB_INIT_H libcds-2.3.3/cds/intrusive/000077500000000000000000000000001341244201700155565ustar00rootroot00000000000000libcds-2.3.3/cds/intrusive/basket_queue.h000066400000000000000000001025701341244201700204110ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_BASKET_QUEUE_H #define CDSLIB_INTRUSIVE_BASKET_QUEUE_H #include #include #include namespace cds { namespace intrusive { /// BasketQueue-related definitions /** @ingroup cds_intrusive_helper */ namespace basket_queue { /// BasketQueue node /** Template parameters: - GC - garbage collector used - Tag - a \ref cds_intrusive_hook_tag "tag" */ template struct node { typedef GC gc ; ///< Garbage collector typedef Tag tag ; ///< tag typedef cds::details::marked_ptr marked_ptr; ///< marked pointer typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr; ///< atomic marked pointer specific for GC /// Rebind node for other template parameters template struct rebind { typedef node other ; ///< Rebinding result }; atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container node() { m_pNext.store( marked_ptr(), atomics::memory_order_release ); } }; using cds::intrusive::single_link::default_hook; //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset defines offset in bytes of the \ref node member in your structure. Use the \p offsetof macro to define \p MemberOffset \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename...
Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description. \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... > { //@cond typedef NodeTraits node_traits; //@endcond }; /// BasketQueue internal statistics. May be used for debugging or profiling /** Template argument \p Counter defines the type of counter. Default is \p cds::atomicity::event_counter, which is weak, i.e. strict event counting is not guaranteed. You may use a stronger counter type such as \p cds::atomicity::item_counter, or even an integral type, for example, \p int. */ template struct stat { typedef Counter counter_type; ///< Counter type counter_type m_EnqueueCount; ///< Enqueue call count counter_type m_DequeueCount; ///< Dequeue call count counter_type m_EnqueueRace; ///< Count of enqueue race conditions encountered counter_type m_DequeueRace; ///< Count of dequeue race conditions encountered counter_type m_AdvanceTailError;///< Count of "advance tail failed" events counter_type m_BadTail; ///< Count of events "Tail is not pointed to the last item in the queue" counter_type m_TryAddBasket; ///< Count of attempts of adding a new item to a basket (only for BasketQueue; for other queues this metric is not used) counter_type m_AddBasketCount; ///< Count of events "Enqueue a new item into basket" (only for BasketQueue; for other queues this metric is not used) counter_type m_EmptyDequeue; ///< Count of dequeue from empty queue /// Register enqueue call void onEnqueue() { ++m_EnqueueCount; } /// Register dequeue call void onDequeue() { ++m_DequeueCount; } /// Register enqueue race event void onEnqueueRace() { ++m_EnqueueRace; } /// Register dequeue race event void onDequeueRace() { ++m_DequeueRace; } /// Register "advance tail failed" event void onAdvanceTailFailed() { ++m_AdvanceTailError; } /// Register event "Tail is not pointed to last item in the queue" void onBadTail() { ++m_BadTail; } /// Register an attempt to add a new item to a basket void onTryAddBasket() { ++m_TryAddBasket; } /// Register event "Enqueue a new item into basket" (only for BasketQueue; for other queues this metric is not used) void onAddBasket() { ++m_AddBasketCount; } /// Register dequeuing from empty queue void onEmptyDequeue() { ++m_EmptyDequeue; } //@cond void reset() { m_EnqueueCount.reset(); m_DequeueCount.reset(); m_EnqueueRace.reset(); m_DequeueRace.reset(); m_AdvanceTailError.reset(); m_BadTail.reset(); m_TryAddBasket.reset(); m_AddBasketCount.reset(); m_EmptyDequeue.reset(); } stat& operator +=( stat const& s ) { m_EnqueueCount += s.m_EnqueueCount.get(); m_DequeueCount += s.m_DequeueCount.get(); m_EnqueueRace += s.m_EnqueueRace.get(); m_DequeueRace += s.m_DequeueRace.get(); m_AdvanceTailError += s.m_AdvanceTailError.get(); m_BadTail += s.m_BadTail.get(); m_TryAddBasket += s.m_TryAddBasket.get(); m_AddBasketCount += s.m_AddBasketCount.get(); m_EmptyDequeue += s.m_EmptyDequeue.get(); return *this; } //@endcond }; /// Dummy BasketQueue statistics - no counting is performed, no overhead.
Supports the same interface as \p basket_queue::stat struct empty_stat { //@cond void onEnqueue() const {} void onDequeue() const {} void onEnqueueRace() const {} void onDequeueRace() const {} void onAdvanceTailFailed() const {} void onBadTail() const {} void onTryAddBasket() const {} void onAddBasket() const {} void onEmptyDequeue() const {} void reset() {} empty_stat& operator +=( empty_stat const& ) { return *this; } //@endcond }; /// BasketQueue default type traits struct traits { /// Back-off strategy typedef cds::backoff::empty back_off; /// Hook, possible types are \p basket_queue::base_hook, \p basket_queue::member_hook, \p basket_queue::traits_hook typedef basket_queue::base_hook<> hook; /// The functor used for disposing removed items. Default is \p opt::v::empty_disposer. This option is used for dequeuing typedef opt::v::empty_disposer disposer; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics (by default, disabled) /** Possible option values are: \p basket_queue::stat, \p basket_queue::empty_stat (the default), user-provided class that supports \p %basket_queue::stat interface. */ typedef basket_queue::empty_stat stat; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Link checking, see \p cds::opt::link_checker static constexpr const opt::link_check_type link_checker = opt::debug_check_link; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; }; /// Metafunction converting option list to \p basket_queue::traits /** Supported \p Options are: - \p opt::hook - hook used. Possible hooks are: \p basket_queue::base_hook, \p basket_queue::member_hook, \p basket_queue::traits_hook. If the option is not specified, \p %basket_queue::base_hook<> is used. - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. This option is used when dequeuing. - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) To enable item counting use \p cds::atomicity::item_counter - \p opt::stat - the type to gather internal statistics. Possible statistics types are: \p basket_queue::stat, \p basket_queue::empty_stat, user-provided class that supports \p %basket_queue::stat interface. Default is \p %basket_queue::empty_stat (internal statistics disabled). - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model).
Example: declare \p %BasketQueue with item counting and internal statistics \code typedef cds::intrusive::BasketQueue< cds::gc::HP, Foo, typename cds::intrusive::basket_queue::make_traits< cds::intrusive::opt::hook< cds::intrusive::basket_queue::base_hook< cds::opt::gc< cds::gc::HP >>>, cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::stat< cds::intrusive::basket_queue::stat<> > >::type > myQueue; \endcode */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace basket_queue /// Basket lock-free queue (intrusive variant) /** @ingroup cds_intrusive_queue Implementation of basket queue algorithm. \par Source: [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue" Key idea: In the 'basket' approach, instead of the traditional ordered list of nodes, the queue consists of an ordered list of groups of nodes (logical baskets). The order of nodes in each basket need not be specified, and in fact, it is easiest to maintain them in LIFO order. The baskets fulfill the following basic rules: - Each basket has a time interval in which all its nodes' enqueue operations overlap. - The baskets are ordered by the order of their respective time intervals. - For each basket, its nodes' dequeue operations occur after its time interval. - The dequeue operations are performed according to the order of baskets. Two properties define the FIFO order of nodes: - The order of nodes in a basket is not specified. - The order of nodes in different baskets is the FIFO-order of their respective baskets. In algorithms such as the MS-queue or optimistic queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the queue's tail pointer, and all the threads that fail on a particular CAS operation (and also the winner of that CAS) overlap in time. In particular, they share the time interval of the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of the queue may be inserted into the same basket. By integrating the basket-mechanism as the back-off mechanism, the time usually spent on backing-off before trying to link onto the new tail, can now be utilized to insert the failed operations into the basket, allowing enqueues to complete sooner. In the meantime, the next successful CAS operations by enqueues allow new baskets to be formed down the list, and these can be filled concurrently. Moreover, the failed operations don't retry their link attempt on the new tail, lowering the overall contention on it. This leads to a queue algorithm that unlike all former concurrent queue algorithms requires virtually no tuning of the backoff mechanisms to reduce contention, making the algorithm an attractive out-of-the-box queue. In order to enqueue, just as in \p MSQueue, a thread first tries to link the new node to the last node. If it fails to do so, another thread has already succeeded. Thus it tries to insert the new node into the new basket that was created by the winner thread. To dequeue a node, a thread first reads the head of the queue to obtain the oldest basket. It may then dequeue any node in the oldest basket. Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP - \p T - type of value to be stored in the queue - \p Traits - queue traits, default is \p basket_queue::traits.
You can use \p basket_queue::make_traits metafunction to make your traits or just derive your traits from \p %basket_queue::traits: \code struct myTraits: public cds::intrusive::basket_queue::traits { typedef cds::intrusive::basket_queue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::intrusive::BasketQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::intrusive::BasketQueue< cds::gc::HP, Foo, typename cds::intrusive::basket_queue::make_traits< cds::opt::stat< cds::intrusive::basket_queue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode Garbage collecting schema: \p GC must be consistent with the \p basket_queue::node GC. \par About item disposing Like \p MSQueue, the Baskets queue algorithm has a key feature: even if the queue is empty, it contains one item that is a "dummy" one from the standpoint of the algorithm. See \p dequeue() function doc for explanation. \par Examples \code #include <cds/intrusive/basket_queue.h> #include <cds/gc/hp.h> namespace ci = cds::intrusive; typedef cds::gc::HP hp_gc; // Basket queue with Hazard Pointer garbage collector, base hook + item disposer: struct Foo: public ci::basket_queue::node< hp_gc > { // Your data ... }; // Disposer for Foo struct just deletes the object passed in struct fooDisposer { void operator()( Foo * p ) { delete p; } }; struct fooTraits: public ci::basket_queue::traits { typedef ci::basket_queue::base_hook< ci::opt::gc< hp_gc >> hook; typedef fooDisposer disposer; }; typedef ci::BasketQueue< hp_gc, Foo, fooTraits > fooQueue; // BasketQueue with Hazard Pointer garbage collector, // member hook + item disposer + item counter, // without padding of internal queue data: struct Bar { // Your data ... ci::basket_queue::node< hp_gc > hMember; }; struct barTraits: public ci::basket_queue::make_traits< ci::opt::hook< ci::basket_queue::member_hook< offsetof(Bar, hMember) ,ci::opt::gc< hp_gc > > > ,ci::opt::disposer< fooDisposer > ,cds::opt::item_counter< cds::atomicity::item_counter > ,cds::opt::padding< cds::opt::no_special_padding > >::type {}; typedef ci::BasketQueue< hp_gc, Bar, barTraits > barQueue; \endcode */ template <typename GC, typename T, typename Traits = basket_queue::traits> class BasketQueue { public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename single_link::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::stat stat; ///< Internal statistics policy used typedef typename traits::memory_model memory_model; ///< Memory ordering.
See cds::opt::memory_model option /// Rebind template arguments template <typename GC2, typename T2, typename Traits2> struct rebind { typedef BasketQueue< GC2, T2, Traits2> other ; ///< Rebinding result }; static constexpr const size_t c_nHazardPtrCount = 6 ; ///< Count of hazard pointers required for the algorithm protected: //@cond typedef typename node_type::marked_ptr marked_ptr; typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; // GC and node_type::gc must be the same static_assert( std::is_same<gc, typename node_type::gc>::value, "GC and node_type::gc must be the same"); //@endcond atomic_marked_ptr m_pHead ; ///< Queue's head pointer (aligned) //@cond typename opt::details::apply_padding< atomic_marked_ptr, traits::padding >::padding_type pad1_; //@endcond atomic_marked_ptr m_pTail ; ///< Queue's tail pointer (aligned) //@cond typename opt::details::apply_padding< atomic_marked_ptr, traits::padding >::padding_type pad2_; //@endcond node_type m_Dummy ; ///< dummy node //@cond typename opt::details::apply_padding< node_type, traits::padding >::padding_type pad3_; //@endcond item_counter m_ItemCounter ; ///< Item counter stat m_Stat ; ///< Internal statistics //@cond size_t const m_nMaxHops; //@endcond //@cond struct dequeue_result { typename gc::template GuardArray<3> guards; node_type * pNext; }; bool do_dequeue( dequeue_result& res, bool bDeque ) { // Note: // If bDeque == false then the function is called from the empty() method and no real dequeuing operation is performed back_off bkoff; marked_ptr h; marked_ptr t; marked_ptr pNext; while ( true ) { h = res.guards.protect( 0, m_pHead, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); t = res.guards.protect( 1, m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); pNext = res.guards.protect( 2, h->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); if ( h == m_pHead.load( memory_model::memory_order_acquire )) { if ( h.ptr() == t.ptr()) { if ( !pNext.ptr()) { m_Stat.onEmptyDequeue(); return false; } { typename gc::Guard g; while ( pNext->m_pNext.load(memory_model::memory_order_relaxed).ptr() && m_pTail.load(memory_model::memory_order_relaxed) == t ) { pNext = g.protect( pNext->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); res.guards.copy( 2, g ); } } m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ); } else { marked_ptr iter( h ); size_t hops = 0; typename gc::Guard g; while ( pNext.ptr() && pNext.bits() && iter.ptr() != t.ptr() && m_pHead.load(memory_model::memory_order_relaxed) == h ) { iter = pNext; g.assign( res.guards.template get<value_type>(2)); pNext = res.guards.protect( 2, pNext->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); ++hops; } if ( m_pHead.load(memory_model::memory_order_relaxed) != h ) continue; if ( iter.ptr() == t.ptr()) free_chain( h, iter ); else if ( bDeque ) { res.pNext = pNext.ptr(); if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( hops >= m_nMaxHops ) free_chain( h, pNext ); break; } } else return true; } } if ( bDeque ) m_Stat.onDequeueRace(); bkoff(); } if ( bDeque ) { --m_ItemCounter; m_Stat.onDequeue(); } return true; } void free_chain( marked_ptr head, marked_ptr newHead ) { // "head" and "newHead" are guarded if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()),
memory_model::memory_order_release, atomics::memory_order_relaxed )) { typename gc::template GuardArray<2> guards; guards.assign( 0, node_traits::to_value_ptr(head.ptr())); while ( head.ptr() != newHead.ptr()) { marked_ptr pNext = guards.protect( 1, head->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); assert( pNext.bits() != 0 ); dispose_node( head.ptr()); guards.copy( 0, 1 ); head = pNext; } } } static void clear_links( node_type * pNode ) { pNode->m_pNext.store( marked_ptr( nullptr ), memory_model::memory_order_release ); } void dispose_node( node_type * p ) { if ( p != &m_Dummy ) { struct internal_disposer { void operator()( value_type * p ) { assert( p != nullptr ); BasketQueue::clear_links( node_traits::to_node_ptr( p )); disposer()(p); } }; gc::template retire<internal_disposer>( node_traits::to_value_ptr(p)); } } //@endcond public: /// Initializes empty queue BasketQueue() : m_pHead( &m_Dummy ) , m_pTail( &m_Dummy ) , m_nMaxHops( 3 ) {} /// Destructor clears the queue /** Since the baskets queue contains at least one item even if the queue is empty, the destructor may call the item disposer. */ ~BasketQueue() { clear(); node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed).ptr(); assert( pHead != nullptr ); { node_type * pNext = pHead->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); while ( pNext ) { node_type * p = pNext; pNext = pNext->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); p->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); dispose_node( p ); } pHead->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); //m_pTail.store( marked_ptr( pHead ), memory_model::memory_order_relaxed ); } m_pHead.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed ); m_pTail.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed ); dispose_node( pHead ); } /// Enqueues \p val value into the queue. /** @anchor cds_intrusive_BasketQueue_enqueue The function always returns \p true.
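A typical call site looks like this (a minimal sketch; \p Foo is assumed to be a value type hooked as in the class-level example above): \code Foo * p = new Foo; myQueue.enqueue( *p ); // the queue now references *p until it is dequeued and passed to the disposer \endcode Remember that the container is intrusive: \p enqueue() does not copy \p val, it links the node embedded in it.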
*/ bool enqueue( value_type& val ) { node_type * pNew = node_traits::to_node_ptr( val ); link_checker::is_empty( pNew ); typename gc::Guard guard; typename gc::Guard gNext; back_off bkoff; marked_ptr t; while ( true ) { t = guard.protect( m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_relaxed ); if ( pNext.ptr() == nullptr ) { pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) m_Stat.onAdvanceTailFailed(); break; } // Try adding to basket m_Stat.onTryAddBasket(); // Reread tail next try_again: pNext = gNext.protect( t->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); // add to the basket if ( m_pTail.load( memory_model::memory_order_relaxed ) == t && t->m_pNext.load( memory_model::memory_order_relaxed) == pNext && !pNext.bits()) { bkoff(); pNew->m_pNext.store( pNext, memory_model::memory_order_relaxed ); if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { m_Stat.onAddBasket(); break; } goto try_again; } } else { // Tail is misplaced, advance it typename gc::template GuardArray<2> g; g.assign( 0, node_traits::to_value_ptr( pNext.ptr())); if ( m_pTail.load( memory_model::memory_order_acquire ) != t || t->m_pNext.load( memory_model::memory_order_relaxed ) != pNext ) { m_Stat.onEnqueueRace(); bkoff(); continue; } marked_ptr p; bool bTailOk = true; while ( (p = pNext->m_pNext.load( memory_model::memory_order_acquire )).ptr() != nullptr ) { bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t; if ( !bTailOk ) break; g.assign( 1, node_traits::to_value_ptr( p.ptr())); if ( pNext->m_pNext.load( memory_model::memory_order_relaxed ) != p ) continue; pNext = p; g.assign( 0, g.template get<value_type>( 1 )); } if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed )) m_Stat.onAdvanceTailFailed(); m_Stat.onBadTail(); } m_Stat.onEnqueueRace(); } ++m_ItemCounter; m_Stat.onEnqueue(); return true; } /// Synonym for \p enqueue() function bool push( value_type& val ) { return enqueue( val ); } /// Dequeues a value from the queue /** @anchor cds_intrusive_BasketQueue_dequeue If the queue is empty the function returns \p nullptr. @note See \p MSQueue::dequeue() note about item disposing */ value_type * dequeue() { dequeue_result res; if ( do_dequeue( res, true )) return node_traits::to_value_ptr( *res.pNext ); return nullptr; } /// Synonym for \p dequeue() function value_type * pop() { return dequeue(); } /// Checks if the queue is empty /** Note that this function is not \p const. The function is based on the \p dequeue() algorithm but it does not actually dequeue any item. */ bool empty() { dequeue_result res; return !do_dequeue( res, false ); } /// Clear the queue /** The function repeatedly calls \p dequeue() until it returns \p nullptr. The disposer defined in template \p Traits is called for each item that can be safely disposed. */ void clear() { while ( dequeue()); } /// Returns queue's item count /** The value returned depends on \p Traits (see basket_queue::traits::item_counter).
For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use a real item counter and it returns 0, this does not mean that the queue is empty. To check queue emptiness use the \p empty() method. */ size_t size() const { return m_ItemCounter.value(); } /// Returns reference to internal statistics const stat& statistics() const { return m_Stat; } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_BASKET_QUEUE_H libcds-2.3.3/cds/intrusive/cuckoo_set.h000066400000000000000000003425111341244201700200730ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_CUCKOO_SET_H #define CDSLIB_INTRUSIVE_CUCKOO_SET_H #include #include #include #include // ref #include #include #include #include #include #include namespace cds { namespace intrusive { /// CuckooSet-related definitions namespace cuckoo { /// Option to define probeset type /** The option specifies probeset type for the CuckooSet. Available values: - \p cds::intrusive::cuckoo::list - the probeset is a single-linked list. The node contains pointer to next node in probeset. - \p cds::intrusive::cuckoo::vector - the probeset is a vector with constant-size \p Capacity where \p Capacity is an unsigned int constant. The node does not contain any auxiliary data. */ template <typename Type> struct probeset_type { //@cond template <typename Base> struct pack: public Base { typedef Type probeset_type; }; //@endcond }; /// Option specifying whether to store hash values in the node /** This option reserves additional space in the hook to store the hash value of the object once it's introduced in the container. When this option is used, the unordered container will store the calculated hash value in the hook and rehashing operations won't need to recalculate the hash of the value. This option will improve the performance of unordered containers when rehashing is frequent or hashing the value is a slow operation. The \p Count template parameter defines the size of hash array. Remember that cuckoo hashing implies at least two hash values per item. Possible values of \p Count: - 0 - no hash storing in the node - greater than 1 - store hash values. Value 1 is deprecated. */ template <unsigned int Count> struct store_hash { //@cond template <typename Base> struct pack: public Base { static unsigned int const store_hash = Count; }; //@endcond }; //@cond // Probeset type placeholders struct list_probeset_class; struct vector_probeset_class; //@endcond //@cond /// List probeset type struct list; //@endcond /// Vector probeset type template <unsigned int Capacity> struct vector { /// Vector capacity static unsigned int const c_nCapacity = Capacity; }; /// CuckooSet node /** Template arguments: - \p ProbesetType - type of probeset. Can be \p cds::intrusive::cuckoo::list or \p cds::intrusive::cuckoo::vector. - \p StoreHashCount - constant that defines whether to store node hash values.
See cuckoo::store_hash option for explanation - \p Tag - a \ref cds_intrusive_hook_tag "tag" */ template <typename ProbesetType = cuckoo::list, unsigned int StoreHashCount = 0, typename Tag = opt::none> struct node #ifdef CDS_DOXYGEN_INVOKED { typedef ProbesetType probeset_type ; ///< Probeset type typedef Tag tag ; ///< Tag static unsigned int const hash_array_size = StoreHashCount ; ///< The size of hash array } #endif ; //@cond template <typename Tag> struct node< cuckoo::list, 0, Tag> { typedef list_probeset_class probeset_class; typedef cuckoo::list probeset_type; typedef Tag tag; static unsigned int const hash_array_size = 0; static unsigned int const probeset_size = 0; node * m_pNext; constexpr node() noexcept : m_pNext( nullptr ) {} void store_hash( size_t const* ) {} size_t * get_hash() const { // This node type does not store hash values!!! assert(false); return nullptr; } void clear() { m_pNext = nullptr; } }; template <unsigned int StoreHashCount, typename Tag> struct node< cuckoo::list, StoreHashCount, Tag> { typedef list_probeset_class probeset_class; typedef cuckoo::list probeset_type; typedef Tag tag; static unsigned int const hash_array_size = StoreHashCount; static unsigned int const probeset_size = 0; node * m_pNext; size_t m_arrHash[ hash_array_size ]; node() noexcept : m_pNext( nullptr ) { memset( m_arrHash, 0, sizeof(m_arrHash)); } void store_hash( size_t const* pHashes ) { memcpy( m_arrHash, pHashes, sizeof( m_arrHash )); } size_t * get_hash() const { return const_cast<size_t *>( m_arrHash ); } void clear() { m_pNext = nullptr; } }; template <unsigned int VectorSize, typename Tag> struct node< cuckoo::vector<VectorSize>, 0, Tag> { typedef vector_probeset_class probeset_class; typedef cuckoo::vector<VectorSize> probeset_type; typedef Tag tag; static unsigned int const hash_array_size = 0; static unsigned int const probeset_size = probeset_type::c_nCapacity; node() noexcept {} void store_hash( size_t const* ) {} size_t * get_hash() const { // This node type does not store hash values!!! assert(false); return nullptr; } void clear() {} }; template <unsigned int VectorSize, unsigned int StoreHashCount, typename Tag> struct node< cuckoo::vector<VectorSize>, StoreHashCount, Tag> { typedef vector_probeset_class probeset_class; typedef cuckoo::vector<VectorSize> probeset_type; typedef Tag tag; static unsigned int const hash_array_size = StoreHashCount; static unsigned int const probeset_size = probeset_type::c_nCapacity; size_t m_arrHash[ hash_array_size ]; node() noexcept { memset( m_arrHash, 0, sizeof(m_arrHash)); } void store_hash( size_t const* pHashes ) { memcpy( m_arrHash, pHashes, sizeof( m_arrHash )); } size_t * get_hash() const { return const_cast<size_t *>( m_arrHash ); } void clear() {} }; //@endcond //@cond struct default_hook { typedef cuckoo::list probeset_type; static unsigned int const store_hash = 0; typedef opt::none tag; }; template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type traits; typedef typename traits::probeset_type probeset_type; typedef typename traits::tag tag; static unsigned int const store_hash = traits::store_hash; typedef node<probeset_type, store_hash, tag> node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - \p cuckoo::probeset_type - probeset type. Default is \p cuckoo::list - \p cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset defines offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - \p cuckoo::probeset_type - probeset type.
Default is \p cuckoo::list - \p cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - \p cuckoo::probeset_type - probeset type. Default is \p cuckoo::list - \p cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template <typename NodeTraits, typename... Options> struct traits_hook: public hook< opt::traits_hook_tag, Options... > { //@cond typedef NodeTraits node_traits; //@endcond }; /// Internal statistics for \ref striping mutex policy struct striping_stat { typedef cds::atomicity::event_counter counter_type; ///< Counter type counter_type m_nCellLockCount ; ///< Count of obtaining cell lock counter_type m_nCellTryLockCount ; ///< Count of cell \p try_lock attempts counter_type m_nFullLockCount ; ///< Count of obtaining full lock counter_type m_nResizeLockCount ; ///< Count of obtaining resize lock counter_type m_nResizeCount ; ///< Count of resize event //@cond void onCellLock() { ++m_nCellLockCount; } void onCellTryLock() { ++m_nCellTryLockCount; } void onFullLock() { ++m_nFullLockCount; } void onResizeLock() { ++m_nResizeLockCount; } void onResize() { ++m_nResizeCount; } //@endcond }; /// Dummy internal statistics for \ref striping mutex policy struct empty_striping_stat { //@cond void onCellLock() const {} void onCellTryLock() const {} void onFullLock() const {} void onResizeLock() const {} void onResize() const {} //@endcond }; /// Lock striping concurrent access policy /** This is one of the available opt::mutex_policy option types for CuckooSet Lock striping is a very simple technique. The cuckoo set consists of bucket tables and arrays of locks: there is a single lock array for each bucket table, and there are at least two bucket tables. Initially, the capacity of the lock array and of each bucket table is the same. When the set is resized, the bucket table capacity is doubled but the lock array capacity is unchanged. The lock \p i protects each bucket \p j, where j = i mod L , where \p L is the size of the lock array. The policy contains an internal array of \p RecursiveLock locks. Template arguments: - \p RecursiveLock - the type of recursive mutex. The default is \p std::recursive_mutex. The mutex type should be default-constructible. Note that a recursive spin-lock is not suitable for lock striping for performance reasons. - \p Arity - unsigned int constant that specifies an arity. The arity is the count of hash functors, i.e., the count of lock arrays. Default value is 2. - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. - \p Stat - internal statistics type. Note that this template argument is automatically selected by \ref CuckooSet class according to its \p opt::stat option.
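The policy is selected via the \p opt::mutex_policy option; a minimal sketch (assuming \p my_data, \p my_data_equal_to, \p hash1 and \p hash2 are defined as in the \p CuckooSet examples below, with \p my_data derived from the default \p cuckoo::node<>): \code typedef cds::intrusive::CuckooSet< my_data, cds::intrusive::cuckoo::make_traits< cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::equal_to< my_data_equal_to > ,cds::opt::mutex_policy< cds::intrusive::cuckoo::striping<> > >::type > my_striped_set; \endcode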
*/ template < class RecursiveLock = std::recursive_mutex, unsigned int Arity = 2, class Alloc = CDS_DEFAULT_ALLOCATOR, class Stat = empty_striping_stat > class striping { public: typedef RecursiveLock lock_type ; ///< lock type typedef Alloc allocator_type ; ///< allocator type static unsigned int const c_nArity = Arity ; ///< the arity typedef Stat statistics_type ; ///< Internal statistics type (\ref striping_stat or \ref empty_striping_stat) //@cond typedef striping_stat real_stat; typedef empty_striping_stat empty_stat; template <typename Stat2> struct rebind_statistics { typedef striping< lock_type, c_nArity, allocator_type, Stat2 > other; }; //@endcond typedef cds::sync::lock_array< lock_type, cds::sync::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type protected: //@cond class lock_array: public lock_array_type { public: // placeholder ctor lock_array(): lock_array_type( typename lock_array_type::select_cell_policy(2)) {} // real ctor lock_array( size_t nCapacity ): lock_array_type( nCapacity, typename lock_array_type::select_cell_policy(nCapacity)) {} }; class scoped_lock: public std::unique_lock< lock_array_type > { typedef std::unique_lock< lock_array_type > base_class; public: scoped_lock( lock_array& arrLock, size_t nHash ): base_class( arrLock, nHash ) {} }; //@endcond protected: //@cond lock_array m_Locks[c_nArity] ; ///< array of \p lock_array_type statistics_type m_Stat ; ///< internal statistics //@endcond public: //@cond class scoped_cell_lock { lock_type * m_guard[c_nArity]; public: scoped_cell_lock( striping& policy, size_t const* arrHash ) { for ( unsigned int i = 0; i < c_nArity; ++i ) { m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] ))); } policy.m_Stat.onCellLock(); } ~scoped_cell_lock() { for ( unsigned int i = 0; i < c_nArity; ++i ) m_guard[i]->unlock(); } }; class scoped_cell_trylock { typedef typename lock_array_type::lock_type lock_type; lock_type * m_guard[c_nArity]; bool m_bLocked; public: scoped_cell_trylock( striping& policy, size_t const* arrHash ) { size_t nCell = policy.m_Locks[0].try_lock( arrHash[0] ); m_bLocked = nCell != lock_array_type::c_nUnspecifiedCell; if ( m_bLocked ) { m_guard[0] = &(policy.m_Locks[0].at(nCell)); for ( unsigned int i = 1; i < c_nArity; ++i ) { m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] ))); } } else { std::fill( m_guard, m_guard + c_nArity, nullptr ); } policy.m_Stat.onCellTryLock(); } ~scoped_cell_trylock() { if ( m_bLocked ) { for ( unsigned int i = 0; i < c_nArity; ++i ) m_guard[i]->unlock(); } } bool locked() const { return m_bLocked; } }; class scoped_full_lock { std::unique_lock< lock_array_type > m_guard; public: scoped_full_lock( striping& policy ) : m_guard( policy.m_Locks[0] ) { policy.m_Stat.onFullLock(); } /// Ctor for scoped_resize_lock - no statistics counter is incremented scoped_full_lock( striping& policy, bool ) : m_guard( policy.m_Locks[0] ) {} }; class scoped_resize_lock: public scoped_full_lock { public: scoped_resize_lock( striping& policy ) : scoped_full_lock( policy, false ) { policy.m_Stat.onResizeLock(); } }; //@endcond public: /// Constructor striping( size_t nLockCount ///< The size of lock array. Must be power of two.
) { // Trick: the lock arrays were default-constructed with a placeholder capacity, so re-construct them in place with the requested capacity (explicit dtor + placement new) for ( unsigned int i = 0; i < c_nArity; ++i ) { lock_array * pArr = m_Locks + i; pArr->lock_array::~lock_array(); new ( pArr ) lock_array( nLockCount ); } } /// Returns lock array size /** Lock array size is unchanged during \p striping object lifetime */ size_t lock_count() const { return m_Locks[0].size(); } //@cond void resize( size_t ) { m_Stat.onResize(); } //@endcond /// Returns the arity of striping mutex policy constexpr unsigned int arity() const noexcept { return c_nArity; } /// Returns internal statistics statistics_type const& statistics() const { return m_Stat; } }; /// Internal statistics for \ref refinable mutex policy struct refinable_stat { typedef cds::atomicity::event_counter counter_type ; ///< Counter type counter_type m_nCellLockCount ; ///< Count of obtaining cell lock counter_type m_nCellLockWaitResizing ; ///< Count of loop iterations waiting for resizing counter_type m_nCellLockArrayChanged ; ///< Count of event "Lock array has been changed when obtaining cell lock" counter_type m_nCellLockFailed ; ///< Count of event "Cell lock failed because the array is owned by another thread" counter_type m_nSecondCellLockCount ; ///< Count of obtaining cell lock when another cell is already locked counter_type m_nSecondCellLockFailed ; ///< Count of unsuccessful attempts to obtain a cell lock when another cell is already locked counter_type m_nFullLockCount ; ///< Count of obtaining full lock counter_type m_nFullLockIter ; ///< Count of unsuccessful iterations to obtain the full lock counter_type m_nResizeLockCount ; ///< Count of obtaining resize lock counter_type m_nResizeLockIter ; ///< Count of unsuccessful iterations to obtain the resize lock counter_type m_nResizeLockArrayChanged; ///< Count of event "Lock array has been changed when obtaining resize lock" counter_type m_nResizeCount ; ///< Count of resize event //@cond void onCellLock() { ++m_nCellLockCount; } void onCellWaitResizing() { ++m_nCellLockWaitResizing; } void onCellArrayChanged() { ++m_nCellLockArrayChanged; } void onCellLockFailed() { ++m_nCellLockFailed; } void onSecondCellLock() { ++m_nSecondCellLockCount; } void onSecondCellLockFailed() { ++m_nSecondCellLockFailed; } void onFullLock() { ++m_nFullLockCount; } void onFullLockIter() { ++m_nFullLockIter; } void onResizeLock() { ++m_nResizeLockCount; } void onResizeLockIter() { ++m_nResizeLockIter; } void onResizeLockArrayChanged() { ++m_nResizeLockArrayChanged; } void onResize() { ++m_nResizeCount; } //@endcond }; /// Dummy internal statistics for \ref refinable mutex policy struct empty_refinable_stat { //@cond void onCellLock() const {} void onCellWaitResizing() const {} void onCellArrayChanged() const {} void onCellLockFailed() const {} void onSecondCellLock() const {} void onSecondCellLockFailed() const {} void onFullLock() const {} void onFullLockIter() const {} void onResizeLock() const {} void onResizeLockIter() const {} void onResizeLockArrayChanged() const {} void onResize() const {} //@endcond }; /// Refinable concurrent access policy /** This is one of the available \p opt::mutex_policy option types for \p CuckooSet Refining is like the striping technique (see \p cuckoo::striping) but it allows the lock array to grow when the hash table is resized, so the sizes of the hash table and of the lock array stay equal. Template arguments: - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required. The default is \p std::recursive_mutex. The mutex type should be default-constructible.
- \p Arity - unsigned int constant that specifies an arity. The arity is the count of hash functors, i.e., the count of lock arrays. Default value is 2. - \p BackOff - back-off strategy. Default is \p cds::backoff::Default - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. - \p Stat - internal statistics type. Note that this template argument is automatically selected by \ref CuckooSet class according to its \p opt::stat option. */ template < class RecursiveLock = std::recursive_mutex, unsigned int Arity = 2, typename BackOff = cds::backoff::Default, class Alloc = CDS_DEFAULT_ALLOCATOR, class Stat = empty_refinable_stat > class refinable { public: typedef RecursiveLock lock_type ; ///< lock type typedef Alloc allocator_type ; ///< allocator type typedef BackOff back_off ; ///< back-off strategy typedef Stat statistics_type ; ///< internal statistics type static unsigned int const c_nArity = Arity; ///< the arity //@cond typedef refinable_stat real_stat; typedef empty_refinable_stat empty_stat; template <typename Stat2> struct rebind_statistics { typedef refinable< lock_type, c_nArity, back_off, allocator_type, Stat2> other; }; //@endcond protected: //@cond typedef cds::sync::trivial_select_policy lock_selection_policy; class lock_array_type : public cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > , public std::enable_shared_from_this< lock_array_type > { typedef cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > lock_array_base; public: lock_array_type( size_t nCapacity ) : lock_array_base( nCapacity ) {} }; typedef std::shared_ptr< lock_array_type > lock_array_ptr; typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator; typedef unsigned long long owner_t; typedef cds::OS::ThreadId threadId_t; typedef cds::sync::spin spinlock_type; typedef std::unique_lock< spinlock_type > scoped_spinlock; //@endcond protected: //@cond static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) atomics::atomic<size_t> m_nCapacity ; ///< lock array capacity lock_array_ptr m_arrLocks[ c_nArity ] ; ///< Lock array. The capacity of the array is specified in the constructor.
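// Note on m_Owner encoding: the owner word is (thread_id << 1) | 1 while a thread holds the set for a full lock or resizing, and 0 when it is free. The low bit is the busy flag; c_nOwnerMask keeps the thread id within the remaining bits (see acquire() and acquire_all() below).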
spinlock_type m_access ; ///< access to m_arrLocks statistics_type m_Stat ; ///< internal statistics //@endcond protected: //@cond struct lock_array_disposer { void operator()( lock_array_type * pArr ) { // It seems there is a false positive in std::shared_ptr deallocation in uninstrumented libc++ // see, for example, https://groups.google.com/forum/#!topic/thread-sanitizer/eHu4dE_z7Cc // https://reviews.llvm.org/D21609 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; lock_array_allocator().Delete( pArr ); CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; } }; lock_array_ptr create_lock_array( size_t nCapacity ) { return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer()); } void acquire( size_t const * arrHash, lock_array_ptr * pLockArr, lock_type ** parrLock ) { owner_t me = (owner_t) cds::OS::get_current_thread_id(); owner_t who; size_t cur_capacity; back_off bkoff; while ( true ) { { scoped_spinlock sl(m_access); for ( unsigned int i = 0; i < c_nArity; ++i ) pLockArr[i] = m_arrLocks[i]; cur_capacity = m_nCapacity.load( atomics::memory_order_acquire ); } // wait while resizing while ( true ) { who = m_Owner.load( atomics::memory_order_acquire ); if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask)) break; bkoff(); m_Stat.onCellWaitResizing(); } if ( cur_capacity == m_nCapacity.load( atomics::memory_order_acquire )) { size_t const nMask = pLockArr[0]->size() - 1; assert( cds::beans::is_power2( nMask + 1 )); for ( unsigned int i = 0; i < c_nArity; ++i ) { parrLock[i] = &( pLockArr[i]->at( arrHash[i] & nMask )); parrLock[i]->lock(); } who = m_Owner.load( atomics::memory_order_acquire ); if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && cur_capacity == m_nCapacity.load( atomics::memory_order_acquire )) { m_Stat.onCellLock(); return; } for ( unsigned int i = 0; i < c_nArity; ++i ) parrLock[i]->unlock(); m_Stat.onCellLockFailed(); } else m_Stat.onCellArrayChanged(); // Clearing pLockArr can lead to calling the dtor for each item of pLockArr[i], which may be a heavyweight operation // (each pLockArr[i] is a shared pointer to an array of a ton of mutexes) // It is better to do this before the next loop iteration where we will use spin-locked assignment to pLockArr // However, destroying a lot of mutexes under a spin-lock is a bad solution for ( unsigned int i = 0; i < c_nArity; ++i ) pLockArr[i].reset(); } } bool try_second_acquire( size_t const * arrHash, lock_type ** parrLock ) { // It is assumed that the current thread already has a lock // and requires a second lock for another hash size_t const nMask = m_nCapacity.load(atomics::memory_order_acquire) - 1; size_t nCell = m_arrLocks[0]->try_lock( arrHash[0] & nMask); if ( nCell == lock_array_type::c_nUnspecifiedCell ) { m_Stat.onSecondCellLockFailed(); return false; } parrLock[0] = &(m_arrLocks[0]->at(nCell)); for ( unsigned int i = 1; i < c_nArity; ++i ) { parrLock[i] = &( m_arrLocks[i]->at( m_arrLocks[i]->lock( arrHash[i] & nMask))); } m_Stat.onSecondCellLock(); return true; } void acquire_all() { owner_t me = (owner_t) cds::OS::get_current_thread_id(); back_off bkoff; while ( true ) { owner_t ownNull = 0; if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) { m_arrLocks[0]->lock_all(); m_Stat.onFullLock(); return; } bkoff(); m_Stat.onFullLockIter(); } } void release_all() { m_arrLocks[0]->unlock_all(); m_Owner.store( 0, atomics::memory_order_release ); } void acquire_resize( lock_array_ptr * pOldLocks ) { owner_t me = (owner_t) cds::OS::get_current_thread_id();
size_t cur_capacity; while ( true ) { { scoped_spinlock sl(m_access); for ( unsigned int i = 0; i < c_nArity; ++i ) pOldLocks[i] = m_arrLocks[i]; cur_capacity = m_nCapacity.load( atomics::memory_order_acquire ); } // global lock owner_t ownNull = 0; if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) { if ( cur_capacity == m_nCapacity.load( atomics::memory_order_acquire )) { pOldLocks[0]->lock_all(); m_Stat.onResizeLock(); return; } m_Owner.store( 0, atomics::memory_order_release ); m_Stat.onResizeLockArrayChanged(); } else m_Stat.onResizeLockIter(); // Clearing pOldLocks can lead to calling the dtor for each item of pOldLocks[i], which may be a heavyweight operation // (each pOldLocks[i] is a shared pointer to an array of a ton of mutexes) // It is better to do this before the next loop iteration where we will use spin-locked assignment to pOldLocks // However, destroying a lot of mutexes under a spin-lock is a bad solution for ( unsigned int i = 0; i < c_nArity; ++i ) pOldLocks[i].reset(); } } void release_resize( lock_array_ptr * pOldLocks ) { m_Owner.store( 0, atomics::memory_order_release ); pOldLocks[0]->unlock_all(); } //@endcond public: //@cond class scoped_cell_lock { lock_type * m_arrLock[ c_nArity ]; lock_array_ptr m_arrLockArr[ c_nArity ]; public: scoped_cell_lock( refinable& policy, size_t const* arrHash ) { policy.acquire( arrHash, m_arrLockArr, m_arrLock ); } ~scoped_cell_lock() { for ( unsigned int i = 0; i < c_nArity; ++i ) m_arrLock[i]->unlock(); } }; class scoped_cell_trylock { lock_type * m_arrLock[ c_nArity ]; bool m_bLocked; public: scoped_cell_trylock( refinable& policy, size_t const* arrHash ) { m_bLocked = policy.try_second_acquire( arrHash, m_arrLock ); } ~scoped_cell_trylock() { if ( m_bLocked ) { for ( unsigned int i = 0; i < c_nArity; ++i ) m_arrLock[i]->unlock(); } } bool locked() const { return m_bLocked; } }; class scoped_full_lock { refinable& m_policy; public: scoped_full_lock( refinable& policy ) : m_policy( policy ) { policy.acquire_all(); } ~scoped_full_lock() { m_policy.release_all(); } }; class scoped_resize_lock { refinable& m_policy; lock_array_ptr m_arrLocks[ c_nArity ]; public: scoped_resize_lock( refinable& policy ) : m_policy(policy) { policy.acquire_resize( m_arrLocks ); } ~scoped_resize_lock() { m_policy.release_resize( m_arrLocks ); } }; //@endcond public: /// Constructor refinable( size_t nLockCount ///< The size of lock array. Must be power of two. ) : m_Owner(0) , m_nCapacity( nLockCount ) { assert( cds::beans::is_power2( nLockCount )); for ( unsigned int i = 0; i < c_nArity; ++i ) m_arrLocks[i] = create_lock_array( nLockCount ); } //@cond void resize( size_t nCapacity ) { lock_array_ptr pNew[ c_nArity ]; for ( unsigned int i = 0; i < c_nArity; ++i ) pNew[i] = create_lock_array( nCapacity ); { scoped_spinlock sl(m_access); m_nCapacity.store( nCapacity, atomics::memory_order_release ); for ( unsigned int i = 0; i < c_nArity; ++i ) m_arrLocks[i] = pNew[i]; } m_Stat.onResize(); } //@endcond /// Returns lock array size /** Lock array size is not a constant for \p refinable policy and can be changed when the set is resized.
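Internally the size is used as a power-of-two mask for mapping hash values to locks; roughly (a sketch of the internal mapping, see \p acquire()): \code size_t lock_idx = nHash & ( lock_count() - 1 ); \endcode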
*/ size_t lock_count() const { return m_nCapacity.load(atomics::memory_order_relaxed); } /// Returns the arity of \p refinable mutex policy constexpr unsigned int arity() const noexcept { return c_nArity; } /// Returns internal statistics statistics_type const& statistics() const { return m_Stat; } }; /// \p CuckooSet internal statistics struct stat { typedef cds::atomicity::event_counter counter_type ; ///< Counter type counter_type m_nRelocateCallCount ; ///< Count of \p relocate() function call counter_type m_nRelocateRoundCount ; ///< Count of attempts to relocate items counter_type m_nFalseRelocateCount ; ///< Count of unneeded attempts of \p relocate call counter_type m_nSuccessRelocateCount ; ///< Count of successful item relocating counter_type m_nRelocateAboveThresholdCount; ///< Count of item relocating above probeset threshold counter_type m_nFailedRelocateCount ; ///< Count of failed relocation attempts (when the whole probeset is full) counter_type m_nResizeCallCount ; ///< Count of \p resize() function call counter_type m_nFalseResizeCount ; ///< Count of false \p resize() function call (when another thread has already resized the set) counter_type m_nResizeSuccessNodeMove; ///< Count of successful node moving when resizing counter_type m_nResizeRelocateCall ; ///< Count of \p relocate() function call from \p resize function counter_type m_nInsertSuccess ; ///< Count of successful \p insert() function call counter_type m_nInsertFailed ; ///< Count of failed \p insert() function call counter_type m_nInsertResizeCount ; ///< Count of \p resize() function call from \p insert() counter_type m_nInsertRelocateCount ; ///< Count of \p relocate() function call from \p insert() counter_type m_nInsertRelocateFault ; ///< Count of failed \p relocate() function call from \p insert() counter_type m_nUpdateExistCount ; ///< Count of \p update() function calls for existing node counter_type m_nUpdateSuccessCount ; ///< Count of successful \p update() function call for new node counter_type m_nUpdateResizeCount ; ///< Count of \p resize() function call from \p update() counter_type m_nUpdateRelocateCount ; ///< Count of \p relocate() function call from \p update() counter_type m_nUpdateRelocateFault ; ///< Count of failed \p relocate() function call from \p update() counter_type m_nUnlinkSuccess ; ///< Count of successful \p unlink() function call counter_type m_nUnlinkFailed ; ///< Count of failed \p unlink() function call counter_type m_nEraseSuccess ; ///< Count of successful \p erase() function call counter_type m_nEraseFailed ; ///< Count of failed \p erase() function call counter_type m_nFindSuccess ; ///< Count of successful \p find() function call counter_type m_nFindFailed ; ///< Count of failed \p find() function call counter_type m_nFindEqualSuccess ; ///< Count of successful \p find_equal() function call counter_type m_nFindEqualFailed ; ///< Count of failed \p find_equal() function call counter_type m_nFindWithSuccess ; ///< Count of successful \p find_with() function call counter_type m_nFindWithFailed ; ///< Count of failed \p find_with() function call //@cond void onRelocateCall() { ++m_nRelocateCallCount; } void onRelocateRound() { ++m_nRelocateRoundCount; } void onFalseRelocateRound() { ++m_nFalseRelocateCount; } void onSuccessRelocateRound(){ ++m_nSuccessRelocateCount; } void onRelocateAboveThresholdRound() { ++m_nRelocateAboveThresholdCount; } void onFailedRelocate() { ++m_nFailedRelocateCount; } void onResizeCall() { ++m_nResizeCallCount; } void onFalseResizeCall() { ++m_nFalseResizeCount; } void
onResizeSuccessMove() { ++m_nResizeSuccessNodeMove; } void onResizeRelocateCall() { ++m_nResizeRelocateCall; } void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onInsertResize() { ++m_nInsertResizeCount; } void onInsertRelocate() { ++m_nInsertRelocateCount; } void onInsertRelocateFault() { ++m_nInsertRelocateFault; } void onUpdateExist() { ++m_nUpdateExistCount; } void onUpdateSuccess() { ++m_nUpdateSuccessCount; } void onUpdateResize() { ++m_nUpdateResizeCount; } void onUpdateRelocate() { ++m_nUpdateRelocateCount; } void onUpdateRelocateFault() { ++m_nUpdateRelocateFault; } void onUnlinkSuccess() { ++m_nUnlinkSuccess; } void onUnlinkFailed() { ++m_nUnlinkFailed; } void onEraseSuccess() { ++m_nEraseSuccess; } void onEraseFailed() { ++m_nEraseFailed; } void onFindSuccess() { ++m_nFindSuccess; } void onFindFailed() { ++m_nFindFailed; } void onFindWithSuccess() { ++m_nFindWithSuccess; } void onFindWithFailed() { ++m_nFindWithFailed; } //@endcond }; /// CuckooSet empty internal statistics struct empty_stat { //@cond void onRelocateCall() const {} void onRelocateRound() const {} void onFalseRelocateRound() const {} void onSuccessRelocateRound()const {} void onRelocateAboveThresholdRound() const {} void onFailedRelocate() const {} void onResizeCall() const {} void onFalseResizeCall() const {} void onResizeSuccessMove() const {} void onResizeRelocateCall() const {} void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertResize() const {} void onInsertRelocate() const {} void onInsertRelocateFault() const {} void onUpdateExist() const {} void onUpdateSuccess() const {} void onUpdateResize() const {} void onUpdateRelocate() const {} void onUpdateRelocateFault() const {} void onUnlinkSuccess() const {} void onUnlinkFailed() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onFindSuccess() const {} void onFindFailed() const {} void onFindWithSuccess() const {} void onFindWithFailed() const {} //@endcond }; /// Type traits for CuckooSet class struct traits { /// Hook used /** Possible values are: cuckoo::base_hook, cuckoo::member_hook, cuckoo::traits_hook. */ typedef base_hook<> hook; /// Hash functors tuple /** This is a mandatory type and has no predefined default. At least two hash functors should be provided. All hash functors should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . The hash functors are defined as std::tuple< H1, H2, ... Hn > : \@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. To specify hash tuple in traits you should use \p cds::opt::hash_tuple: \code struct my_traits: public cds::intrusive::cuckoo::traits { typedef cds::opt::hash_tuple< hash1, hash2 > hash; }; \endcode */ typedef cds::opt::none hash; /// Concurrent access policy /** Available opt::mutex_policy types: - \p cuckoo::striping - simple, but the lock array is not resizable - \p cuckoo::refinable - resizable lock array, but more complex access to set data. Default is \p cuckoo::striping. */ typedef cuckoo::striping<> mutex_policy; /// Key equality functor /** Default is std::equal_to */ typedef opt::none equal_to; /// Key comparing functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for key comparison. /** Default is \p std::less.
*/ typedef opt::none less; /// Item counter /** The type for item counting feature. Default is \p cds::atomicity::item_counter Only atomic item counter type is allowed. */ typedef atomicity::item_counter item_counter; /// Allocator type /** The allocator type for allocating bucket tables. */ typedef CDS_DEFAULT_ALLOCATOR allocator; /// Disposer /** The disposer functor is used in \p CuckooSet::clear() member function to free set's node. */ typedef intrusive::opt::v::empty_disposer disposer; /// Internal statistics. Available statistics: \p cuckoo::stat, \p cuckoo::empty_stat typedef empty_stat stat; }; /// Metafunction converting option list to \p CuckooSet traits /** Template argument list \p Options... are: - \p intrusive::opt::hook - hook used. Possible values are: \p cuckoo::base_hook, \p cuckoo::member_hook, \p cuckoo::traits_hook. If the option is not specified, %cuckoo::base_hook<> is used. - \p opt::hash - hash functor tuple, mandatory option. At least two hash functors should be provided. All hash functors should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . The hash functors are passed as std::tuple< H1, H2, ... Hn > . The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. - \p opt::mutex_policy - concurrent access policy. Available policies: \p cuckoo::striping, \p cuckoo::refinable. Default is \p %cuckoo::striping. - \p opt::equal_to - key equality functor like \p std::equal_to. If this functor is defined then the probe-set will be unordered. If \p %opt::compare or \p %opt::less option is specified too, then the probe-set will be ordered and \p %opt::equal_to will be ignored. - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p %opt::less is used. If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. - \p opt::item_counter - the type of item counting feature. Default is \p atomicity::item_counter The item counter should be atomic. - \p opt::allocator - the allocator type used for allocating bucket tables. Default is \ref CDS_DEFAULT_ALLOCATOR - \p intrusive::opt::disposer - the disposer type used in \p clear() member function for freeing nodes. Default is \p intrusive::opt::v::empty_disposer - \p opt::stat - internal statistics. Possible types: \p cuckoo::stat, \p cuckoo::empty_stat. Default is \p %cuckoo::empty_stat The probe set traits \p cuckoo::probeset_type and \p cuckoo::store_hash are taken from \p node type specified by \p opt::hook option. */ template <typename... Options> struct make_traits { typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< cuckoo::traits, Options... >::type ,Options...
>::type type ; ///< Result of metafunction }; //@cond namespace details { template <typename Node, typename Probeset> class bucket_entry; template <typename Node> class bucket_entry<Node, cuckoo::list> { public: typedef Node node_type; typedef cuckoo::list_probeset_class probeset_class; typedef cuckoo::list probeset_type; protected: node_type * pHead; unsigned int nSize; public: class iterator { node_type * pNode; friend class bucket_entry; public: iterator() : pNode( nullptr ) {} iterator( node_type * p ) : pNode( p ) {} iterator( iterator const& it) : pNode( it.pNode ) {} iterator& operator=( iterator const& it ) { pNode = it.pNode; return *this; } iterator& operator=( node_type * p ) { pNode = p; return *this; } node_type * operator->() { return pNode; } node_type& operator*() { assert( pNode != nullptr ); return *pNode; } // preinc iterator& operator ++() { if ( pNode ) pNode = pNode->m_pNext; return *this; } bool operator==(iterator const& it ) const { return pNode == it.pNode; } bool operator!=(iterator const& it ) const { return !( *this == it ); } }; public: bucket_entry() : pHead( nullptr ) , nSize(0) { static_assert(( std::is_same<typename node_type::probeset_type, probeset_type>::value ), "Incompatible node type" ); } iterator begin() { return iterator(pHead); } iterator end() { return iterator(); } void insert_after( iterator it, node_type * p ) { node_type * pPrev = it.pNode; if ( pPrev ) { p->m_pNext = pPrev->m_pNext; pPrev->m_pNext = p; } else { // insert as head p->m_pNext = pHead; pHead = p; } ++nSize; } void remove( iterator itPrev, iterator itWhat ) { node_type * pPrev = itPrev.pNode; node_type * pWhat = itWhat.pNode; assert( (!pPrev && pWhat == pHead) || (pPrev && pPrev->m_pNext == pWhat)); if ( pPrev ) pPrev->m_pNext = pWhat->m_pNext; else { assert( pWhat == pHead ); pHead = pHead->m_pNext; } pWhat->clear(); --nSize; } void clear() { node_type * pNext; for ( node_type * pNode = pHead; pNode; pNode = pNext ) { pNext = pNode->m_pNext; pNode->clear(); } nSize = 0; pHead = nullptr; } template <typename Disposer> void clear( Disposer disp ) { node_type * pNext; for ( node_type * pNode = pHead; pNode; pNode = pNext ) { pNext = pNode->m_pNext; pNode->clear(); disp( pNode ); } nSize = 0; pHead = nullptr; } unsigned int size() const { return nSize; } }; template <typename Node, unsigned int Capacity> class bucket_entry<Node, cuckoo::vector<Capacity>> { public: typedef Node node_type; typedef cuckoo::vector_probeset_class probeset_class; typedef cuckoo::vector<Capacity> probeset_type; static unsigned int const c_nCapacity = probeset_type::c_nCapacity; protected: node_type * m_arrNode[c_nCapacity]; unsigned int m_nSize; void shift_up( unsigned int nFrom ) { assert( m_nSize < c_nCapacity ); if ( nFrom < m_nSize ) std::copy_backward( m_arrNode + nFrom, m_arrNode + m_nSize, m_arrNode + m_nSize + 1 ); } void shift_down( node_type ** pFrom ) { assert( m_arrNode <= pFrom && pFrom < m_arrNode + m_nSize); std::copy( pFrom + 1, m_arrNode + m_nSize, pFrom ); } public: class iterator { node_type ** pArr; friend class bucket_entry; public: iterator() : pArr( nullptr ) {} iterator( node_type ** p ) : pArr(p) {} iterator( iterator const& it) : pArr( it.pArr ) {} iterator& operator=( iterator const& it ) { pArr = it.pArr; return *this; } node_type * operator->() { assert( pArr != nullptr ); return *pArr; } node_type& operator*() { assert( pArr != nullptr ); assert( *pArr != nullptr ); return *(*pArr); } // preinc iterator& operator ++() { ++pArr; return *this; } bool operator==(iterator const& it ) const { return pArr == it.pArr; } bool operator!=(iterator const& it ) const { return !( *this == it ); } }; public: bucket_entry() : m_nSize(0) { memset( m_arrNode, 0, sizeof(m_arrNode)); static_assert((
std::is_same::value ), "Incompatible node type" ); } iterator begin() { return iterator(m_arrNode); } iterator end() { return iterator(m_arrNode + size()); } void insert_after( iterator it, node_type * p ) { assert( m_nSize < c_nCapacity ); assert( !it.pArr || (m_arrNode <= it.pArr && it.pArr <= m_arrNode + m_nSize)); if ( it.pArr ) { shift_up( static_cast(it.pArr - m_arrNode) + 1 ); it.pArr[1] = p; } else { shift_up(0); m_arrNode[0] = p; } ++m_nSize; } void remove( iterator /*itPrev*/, iterator itWhat ) { itWhat->clear(); shift_down( itWhat.pArr ); --m_nSize; } void clear() { m_nSize = 0; } template void clear( Disposer disp ) { for ( unsigned int i = 0; i < m_nSize; ++i ) { disp( m_arrNode[i] ); } m_nSize = 0; } unsigned int size() const { return m_nSize; } }; template struct hash_ops { static void store( Node * pNode, size_t const* pHashes ) { memcpy( pNode->m_arrHash, pHashes, sizeof(pHashes[0]) * ArraySize ); } static bool equal_to( Node& node, unsigned int nTable, size_t nHash ) { return node.m_arrHash[nTable] == nHash; } }; template struct hash_ops { static void store( Node * /*pNode*/, size_t * /*pHashes*/ ) {} static bool equal_to( Node& /*node*/, unsigned int /*nTable*/, size_t /*nHash*/ ) { return true; } }; template struct contains; template struct contains { template static bool find( BucketEntry& probeset, Position& pos, unsigned int /*nTable*/, size_t /*nHash*/, Q const& val, Compare cmp ) { // Ordered version typedef typename BucketEntry::iterator bucket_iterator; bucket_iterator itPrev; for ( bucket_iterator it = probeset.begin(), itEnd = probeset.end(); it != itEnd; ++it ) { int cmpRes = cmp( *NodeTraits::to_value_ptr(*it), val ); if ( cmpRes >= 0 ) { pos.itFound = it; pos.itPrev = itPrev; return cmpRes == 0; } itPrev = it; } pos.itPrev = itPrev; pos.itFound = probeset.end(); return false; } }; template struct contains { template static bool find( BucketEntry& probeset, Position& pos, unsigned int nTable, size_t nHash, Q const& val, EqualTo eq ) { // Unordered version typedef typename BucketEntry::iterator bucket_iterator; typedef typename BucketEntry::node_type node_type; bucket_iterator itPrev; for ( bucket_iterator it = probeset.begin(), itEnd = probeset.end(); it != itEnd; ++it ) { if ( hash_ops::equal_to( *it, nTable, nHash ) && eq( *NodeTraits::to_value_ptr(*it), val )) { pos.itFound = it; pos.itPrev = itPrev; return true; } itPrev = it; } pos.itPrev = itPrev; pos.itFound = probeset.end(); return false; } }; } // namespace details //@endcond } // namespace cuckoo /// Cuckoo hash set /** @ingroup cds_intrusive_map Source - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" About Cuckoo hashing [From "The Art of Multiprocessor Programming"] Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set of size N = 2k we use a two-entry array of tables, and two independent hash functions, h0, h1: KeyRange -> 0,...,k-1 mapping the set of possible keys to entries in he array. To test whether a value \p x is in the set, find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is equal to \p x. Similarly, erase(x)checks whether \p x is in either table[0][h0(x)] or table[1][h1(x)], ad removes it if found. The insert(x) successively "kicks out" conflicting items until every key has a slot. 
To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables) until it finds an empty slot. We might not find an empty slot, either because the table is full, or because the sequence of displacements forms a cycle. We therefore need an upper limit on the number of successive displacements we are willing to undertake. When this limit is exceeded, we resize the hash table, choose new hash functions and start over. For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of items, we use a two-dimensional table of probe sets, where a probe set is a constant-sized set of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm tries to ensure that when the set is quiescent (i.e., no method call is in progress) each probe set holds no more than THRESHOLD < PROBE_SIZE items. While method calls are in-flight, a probe set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SIZE items. In the current implementation, a probe set can be defined either as a (singly-linked) list or as a fixed-size vector, optionally ordered. In the description above, two-table cuckoo hashing (k = 2) has been considered. We can generalize this approach for k >= 2 when we have \p k hash functions h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. The search in a probe set is linear, so its complexity is O(PROBE_SIZE). The probe set may be ordered or not. An ordered probe set can be more efficient since the average search complexity is O(PROBE_SIZE/2). However, the overhead of sorting can cancel out the gain of ordered search. The probe set is ordered if \p compare or \p less is specified in the \p Traits template parameter. Otherwise, the probe set is unordered and \p Traits should provide an \p equal_to predicate. The \p cds::intrusive::cuckoo namespace contains \p %CuckooSet-related declarations. Template arguments: - \p T - the type stored in the set. The type must be based on \p cuckoo::node (for \p cuckoo::base_hook) or it must have a member of type %cuckoo::node (for \p cuckoo::member_hook), or it must be convertible to \p %cuckoo::node (for \p cuckoo::traits_hook) - \p Traits - type traits, default is \p cuckoo::traits. It is possible to declare an option-based set with the \p cuckoo::make_traits metafunction result as the \p Traits template argument. How to use You should incorporate \p cuckoo::node into your struct \p T and provide an appropriate \p cuckoo::traits::hook in your \p Traits template parameter. Usually, for \p Traits you define a struct based on \p cuckoo::traits. Example for base hook and list-based probe-set: \code #include <cds/intrusive/cuckoo_set.h> // Data stored in cuckoo set // We use list as probe-set container and store hash values in the node // (since we use two hash functions we should store 2 hash values per node) struct my_data: public cds::intrusive::cuckoo::node< cds::intrusive::cuckoo::list, 2 > { // key field std::string strKey; // other data // ...
}; // Provide equal_to functor for my_data since we will use unordered probe-set struct my_data_equal_to { bool operator()( const my_data& d1, const my_data& d2 ) const { return d1.strKey.compare( d2.strKey ) == 0; } bool operator()( const my_data& d, const std::string& s ) const { return d.strKey.compare(s) == 0; } bool operator()( const std::string& s, const my_data& d ) const { return s.compare( d.strKey ) == 0; } }; // Provide two hash functors for my_data struct hash1 { size_t operator()(std::string const& s) const { return cds::opt::v::hash<std::string>()( s ); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; struct hash2: private hash1 { size_t operator()(std::string const& s) const { size_t h = ~( hash1::operator()(s)); return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; // Declare type traits struct my_traits: public cds::intrusive::cuckoo::traits { typedef cds::intrusive::cuckoo::base_hook< cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > > hook; typedef my_data_equal_to equal_to; typedef cds::opt::hash_tuple< hash1, hash2 > hash; }; // Declare CuckooSet type typedef cds::intrusive::CuckooSet< my_data, my_traits > my_cuckoo_set; // Equivalent option-based declaration typedef cds::intrusive::CuckooSet< my_data, cds::intrusive::cuckoo::make_traits< cds::intrusive::opt::hook< cds::intrusive::cuckoo::base_hook< cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > > > ,cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::equal_to< my_data_equal_to > >::type > opt_cuckoo_set; \endcode If we provide a \p compare functor instead of \p equal_to for \p my_data, we get as a result a cuckoo set with an ordered probe set, which may improve performance. Example for base hook and ordered vector-based probe-set: \code #include <cds/intrusive/cuckoo_set.h> // Data stored in cuckoo set // We use a vector of capacity 4 as probe-set container and store hash values in the node // (since we use two hash functions we should store 2 hash values per node) struct my_data: public cds::intrusive::cuckoo::node< cds::intrusive::cuckoo::vector<4>, 2 > { // key field std::string strKey; // other data // ...
}; // Provide compare functor for my_data since we want to use ordered probe-set struct my_data_compare { int operator()( const my_data& d1, const my_data& d2 ) const { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) const { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) const { return s.compare( d.strKey ); } }; // Provide two hash functors for my_data struct hash1 { size_t operator()(std::string const& s) const { return cds::opt::v::hash<std::string>()( s ); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; struct hash2: private hash1 { size_t operator()(std::string const& s) const { size_t h = ~( hash1::operator()(s)); return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); } size_t operator()( my_data const& d ) const { return (*this)( d.strKey ); } }; // Declare type traits struct my_traits: public cds::intrusive::cuckoo::traits { typedef cds::intrusive::cuckoo::base_hook< cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > > hook; typedef my_data_compare compare; typedef cds::opt::hash_tuple< hash1, hash2 > hash; }; // Declare CuckooSet type typedef cds::intrusive::CuckooSet< my_data, my_traits > my_cuckoo_set; // Equivalent option-based declaration typedef cds::intrusive::CuckooSet< my_data, cds::intrusive::cuckoo::make_traits< cds::intrusive::opt::hook< cds::intrusive::cuckoo::base_hook< cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > > > ,cds::opt::hash< std::tuple< hash1, hash2 > > ,cds::opt::compare< my_data_compare > >::type > opt_cuckoo_set; \endcode */ template <typename T, typename Traits = cuckoo::traits> class CuckooSet { public: typedef T value_type; ///< The value type stored in the set typedef Traits traits; ///< Set traits typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use typedef typename hash::hash_tuple_type hash_tuple_type; ///< Type of hash tuple typedef typename traits::stat stat; ///< internal statistics type typedef typename traits::mutex_policy original_mutex_policy; ///< Concurrent access policy, see \p cuckoo::traits::mutex_policy //@cond typedef typename original_mutex_policy::template rebind_statistics< typename std::conditional< std::is_same< stat, cuckoo::empty_stat >::value ,typename original_mutex_policy::empty_stat ,typename original_mutex_policy::real_stat >::type >::other mutex_policy; //@endcond /// Whether the probe set is ordered or not /** If \p Traits specifies a \p compare or \p less functor, then the set is ordered. Otherwise, it is unordered and \p Traits should provide an \p equal_to functor. */ static bool const c_isSorted = !( std::is_same< typename traits::compare, opt::none >::value && std::is_same< typename traits::less, opt::none >::value ); static size_t const c_nArity = hash::size ; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. /// Key equality functor; used only for unordered probe-set typedef typename opt::details::make_equal_to< value_type, traits, !c_isSorted>::type key_equal_to; /// key comparing functor based on \p opt::compare and \p opt::less option setter.
Used only for ordered probe set typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; /// allocator type typedef typename traits::allocator allocator; /// item counter type typedef typename traits::item_counter item_counter; /// node disposer typedef typename traits::disposer disposer; protected: //@cond typedef typename node_type::probeset_class probeset_class; typedef typename node_type::probeset_type probeset_type; static unsigned int const c_nNodeHashArraySize = node_type::hash_array_size; typedef typename mutex_policy::scoped_cell_lock scoped_cell_lock; typedef typename mutex_policy::scoped_cell_trylock scoped_cell_trylock; typedef typename mutex_policy::scoped_full_lock scoped_full_lock; typedef typename mutex_policy::scoped_resize_lock scoped_resize_lock; typedef cuckoo::details::bucket_entry< node_type, probeset_type > bucket_entry; typedef typename bucket_entry::iterator bucket_iterator; typedef cds::details::Allocator< bucket_entry, allocator > bucket_table_allocator; typedef size_t hash_array[c_nArity] ; ///< hash array struct position { bucket_iterator itPrev; bucket_iterator itFound; }; typedef cuckoo::details::contains< node_traits, c_isSorted > contains_action; template using predicate_wrapper = typename std::conditional< c_isSorted, cds::opt::details::make_comparator_from_less, Predicate>::type; typedef typename std::conditional< c_isSorted, key_comparator, key_equal_to >::type key_predicate; //@endcond public: static unsigned int const c_nDefaultProbesetSize = 4; ///< default probeset size static size_t const c_nDefaultInitialSize = 16; ///< default initial size static unsigned int const c_nRelocateLimit = c_nArity * 2 - 1; ///< Count of attempts to relocate before giving up protected: bucket_entry * m_BucketTable[ c_nArity ] ; ///< Bucket tables atomics::atomic m_nBucketMask ; ///< Hash bitmask; bucket table size minus 1. 
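// Editorial note: the bucket tables are always allocated with a power-of-two size,
// so a hash value is mapped to a bucket with a bitwise AND instead of a modulo
// (see bucket() below):
//      m_BucketTable[nTable][ nHash & m_nBucketMask.load( atomics::memory_order_relaxed ) ]
// because nHash & (size - 1) == nHash % size whenever size == 2^k.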
unsigned int const m_nProbesetSize ; ///< Probe set size unsigned int const m_nProbesetThreshold ; ///< Probe set threshold hash m_Hash ; ///< Hash functor tuple mutex_policy m_MutexPolicy ; ///< concurrent access policy item_counter m_ItemCounter ; ///< item counter mutable stat m_Stat ; ///< internal statistics protected: //@cond static void check_common_constraints() { static_assert( (c_nArity == mutex_policy::c_nArity), "The count of hash functors must be equal to mutex_policy arity" ); } void check_probeset_properties() const { assert( m_nProbesetThreshold < m_nProbesetSize ); // if probe set type is cuckoo::vector then m_nProbesetSize == N assert( node_type::probeset_size == 0 || node_type::probeset_size == m_nProbesetSize ); } template void hashing( size_t * pHashes, Q const& v ) const { m_Hash( pHashes, v ); } void copy_hash( size_t * pHashes, value_type const& v ) const { constexpr_if ( c_nNodeHashArraySize != 0 ) memcpy( pHashes, node_traits::to_node_ptr( v )->get_hash(), sizeof( pHashes[0] ) * c_nNodeHashArraySize ); else hashing( pHashes, v ); } bucket_entry& bucket( unsigned int nTable, size_t nHash ) { assert( nTable < c_nArity ); return m_BucketTable[nTable][nHash & m_nBucketMask.load( atomics::memory_order_relaxed ) ]; } static void store_hash( node_type * pNode, size_t * pHashes ) { cuckoo::details::hash_ops< node_type, c_nNodeHashArraySize >::store( pNode, pHashes ); } static bool equal_hash( node_type& node, unsigned int nTable, size_t nHash ) { return cuckoo::details::hash_ops< node_type, c_nNodeHashArraySize >::equal_to( node, nTable, nHash ); } void allocate_bucket_tables( size_t nSize ) { assert( cds::beans::is_power2( nSize )); m_nBucketMask.store( nSize - 1, atomics::memory_order_release ); bucket_table_allocator alloc; for ( unsigned int i = 0; i < c_nArity; ++i ) m_BucketTable[i] = alloc.NewArray( nSize ); } static void free_bucket_tables( bucket_entry ** pTable, size_t nCapacity ) { bucket_table_allocator alloc; for ( unsigned int i = 0; i < c_nArity; ++i ) { alloc.Delete( pTable[i], nCapacity ); pTable[i] = nullptr; } } void free_bucket_tables() { free_bucket_tables( m_BucketTable, m_nBucketMask.load( atomics::memory_order_relaxed ) + 1 ); } static constexpr unsigned int const c_nUndefTable = (unsigned int) -1; template unsigned int contains( position * arrPos, size_t * arrHash, Q const& val, Predicate pred ) { // Buckets must be locked for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& probeset = bucket( i, arrHash[i] ); if ( contains_action::find( probeset, arrPos[i], i, arrHash[i], val, pred )) return i; } return c_nUndefTable; } template value_type * erase_( Q const& val, Predicate pred, Func f ) { hash_array arrHash; hashing( arrHash, val ); position arrPos[ c_nArity ]; { scoped_cell_lock guard( m_MutexPolicy, arrHash ); unsigned int nTable = contains( arrPos, arrHash, val, pred ); if ( nTable != c_nUndefTable ) { node_type& node = *arrPos[nTable].itFound; f( *node_traits::to_value_ptr(node)); bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound ); --m_ItemCounter; m_Stat.onEraseSuccess(); return node_traits::to_value_ptr( node ); } } m_Stat.onEraseFailed(); return nullptr; } template bool find_( Q& val, Predicate pred, Func f ) { hash_array arrHash; position arrPos[ c_nArity ]; hashing( arrHash, val ); scoped_cell_lock sl( m_MutexPolicy, arrHash ); unsigned int nTable = contains( arrPos, arrHash, val, pred ); if ( nTable != c_nUndefTable ) { f( *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val ); 
m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } bool relocate( unsigned int nTable, size_t * arrGoalHash ) { // arrGoalHash contains hash values for relocating element // Relocating element is first one from bucket( nTable, arrGoalHash[nTable] ) probeset m_Stat.onRelocateCall(); hash_array arrHash; value_type * pVal; for ( unsigned int nRound = 0; nRound < c_nRelocateLimit; ++nRound ) { m_Stat.onRelocateRound(); while ( true ) { scoped_cell_lock guard( m_MutexPolicy, arrGoalHash ); bucket_entry& refBucket = bucket( nTable, arrGoalHash[nTable] ); if ( refBucket.size() < m_nProbesetThreshold ) { // probeset is not above the threshold m_Stat.onFalseRelocateRound(); return true; } pVal = node_traits::to_value_ptr( *refBucket.begin()); copy_hash( arrHash, *pVal ); scoped_cell_trylock guard2( m_MutexPolicy, arrHash ); if ( !guard2.locked()) continue ; // try one more time refBucket.remove( typename bucket_entry::iterator(), refBucket.begin()); unsigned int i = (nTable + 1) % c_nArity; // try insert into free probeset while ( i != nTable ) { bucket_entry& bkt = bucket( i, arrHash[i] ); if ( bkt.size() < m_nProbesetThreshold ) { position pos; contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate()) ; // must return false! bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal )); m_Stat.onSuccessRelocateRound(); return true; } i = ( i + 1 ) % c_nArity; } // try insert into partial probeset i = (nTable + 1) % c_nArity; while ( i != nTable ) { bucket_entry& bkt = bucket( i, arrHash[i] ); if ( bkt.size() < m_nProbesetSize ) { position pos; contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate()) ; // must return false! bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal )); nTable = i; memcpy( arrGoalHash, arrHash, sizeof(arrHash)); m_Stat.onRelocateAboveThresholdRound(); goto next_iteration; } i = (i + 1) % c_nArity; } // all probeset is full, relocating fault refBucket.insert_after( typename bucket_entry::iterator(), node_traits::to_node_ptr( pVal )); m_Stat.onFailedRelocate(); return false; } next_iteration:; } return false; } void resize() { m_Stat.onResizeCall(); size_t nOldCapacity = bucket_count( atomics::memory_order_acquire ); bucket_entry* pOldTable[ c_nArity ]; { scoped_resize_lock guard( m_MutexPolicy ); if ( nOldCapacity != bucket_count()) { m_Stat.onFalseResizeCall(); return; } size_t nCapacity = nOldCapacity * 2; m_MutexPolicy.resize( nCapacity ); memcpy( pOldTable, m_BucketTable, sizeof(pOldTable)); allocate_bucket_tables( nCapacity ); hash_array arrHash; position arrPos[ c_nArity ]; for ( unsigned int nTable = 0; nTable < c_nArity; ++nTable ) { bucket_entry * pTable = pOldTable[nTable]; for ( size_t k = 0; k < nOldCapacity; ++k ) { bucket_iterator itNext; for ( bucket_iterator it = pTable[k].begin(), itEnd = pTable[k].end(); it != itEnd; it = itNext ) { itNext = it; ++itNext; value_type& val = *node_traits::to_value_ptr( *it ); copy_hash( arrHash, val ); CDS_VERIFY_EQ( contains( arrPos, arrHash, val, key_predicate()), c_nUndefTable ); for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& refBucket = bucket( i, arrHash[i] ); if ( refBucket.size() < m_nProbesetThreshold ) { refBucket.insert_after( arrPos[i].itPrev, &*it ); m_Stat.onResizeSuccessMove(); goto do_next; } } for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& refBucket = bucket( i, arrHash[i] ); if ( refBucket.size() < m_nProbesetSize ) { refBucket.insert_after( arrPos[i].itPrev, &*it ); assert( refBucket.size() > 1 ); copy_hash( 
arrHash, *node_traits::to_value_ptr( *refBucket.begin())); m_Stat.onResizeRelocateCall(); relocate( i, arrHash ); break; } } do_next:; } } } } free_bucket_tables( pOldTable, nOldCapacity ); } constexpr static unsigned int calc_probeset_size( unsigned int nProbesetSize ) noexcept { return std::is_same< probeset_class, cuckoo::vector_probeset_class >::value ? node_type::probeset_size : (nProbesetSize ? nProbesetSize : ( node_type::probeset_size ? node_type::probeset_size : c_nDefaultProbesetSize )); } //@endcond public: /// Default constructor /** Initial size = \ref c_nDefaultInitialSize Probe set size: - \p c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list - \p Capacity if \p probeset_type is cuckoo::vector Probe set threshold = probe set size - 1 */ CuckooSet() : m_nProbesetSize( calc_probeset_size(0)) , m_nProbesetThreshold( m_nProbesetSize - 1 ) , m_MutexPolicy( c_nDefaultInitialSize ) { check_common_constraints(); check_probeset_properties(); allocate_bucket_tables( c_nDefaultInitialSize ); } /// Constructs the set object with given probe set size and threshold /** If probe set type is cuckoo::vector vector then \p nProbesetSize is ignored since it should be equal to vector's \p Capacity. */ CuckooSet( size_t nInitialSize ///< Initial set size; if 0 - use default initial size \p c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 ) : m_nProbesetSize( calc_probeset_size(nProbesetSize)) , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1 ) , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) { check_common_constraints(); check_probeset_properties(); allocate_bucket_tables( nInitialSize ? cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); } /// Constructs the set object with given hash functor tuple /** The probe set size and threshold are set as default, see \p CuckooSet() */ CuckooSet( hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : m_nProbesetSize( calc_probeset_size(0)) , m_nProbesetThreshold( m_nProbesetSize -1 ) , m_Hash( h ) , m_MutexPolicy( c_nDefaultInitialSize ) { check_common_constraints(); check_probeset_properties(); allocate_bucket_tables( c_nDefaultInitialSize ); } /// Constructs the set object with given probe set properties and hash functor tuple /** If probe set type is cuckoo::vector vector then \p nProbesetSize should be equal to vector's \p Capacity. */ CuckooSet( size_t nInitialSize ///< Initial set size; if 0 - use default initial size \p c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size, positive integer , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity ) : m_nProbesetSize( calc_probeset_size(nProbesetSize)) , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1) , m_Hash( h ) , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) { check_common_constraints(); check_probeset_properties(); allocate_bucket_tables( nInitialSize ? 
cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); } /// Constructs the set object with given hash functor tuple (move semantics) /** The probe set size and threshold are set as default, see \p CuckooSet() */ CuckooSet( hash_tuple_type&& h ///< hash functor tuple of type std::tuple<H1, H2, ... Hn> where n == \ref c_nArity ) : m_nProbesetSize( calc_probeset_size(0)) , m_nProbesetThreshold( m_nProbesetSize - 1 ) , m_Hash( std::forward<hash_tuple_type>(h)) , m_MutexPolicy( c_nDefaultInitialSize ) { check_common_constraints(); check_probeset_properties(); allocate_bucket_tables( c_nDefaultInitialSize ); } /// Constructs the set object with given probe set properties and hash functor tuple (move semantics) /** If the probe set type is cuckoo::vector<Capacity> then \p nProbesetSize should be equal to the vector's \p Capacity. */ CuckooSet( size_t nInitialSize ///< Initial set size; if 0 - use default initial size \p c_nDefaultInitialSize , unsigned int nProbesetSize ///< probe set size, positive integer , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 , hash_tuple_type&& h ///< hash functor tuple of type std::tuple<H1, H2, ... Hn> where n == \ref c_nArity ) : m_nProbesetSize( calc_probeset_size(nProbesetSize)) , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1) , m_Hash( std::forward<hash_tuple_type>(h)) , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) { check_common_constraints(); check_probeset_properties(); allocate_bucket_tables( nInitialSize ? cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); } /// Destructor ~CuckooSet() { free_bucket_tables(); } public: /// Inserts new node /** The function inserts \p val into the set if the set does not contain an item with key equal to \p val. Returns \p true if \p val is inserted into the set, \p false otherwise. */ bool insert( value_type& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** The function allows splitting the creation of a new item into two parts: - create an item with key only - insert the new item into the set - if the insertion succeeds, call the \p f functor to initialize the value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the insertion succeeds.
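For example (an illustrative sketch; \p my_cuckoo_set and \p my_data are taken from the examples above, and the non-key field \p nPayload is assumed): \code
my_cuckoo_set s;
my_data* p = new my_data;
p->strKey = "foo";          // initialize the key before inserting
bool bOk = s.insert( *p, []( my_data& d ) {
    d.nPayload = 42;        // nPayload is an assumed non-key field
});
\endcode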
*/ template <typename Func> bool insert( value_type& val, Func f ) { hash_array arrHash; position arrPos[ c_nArity ]; unsigned int nGoalTable; hashing( arrHash, val ); node_type * pNode = node_traits::to_node_ptr( val ); store_hash( pNode, arrHash ); while (true) { { scoped_cell_lock guard( m_MutexPolicy, arrHash ); if ( contains( arrPos, arrHash, val, key_predicate()) != c_nUndefTable ) { m_Stat.onInsertFailed(); return false; } for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& refBucket = bucket( i, arrHash[i] ); if ( refBucket.size() < m_nProbesetThreshold ) { refBucket.insert_after( arrPos[i].itPrev, pNode ); f( val ); ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } } for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& refBucket = bucket( i, arrHash[i] ); if ( refBucket.size() < m_nProbesetSize ) { refBucket.insert_after( arrPos[i].itPrev, pNode ); f( val ); ++m_ItemCounter; nGoalTable = i; assert( refBucket.size() > 1 ); copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin())); goto do_relocate; } } } m_Stat.onInsertResize(); resize(); } do_relocate: m_Stat.onInsertRelocate(); if ( !relocate( nGoalTable, arrHash )) { m_Stat.onInsertRelocateFault(); m_Stat.onInsertResize(); resize(); } m_Stat.onInsertSuccess(); return true; } /// Updates the node /** The operation performs insertion or update of the data. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor \p func signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - an item of the set - \p val - the argument \p val passed into the \p %update() function If a new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor may change non-key fields of the \p item. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, i.e. the node has been inserted or updated, \p second is \p true if a new item has been added or \p false if the item with \p key already exists.
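For example (an illustrative sketch; \p my_cuckoo_set and \p my_data are taken from the examples above): \code
my_cuckoo_set s;
my_data item;
item.strKey = "foo";
std::pair<bool, bool> res = s.update( item, []( bool bNew, my_data& d, my_data& val ) {
    if ( bNew ) {
        // d and val refer to the same newly inserted item
    }
    else {
        // d is the item already in the set, val is our argument;
        // copy non-key data from val to d here if needed
    }
});
// res.first  - the item has been inserted or updated
// res.second - a new item has been added
\endcode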
*/ template <typename Func> std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true ) { hash_array arrHash; position arrPos[ c_nArity ]; unsigned int nGoalTable; hashing( arrHash, val ); node_type * pNode = node_traits::to_node_ptr( val ); store_hash( pNode, arrHash ); while (true) { { scoped_cell_lock guard( m_MutexPolicy, arrHash ); unsigned int nTable = contains( arrPos, arrHash, val, key_predicate()); if ( nTable != c_nUndefTable ) { func( false, *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val ); m_Stat.onUpdateExist(); return std::make_pair( true, false ); } if ( !bAllowInsert ) return std::make_pair( false, false ); //node_type * pNode = node_traits::to_node_ptr( val ); //store_hash( pNode, arrHash ); for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& refBucket = bucket( i, arrHash[i] ); if ( refBucket.size() < m_nProbesetThreshold ) { refBucket.insert_after( arrPos[i].itPrev, pNode ); func( true, val, val ); ++m_ItemCounter; m_Stat.onUpdateSuccess(); return std::make_pair( true, true ); } } for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry& refBucket = bucket( i, arrHash[i] ); if ( refBucket.size() < m_nProbesetSize ) { refBucket.insert_after( arrPos[i].itPrev, pNode ); func( true, val, val ); ++m_ItemCounter; nGoalTable = i; assert( refBucket.size() > 1 ); copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin())); goto do_relocate; } } } m_Stat.onUpdateResize(); resize(); } do_relocate: m_Stat.onUpdateRelocate(); if ( !relocate( nGoalTable, arrHash )) { m_Stat.onUpdateRelocateFault(); m_Stat.onUpdateResize(); resize(); } m_Stat.onUpdateSuccess(); return std::make_pair( true, true ); } //@cond template <typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlink the item \p val from the set /** The function searches for the item \p val in the set and unlinks it if it is found and is equal to \p val (here, the equality means that \p val belongs to the set: if \p item is the item found, then unlink is successful iff &val == &item). The function returns \p true on success and \p false otherwise. */ bool unlink( value_type& val ) { hash_array arrHash; hashing( arrHash, val ); position arrPos[ c_nArity ]; { scoped_cell_lock guard( m_MutexPolicy, arrHash ); unsigned int nTable = contains( arrPos, arrHash, val, key_predicate()); if ( nTable != c_nUndefTable && node_traits::to_value_ptr(*arrPos[nTable].itFound) == &val ) { bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound ); --m_ItemCounter; m_Stat.onUnlinkSuccess(); return true; } } m_Stat.onUnlinkFailed(); return false; } /// Deletes the item from the set /** \anchor cds_intrusive_CuckooSet_erase The function searches for an item with key equal to \p val in the set, unlinks it from the set, and returns a pointer to the unlinked item. If the item with key equal to \p val is not found, the function returns \p nullptr. Note the hash functor should accept a parameter of type \p Q that may differ from \p value_type. */ template <typename Q> value_type * erase( Q const& val ) { return erase( val, [](value_type const&) {} ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_CuckooSet_erase "erase(Q const&)" but \p pred is used for key comparing. If the cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less.
If the cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. \p Predicate must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Predicate> value_type * erase_with( Q const& val, Predicate pred ) { CDS_UNUSED( pred ); return erase_( val, predicate_wrapper<Predicate>(), [](value_type const&) {} ); } /// Delete the item from the set /** \anchor cds_intrusive_CuckooSet_erase_func The function searches for an item with key equal to \p val in the set, calls the \p f functor with the item found, unlinks it from the set, and returns a pointer to the unlinked item. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p val is not found, the function returns \p nullptr. Note the hash functor should accept a parameter of type \p Q that may differ from \p value_type. */ template <typename Q, typename Func> value_type * erase( Q const& val, Func f ) { return erase_( val, key_predicate(), f ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_CuckooSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. If you use an ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. If you use an unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. \p Predicate must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Predicate, typename Func> value_type * erase_with( Q const& val, Predicate pred, Func f ) { CDS_UNUSED( pred ); return erase_( val, predicate_wrapper<Predicate>(), f ); } /// Find the key \p val /** \anchor cds_intrusive_CuckooSet_find_func The function searches for the item with key equal to \p val and calls the functor \p f for the item found. The interface of the \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found and \p val is the find function argument. The functor may change non-key fields of \p item. The \p val argument is non-const since it can be used as the \p f functor destination, i.e. the functor may modify both arguments. Note the hash functor specified in the \p Traits template parameter should accept a parameter of type \p Q that may differ from \p value_type. The function returns \p true if \p val is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q& val, Func f ) { return find_( val, key_predicate(), f ); } //@cond template <typename Q, typename Func> bool find( Q const& val, Func f ) { return find_( val, key_predicate(), f ); } //@endcond /// Find the key \p val using \p pred predicate for comparing /** The function is an analog of \ref cds_intrusive_CuckooSet_find_func "find(Q&, Func)" but \p pred is used for key comparison. If you use an ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. If you use an unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. \p pred must imply the same element order as the comparator used for building the set.
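For example, for the ordered vector-based set from the example above (an illustrative sketch; \p str_less is an assumed predicate comparing \p my_data with \p std::string): \code
struct str_less {
    bool operator()( my_data const& d, std::string const& s ) const { return d.strKey < s; }
    bool operator()( std::string const& s, my_data const& d ) const { return s < d.strKey; }
};
my_cuckoo_set s;
// ... fill the set ...
std::string key = "foo";
bool bFound = s.find_with( key, str_less(), []( my_data& d, std::string const& ) {
    // inspect or modify non-key fields of d
});
\endcode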
*/ template bool find_with( Q& val, Predicate pred, Func f ) { CDS_UNUSED( pred ); return find_( val, predicate_wrapper(), f ); } //@cond template bool find_with( Q const& val, Predicate pred, Func f ) { CDS_UNUSED( pred ); return find_( val, predicate_wrapper(), f ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find( key, [](value_type&, Q const& ) {} ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. If the set is unordered, \p Predicate has semantics like \p std::equal_to. For ordered set \p Predicate has \p std::less semantics. In that case \p pred must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Predicate pred ) { CDS_UNUSED( pred ); return find_with( key, predicate_wrapper(), [](value_type& , Q const& ) {} ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Predicate pred ) { return contains( key, pred ); } //@endcond /// Clears the set /** The function unlinks all items from the set. For any item \p Traits::disposer is called */ void clear() { clear_and_dispose( disposer()); } /// Clears the set and calls \p disposer for each item /** The function unlinks all items from the set calling \p oDisposer for each item. \p Disposer functor interface is: \code struct Disposer{ void operator()( value_type * p ); }; \endcode The \p Traits::disposer is not called. */ template void clear_and_dispose( Disposer oDisposer ) { // locks entire array scoped_full_lock sl( m_MutexPolicy ); for ( unsigned int i = 0; i < c_nArity; ++i ) { bucket_entry * pEntry = m_BucketTable[i]; bucket_entry * pEnd = pEntry + m_nBucketMask.load( atomics::memory_order_relaxed ) + 1; for ( ; pEntry != pEnd ; ++pEntry ) { pEntry->clear( [&oDisposer]( node_type * pNode ){ oDisposer( node_traits::to_value_ptr( pNode )) ; } ); } } m_ItemCounter.reset(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. */ bool empty() const { return size() == 0; } /// Returns item count in the set size_t size() const { return m_ItemCounter; } /// Returns the size of hash table /** The hash table size is non-constant and can be increased via resizing. 
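For example (an illustrative sketch): \code
my_cuckoo_set s;
size_t nBuckets = s.bucket_count();  // bucket count of each of the c_nArity tables,
                                     // not the total number of items (see size())
\endcode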
*/ size_t bucket_count() const { return m_nBucketMask.load( atomics::memory_order_relaxed ) + 1; } //@cond size_t bucket_count( atomics::memory_order load_mo ) const { return m_nBucketMask.load( load_mo ) + 1; } //@endcond /// Returns lock array size size_t lock_count() const { return m_MutexPolicy.lock_count(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns const reference to mutex policy internal statistics typename mutex_policy::statistics_type const& mutex_policy_statistics() const { return m_MutexPolicy.statistics(); } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_CUCKOO_SET_H libcds-2.3.3/cds/intrusive/details/000077500000000000000000000000001341244201700172035ustar00rootroot00000000000000libcds-2.3.3/cds/intrusive/details/base.h000066400000000000000000000260171341244201700202740ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_BASE_H #include #include #include namespace cds { /// Intrusive containers /** @ingroup cds_intrusive_containers The namespace \p cds::intrusive contains intrusive lock-free containers. The idea comes from the \p boost::intrusive library, see http://boost.org/doc/ for a good introduction to the intrusive approach. The intrusive containers of the libcds library are developed to be as close to \p boost::intrusive as possible. In terms of the lock-free approach, the main advantage of intrusive containers is that no memory allocation is performed to maintain container elements. However, additional requirements are imposed on types and values that can be stored in an intrusive container. See the container documentation for details. \anchor cds_intrusive_hook_tag \par Tags Many hooks and nodes for intrusive containers contain a template argument \p Tag. This argument serves as a tag, so you can derive from more than one container's node and hence put an object in multiple intrusive containers at the same time. An incomplete type can serve as a tag. If you specify two hooks, you must specify a different tag for each one. Example: \code struct tag1; cds::intrusive::treiber_stack::node< cds::gc::HP, tag1 > \endcode If no tag is specified the default \p cds::opt::none will be used. \anchor cds_intrusive_item_creating \par Inserting items Many intrusive and non-intrusive (standard-like) containers in the library have member functions that take a functor argument to initialize the inserted item after it has been successfully inserted, for example: \code template <typename Q, typename Func> bool insert( Q& key, Func f ); template <typename Q, typename Func> std::pair<bool, bool> update( Q& key, Func f, bool bAllowInsert = true ); \endcode The first member function calls the \p f functor iff a new item has been inserted. The functor takes two parameters: a reference to the inserted item and \p key. The second member function, \p update(), allows you to insert a new item into the container if \p key is not found, or to find the item with \p key and perform some action with it. The \p f signature is: \code void f( bool bNew, item_type& item, Q& key ); \endcode where \p bNew is a flag to indicate whether \p item is a newly created node or not. Such functions should be used with caution in a multi-threaded environment since they can cause races. The library does not synchronize access to the container's items, so many threads can access one item simultaneously.
For example, for the \p insert member function the following race is possible: \code // Suppose Foo is a complex structure with int key field SomeContainer q; Thread 1 Thread 2 q.insert( Foo(5), q.find( 5, []( Foo& item ) { []( Foo& item ){ // access to item fields // complex initialization ... item.f1 = ...; }); ... }); \endcode Execution sequence: \code Find 5 in the container. Key 5 is not found Create a new item Find key 5 with calling Foo(5) ctor Insert the new item The key 5 is found - call the functor (!) Perform complex initialization - call the functor \endcode (!): Thread 2 found the key and calls its functor on an incompletely initialized item. Simultaneous access to the item is also possible. In this case Thread 1 is initializing the item, Thread 2 is reading (or writing) the item's fields. In any case, Thread 2 can read uninitialized or incompletely initialized fields. \p update() member function race. Suppose thread 1 and thread 2 perform the following code: \code q.update( 5, []( bool bNew, Foo& item, int arg ) { // bNew: true if the new element has been created // false otherwise if ( bNew ) { // initialize item item.f1=...; //... } else { // do some work if ( !item.f1 ) item.f1 = ...; else { //... } //... } } ); \endcode Execution sequence: \code Thread 1 Thread 2 key 5 not found insert new item Foo(5) Find 5 Key 5 found call the functor with bNew = false (!) call the functor with bNew = true \endcode (!): Thread 2 executes its functor on an incompletely initialized item. To protect your code from such races you can use some item-level synchronization, for example: \code struct Foo { spinlock lock; // item-level lock bool initialized = false; // initialization flag // other fields // .... }; q.update( 5, []( bool bNew, Foo& item, int arg ) { // Lock access to the item std::unique_lock<spinlock> sl( item.lock ); if ( !item.initialized ) { // initialize item item.f1=...; //... item.initialized = true; // mark the item as initialized } else { // do some work if ( !item.f1 ) item.f1 = ...; else { //... } //... } } ); \endcode If the item-level synchronization is not suitable, you should not use any inserting member function with a post-insert functor argument. \anchor cds_intrusive_item_destroying \par Destroying items You should be very careful when destroying an item removed from an intrusive container. References to the popped item may exist in other threads for some time after removal. To destroy the removed item in a thread-safe manner you should call the static function \p retire of the garbage collector you use, for example: \code struct destroyer { void operator ()( my_type * p ) { delete p; } }; typedef cds::intrusive::TreiberStack< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > stack; stack s; // .... my_type * p = s.pop(); if ( p ) { // It is wrong // delete p; // It is correct cds::gc::HP::retire< destroyer >( p ); } \endcode The situation becomes even more complicated when you want to store items in different intrusive containers. In this case the best way is to use reference counting: \code struct my_type { ...
std::atomic<unsigned int> nRefCount; my_type() : nRefCount(0) {} }; struct destroyer { void operator ()( my_type * p ) { if ( --p->nRefCount == 0 ) delete p ; // delete only when no references point to p } }; typedef cds::intrusive::TreiberStack< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > stack; typedef cds::intrusive::MSQueue< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > queue; stack s; queue q; my_type * v = new my_type(); v->nRefCount++ ; // increment counter before pushing the item to the stack s.push(v); v->nRefCount++ ; // increment counter before pushing the item to the queue q.push(v); // .... my_type * ps = s.pop(); if ( ps ) { // It is wrong // delete ps; // It is correct cds::gc::HP::retire< destroyer >( ps ); } my_type * pq = q.pop(); if ( pq ) { // It is wrong // delete pq; // It is correct cds::gc::HP::retire< destroyer >( pq ); } \endcode Violation of these rules may lead to a crash. \par Intrusive containers and Hazard Pointer-like garbage collectors If you develop your intrusive container based on the libcds library framework, you should take into account the following. The main idea of garbage collectors (GC) based on the Hazard Pointer schema is protecting a shared pointer by publishing it as a "hazard", i.e. as a pointer that is changing at the current time and cannot be deleted at this moment. In the intrusive container paradigm, the pointer to a node of the container and the pointer to an item stored in the container are not equal in the general case. However, any pointer to a node should be castable to an appropriate pointer to the container's item. In general, an item can be placed in two or more intrusive containers simultaneously, and each of those containers holds a unique pointer to its node that refers to the same item. When we protect a pointer, we want to protect an item pointer that is the invariant for any container storing that item. In your intrusive container, instead of protecting a pointer to a node with the GC's guard, you should cast it to a pointer to the item and then protect that item pointer. Otherwise an unpredictable result may occur. */ namespace intrusive { /// @defgroup cds_intrusive_containers Intrusive containers /** @defgroup cds_intrusive_helper Helper structs for intrusive containers @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_stack Stack @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_queue Queue @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_priority_queue Priority queue @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_deque Deque @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_map Set @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_tree Tree @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_list List @ingroup cds_intrusive_containers */ /** @defgroup cds_intrusive_freelist Free-list @ingroup cds_intrusive_containers */ //@cond class iterable_list_tag {}; template <typename List> struct is_iterable_list: public std::is_base_of< iterable_list_tag, List> {}; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_BASE_H libcds-2.3.3/cds/intrusive/details/ellen_bintree_base.h000066400000000000000000001003221341244201700231530ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H #include #include #include #include #include namespace cds { namespace intrusive { /// EllenBinTree related declarations namespace ellen_bintree { //Forwards template struct base_node; template struct node; template struct internal_node; /// Update descriptor /** The update descriptor is used internally to help concurrent threads complete a modifying operation. Usually, you should not use the \p update_desc type directly unless you want to develop a special free-list of update descriptors. Template parameters: - \p LeafNode - leaf node type, see \ref node - \p InternalNode - internal node type, see \ref internal_node @note The size of the update descriptor is constant. It does not depend on the template arguments. */ template struct update_desc { //@cond typedef LeafNode leaf_node; typedef InternalNode internal_node; typedef cds::details::marked_ptr< update_desc, 3 > update_ptr; enum { Clean = 0, DFlag = 1, IFlag = 2, Mark = 3 }; struct insert_info { internal_node * pParent; internal_node * pNew; leaf_node * pLeaf; bool bRightLeaf; }; struct delete_info { internal_node * pGrandParent; internal_node * pParent; leaf_node * pLeaf; update_desc * pUpdateParent; bool bDisposeLeaf; // true if pLeaf should be disposed, false otherwise (for extract operation, RCU) bool bRightParent; bool bRightLeaf; }; union { insert_info iInfo; delete_info dInfo; }; update_desc * pNextRetire; // for local retired list (RCU) update_desc() : pNextRetire( nullptr ) {} //@endcond }; //@cond struct alignas( void* ) basic_node { enum flags { internal = 1, ///< set for internal node key_infinite1 = 2, ///< set if node's key is Inf1 key_infinite2 = 4, ///< set if node's key is Inf2 key_infinite = key_infinite1 | key_infinite2 ///< Cumulative infinite flags }; atomics::atomic m_nFlags; ///< Internal flags /// Constructs leaf (bInternal == false) or internal (bInternal == true) node explicit basic_node( bool bInternal ) { m_nFlags.store( bInternal ? internal: 0, atomics::memory_order_release ); } /// Checks if the node is a leaf bool is_leaf() const { return !is_internal(); } /// Checks if the node is internal bool is_internal() const { return (m_nFlags.load(atomics::memory_order_acquire) & internal) != 0; } /// Returns infinite key, 0 if the node is not infinite unsigned int infinite_key() const { return m_nFlags.load(atomics::memory_order_acquire) & key_infinite; } /// Sets infinite key for the node (for internal use only!!!)
void infinite_key( int nInf ) { unsigned int nFlags = m_nFlags.load(atomics::memory_order_relaxed); nFlags &= ~key_infinite; switch ( nInf ) { case 1: nFlags |= key_infinite1; break; case 2: nFlags |= key_infinite2; break; case 0: break; default: assert( false ); break; } m_nFlags.store( nFlags, atomics::memory_order_release ); } }; template struct base_node: public basic_node { typedef basic_node base_class; typedef GC gc ; ///< Garbage collector /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node explicit base_node( bool bInternal ) : base_class( bInternal ) {} }; //@endcond /// Ellen's binary tree leaf node /** Template parameters: - \p GC - one of \ref cds_garbage_collector "garbage collector type" - \p Tag - a \ref cds_intrusive_hook_tag "tag" */ template struct node # ifndef CDS_DOXYGEN_INVOKED : public base_node< GC > # endif { //@cond typedef base_node< GC > base_class; //@endcond typedef GC gc; ///< Garbage collector typedef Tag tag; ///< Tag /// Default ctor node() : base_class( false ) {} }; /// Ellen's binary tree internal node /** Template arguments: - \p Key - key type - \p LeafNode - leaf node type */ template struct internal_node # ifndef CDS_DOXYGEN_INVOKED : public base_node # endif { //@cond typedef base_node base_class; //@endcond typedef Key key_type; ///< key type typedef LeafNode leaf_node; ///< type of leaf node typedef update_desc< leaf_node, internal_node > update_desc_type; ///< Update descriptor typedef typename update_desc_type::update_ptr update_ptr; ///< Marked pointer to update descriptor key_type m_Key; ///< Regular key atomics::atomic m_pLeft; ///< Left subtree atomics::atomic m_pRight; ///< Right subtree atomics::atomic m_pUpdate; ///< Update descriptor //@cond atomics::atomic m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4 //@endcond /// Default ctor internal_node() : base_class( true ) , m_pLeft( nullptr ) , m_pRight( nullptr ) , m_pUpdate( update_ptr()) { m_nEmptyUpdate.store( 0, atomics::memory_order_release ); } //@cond update_ptr null_update_desc() { return update_ptr( reinterpret_cast( ((m_nEmptyUpdate.fetch_add(1, atomics::memory_order_relaxed) + 1 ) << 2) & 0xFFFF )); } base_class * get_child( bool bRight, atomics::memory_order mo ) const { return bRight ? m_pRight.load( mo ) : m_pLeft.load( mo ); } //@endcond }; /// Types of EllenBinTree node /** This struct declares different \p %EllenBinTree node types. It can be useful for simplifying \p %EllenBinTree node declaration in your application. Template parameters: - \p GC - one of \ref cds_garbage_collector "garbage collector type" - \p Key - key type - \p Tag - a \ref cds_intrusive_hook_tag "tag" */ template struct node_types { typedef node leaf_node_type; ///< Leaf node type typedef internal_node internal_node_type; ///< Internal node type typedef update_desc update_desc_type; ///< Update descriptor type }; //@cond struct undefined_gc; struct default_hook { typedef undefined_gc gc; typedef opt::none tag; }; //@endcond //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - \p opt::gc - garbage collector - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... 
> {}; /// Member hook /** \p MemberOffset defines offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - \p opt::gc - garbage collector - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static constexpr const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - opt::gc - garbage collector - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... > { //@cond typedef NodeTraits node_traits; //@endcond }; /// Key extracting functor option setter template struct key_extractor { //@cond template struct pack: public Base { typedef Type key_extractor; }; //@endcond }; /// Update descriptor allocator option setter template struct update_desc_allocator { //@cond template struct pack: public Base { typedef Type update_desc_allocator; }; //@endcond }; /// EllenBinTree internal statistics template struct stat { typedef Counter event_counter ; ///< Event counter type event_counter m_nInternalNodeCreated ; ///< Total count of created internal node event_counter m_nInternalNodeDeleted ; ///< Total count of deleted internal node event_counter m_nUpdateDescCreated ; ///< Total count of created update descriptors event_counter m_nUpdateDescDeleted ; ///< Total count of deleted update descriptors event_counter m_nInsertSuccess ; ///< Count of success insertion event_counter m_nInsertFailed ; ///< Count of failed insertion event_counter m_nInsertRetries ; ///< Count of unsuccessful retries of insertion event_counter m_nUpdateExist ; ///< Count of \p update() call for existed node event_counter m_nUpdateNew ; ///< Count of \p update() call for new node event_counter m_nUpdateRetries ; ///< Count of unsuccessful retries of ensuring event_counter m_nEraseSuccess ; ///< Count of successful call of \p erase and \p unlink event_counter m_nEraseFailed ; ///< Count of failed call of \p erase and \p unlink event_counter m_nEraseRetries ; ///< Count of unsuccessful retries inside erasing/unlinking event_counter m_nFindSuccess ; ///< Count of successful \p find call event_counter m_nFindFailed ; ///< Count of failed \p find call event_counter m_nExtractMinSuccess ; ///< Count of successful call of \p extract_min event_counter m_nExtractMinFailed ; ///< Count of failed call of \p extract_min event_counter m_nExtractMinRetries ; ///< Count of unsuccessful retries inside \p extract_min event_counter m_nExtractMaxSuccess ; ///< Count of successful call of \p extract_max event_counter m_nExtractMaxFailed ; ///< Count of failed call of \p extract_max event_counter m_nExtractMaxRetries ; ///< Count of unsuccessful retries inside \p extract_max event_counter m_nSearchRetry ; ///< How many times the deleting node was encountered while searching event_counter m_nHelpInsert ; ///< The number of insert help from the other thread event_counter m_nHelpDelete ; ///< The number of delete help from the other thread event_counter m_nHelpMark ; ///< The number of delete help (mark phase) from the other thread event_counter m_nHelpGuardSuccess ; ///< The number of successful guarding of update descriptor data event_counter m_nHelpGuardFailed ; ///< The number of failed guarding of update descriptor data //@cond void 
onInternalNodeCreated() { ++m_nInternalNodeCreated ; } void onInternalNodeDeleted() { ++m_nInternalNodeDeleted ; } void onUpdateDescCreated() { ++m_nUpdateDescCreated ; } void onUpdateDescDeleted() { ++m_nUpdateDescDeleted ; } void onInsertSuccess() { ++m_nInsertSuccess ; } void onInsertFailed() { ++m_nInsertFailed ; } void onInsertRetry() { ++m_nInsertRetries ; } void onUpdateExist() { ++m_nUpdateExist ; } void onUpdateNew() { ++m_nUpdateNew ; } void onUpdateRetry() { ++m_nUpdateRetries ; } void onEraseSuccess() { ++m_nEraseSuccess ; } void onEraseFailed() { ++m_nEraseFailed ; } void onEraseRetry() { ++m_nEraseRetries ; } void onExtractMinSuccess() { ++m_nExtractMinSuccess ; } void onExtractMinFailed() { ++m_nExtractMinFailed ; } void onExtractMinRetry() { ++m_nExtractMinRetries ; } void onExtractMaxSuccess() { ++m_nExtractMaxSuccess ; } void onExtractMaxFailed() { ++m_nExtractMaxFailed ; } void onExtractMaxRetry() { ++m_nExtractMaxRetries ; } void onFindSuccess() { ++m_nFindSuccess ; } void onFindFailed() { ++m_nFindFailed ; } void onSearchRetry() { ++m_nSearchRetry ; } void onHelpInsert() { ++m_nHelpInsert ; } void onHelpDelete() { ++m_nHelpDelete ; } void onHelpMark() { ++m_nHelpMark ; } void onHelpGuardSuccess() { ++m_nHelpGuardSuccess ; } void onHelpGuardFailed() { ++m_nHelpGuardFailed ; } //@endcond }; /// EllenBinTree empty statistics struct empty_stat { //@cond void onInternalNodeCreated() const {} void onInternalNodeDeleted() const {} void onUpdateDescCreated() const {} void onUpdateDescDeleted() const {} void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertRetry() const {} void onUpdateExist() const {} void onUpdateNew() const {} void onUpdateRetry() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onEraseRetry() const {} void onExtractMinSuccess() const {} void onExtractMinFailed() const {} void onExtractMinRetry() const {} void onExtractMaxSuccess() const {} void onExtractMaxFailed() const {} void onExtractMaxRetry() const {} void onFindSuccess() const {} void onFindFailed() const {} void onSearchRetry() const {} void onHelpInsert() const {} void onHelpDelete() const {} void onHelpMark() const {} void onHelpGuardSuccess() const {} void onHelpGuardFailed() const {} //@endcond }; /// EllenBinTree traits struct traits { /// Hook used (mandatory) /** Possible values are: \p ellen_bintree::base_hook, \p ellen_bintree::member_hook, \p ellen_bintree::traits_hook. */ typedef base_hook<> hook; /// Key extracting functor (mandatory) /** You should explicit define a valid functor. The functor has the following prototype: \code struct key_extractor { void operator ()( Key& dest, T const& src ); }; \endcode It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. */ typedef opt::none key_extractor; /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. See \p cds::opt::compare option description for functor interface. You should provide \p compare or \p less functor. See \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". */ typedef opt::none compare; /// Specifies binary predicate used for key compare. /** See \p cds::opt::less option description for predicate interface. You should provide \p compare or \p less functor. See \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". */ typedef opt::none less; /// Disposer /** The functor used for dispose removed items. Default is \p opt::v::empty_disposer. 
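A typical disposer simply deletes the item (an illustrative sketch; \p my_item is an assumed value type): \code
struct my_disposer {
    void operator()( my_item* p ) const { delete p; }
};
\endcode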
*/ typedef opt::v::empty_disposer disposer; /// Item counter /** The type of the item counter; by default item counting is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter */ typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** For the list of available memory ordering models see \p opt::memory_model */ typedef opt::v::relaxed_ordering memory_model; /// Allocator for update descriptors /** The allocator type is used for \p ellen_bintree::update_desc. An update descriptor is a helper data structure with a short lifetime, which makes it a good candidate for pooling. The number of simultaneously existing descriptors is bounded, limited by the number of threads working with the tree. Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good choice for the free-list of update descriptors, see the \p cds::memory::vyukov_queue_pool free-list implementation. Also notice that the size of an update descriptor is constant and does not depend on the type of data stored in the tree, so a single free-list object can be used for several \p EllenBinTree objects. */ typedef CDS_DEFAULT_ALLOCATOR update_desc_allocator; /// Allocator for internal nodes /** The allocator type is used for \p ellen_bintree::internal_node. */ typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// Internal statistics /** By default, internal statistics is disabled (\p ellen_bintree::empty_stat). To enable it use \p ellen_bintree::stat. */ typedef empty_stat stat; /// Back-off strategy typedef cds::backoff::empty back_off; /// RCU deadlock checking policy (only for \ref cds_intrusive_EllenBinTree_rcu "RCU-based EllenBinTree") /** For the list of available options see \p opt::rcu_check_deadlock */ typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to EllenBinTree traits /** \p Options are: - \p opt::hook - hook used. Possible values are: \p ellen_bintree::base_hook, \p ellen_bintree::member_hook, \p ellen_bintree::traits_hook. If the option is not specified, \p ellen_bintree::base_hook<> is used. - \p ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: \code struct key_extractor { void operator ()( Key& dest, T const& src ); }; \endcode It should initialize the \p dest key from the \p src data. The functor is used to initialize internal nodes. - \p opt::compare - key compare functor. No default functor is provided. If the option is not specified, \p %opt::less is used. - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. - \p opt::disposer - the functor used for disposing removed nodes. Default is \p opt::v::empty_disposer. Due to the nature of the GC scheme the disposer may be called asynchronously. The disposer is used only for leaf nodes. - \p opt::item_counter - the type of item counting feature; by default it is disabled (\p atomicity::empty_item_counter). To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", default is \ref CDS_DEFAULT_ALLOCATOR. Note that an update descriptor is a helper data structure with a short lifetime and a good candidate for pooling. The number of simultaneously existing descriptors is bounded and depends on the number of threads working with the tree and GC internals. A bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good candidate for the free-list of update descriptors, see the \p cds::memory::vyukov_queue_pool free-list implementation. Also notice that the size of an update descriptor is constant and does not depend on the type of data stored in the tree, so a single free-list object can be used for all \p %EllenBinTree objects. - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::stat - internal statistics, by default it is disabled (\p ellen_bintree::empty_stat). To enable statistics use \p ellen_bintree::stat - \p opt::back_off - back-off strategy, by default no strategy is used (\p cds::backoff::empty) - \p opt::rcu_check_deadlock - a deadlock checking policy for the RCU-based tree, default is \p opt::v::rcu_throw_deadlock */
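/*
    Usage sketch (illustrative, not part of the library): a typical \p make_traits
    invocation combining the options documented above. \p my_key_extractor is the
    hypothetical functor from the earlier sketch; the \p std::less<int> choice is
    an assumption for the example.

    \code
    typedef cds::intrusive::ellen_bintree::make_traits<
        cds::intrusive::ellen_bintree::key_extractor< my_key_extractor >,
        cds::opt::less< std::less<int> >,
        cds::opt::stat< cds::intrusive::ellen_bintree::stat<> >
    >::type my_tree_traits;
    \endcode
*/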
template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond namespace details { template <typename Key, typename T, typename Compare, typename NodeTraits> struct compare { typedef Compare key_compare; typedef Key key_type; typedef T value_type; typedef NodeTraits node_traits; template <typename Q1, typename Q2> int operator()( Q1 const& v1, Q2 const& v2) const { return key_compare()( v1, v2 ); } template int operator()( internal_node const& n1, internal_node const& n2 ) const { if ( n1.infinite_key()) return n2.infinite_key() ? n1.infinite_key() - n2.infinite_key() : 1; else if ( n2.infinite_key()) return -1; return operator()( n1.m_Key, n2.m_Key ); } template int operator()( internal_node const& n, Q const& v ) const { if ( n.infinite_key()) return 1; return operator()( n.m_Key, v ); } template int operator()( Q const& v, internal_node const& n ) const { if ( n.infinite_key()) return -1; return operator()( v, n.m_Key ); } template int operator()( node const& n1, node const& n2 ) const { if ( n1.infinite_key() != n2.infinite_key()) return n1.infinite_key() - n2.infinite_key(); return operator()( *node_traits::to_value_ptr( n1 ), *node_traits::to_value_ptr( n2 )); } template int operator()( node const& n, Q const& v ) const { if ( n.infinite_key()) return 1; return operator()( *node_traits::to_value_ptr( n ), v ); } template int operator()( Q const& v, node const& n ) const { if ( n.infinite_key()) return -1; return operator()( v, *node_traits::to_value_ptr( n )); } template int operator()( base_node const& n1, base_node const& n2 ) const { if ( n1.infinite_key() != n2.infinite_key()) return n1.infinite_key() - n2.infinite_key(); if ( n1.is_leaf()) { if ( n2.is_leaf()) return operator()( node_traits::to_leaf_node( n1 ), node_traits::to_leaf_node( n2 )); else return operator()( node_traits::to_leaf_node( n1 ), node_traits::to_internal_node( n2 )); } if ( n2.is_leaf()) return operator()( node_traits::to_internal_node( n1 ), node_traits::to_leaf_node( n2 )); else return operator()( node_traits::to_internal_node( n1 ), node_traits::to_internal_node( n2 )); } template int operator()( base_node const& n, Q const& v ) const { if ( n.infinite_key()) return 1; if ( n.is_leaf()) return operator()( node_traits::to_leaf_node( n ), v ); return operator()( node_traits::to_internal_node( n ), v ); } template
int operator()( Q const& v, base_node const& n ) const { return -operator()( n, v ); } template int operator()( base_node const& i, internal_node const& n ) const { if ( i.is_leaf()) return operator()( static_cast(i), n ); return operator()( static_cast const&>(i), n ); } template int operator()( internal_node const& n, base_node const& i ) const { return -operator()( i, n ); } template int operator()( node const& n, internal_node > const& i ) const { if ( !n.infinite_key()) { if ( i.infinite_key()) return -1; return operator()( n, i.m_Key ); } if ( !i.infinite_key()) return 1; return int( n.infinite_key()) - int( i.infinite_key()); } template int operator()( internal_node > const& i, node const& n ) const { return -operator()( n, i ); } }; } // namespace details //@endcond } // namespace ellen_bintree // Forwards template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits > class EllenBinTree; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H libcds-2.3.3/cds/intrusive/details/feldman_hashset_base.h000066400000000000000000000701641341244201700235030ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H #include // memcmp, memcpy #include #include #include #include #include #include #include namespace cds { namespace intrusive { /// FeldmanHashSet related definitions /** @ingroup cds_intrusive_helper */ namespace feldman_hashset { /// Hash accessor option /** @copydetails traits::hash_accessor */ template struct hash_accessor { //@cond template struct pack: public Base { typedef Accessor hash_accessor; }; //@endcond }; /// Hash size option /** @copydetails traits::hash_size */ template struct hash_size { //@cond template struct pack: public Base { enum: size_t { hash_size = Size }; }; //@endcond }; /// Hash splitter option /** @copydetails traits::hash_splitter */ template struct hash_splitter { //@cond template struct pack: public Base { typedef Splitter hash_splitter; }; //@endcond }; /// \p FeldmanHashSet internal statistics template struct stat { typedef EventCounter event_counter ; ///< Event counter type event_counter m_nInsertSuccess; ///< Number of success \p insert() operations event_counter m_nInsertFailed; ///< Number of failed \p insert() operations event_counter m_nInsertRetry; ///< Number of attempts to insert new item event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() event_counter m_nUpdateExisting; ///< Number of existing item updates event_counter m_nUpdateFailed; ///< Number of failed \p update() call event_counter m_nUpdateRetry; ///< Number of attempts to update the item event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations event_counter m_nExpandNodeSuccess; ///< Number of succeeded attempts converting data node to array node event_counter m_nExpandNodeFailed; ///< Number of failed attempts converting data 
node to array node event_counter m_nSlotChanged; ///< Number of times an array node slot was changed by another thread during an operation event_counter m_nSlotConverting; ///< Number of events when we encounter a slot while it is being converted to an array node event_counter m_nArrayNodeCount; ///< Number of array nodes event_counter m_nHeight; ///< Current height of the tree //@cond void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onInsertRetry() { ++m_nInsertRetry; } void onUpdateNew() { ++m_nUpdateNew; } void onUpdateExisting() { ++m_nUpdateExisting; } void onUpdateFailed() { ++m_nUpdateFailed; } void onUpdateRetry() { ++m_nUpdateRetry; } void onEraseSuccess() { ++m_nEraseSuccess; } void onEraseFailed() { ++m_nEraseFailed; } void onEraseRetry() { ++m_nEraseRetry; } void onFindSuccess() { ++m_nFindSuccess; } void onFindFailed() { ++m_nFindFailed; } void onExpandNodeSuccess() { ++m_nExpandNodeSuccess; } void onExpandNodeFailed() { ++m_nExpandNodeFailed; } void onSlotChanged() { ++m_nSlotChanged; } void onSlotConverting() { ++m_nSlotConverting; } void onArrayNodeCreated() { ++m_nArrayNodeCount; } void height( size_t h ) { if (m_nHeight < h ) m_nHeight = h; } //@endcond }; /// \p FeldmanHashSet empty internal statistics struct empty_stat { //@cond void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertRetry() const {} void onUpdateNew() const {} void onUpdateExisting() const {} void onUpdateFailed() const {} void onUpdateRetry() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onEraseRetry() const {} void onFindSuccess() const {} void onFindFailed() const {} void onExpandNodeSuccess() const {} void onExpandNodeFailed() const {} void onSlotChanged() const {} void onSlotConverting() const {} void onArrayNodeCreated() const {} void height(size_t) const {} //@endcond }; /// \p FeldmanHashSet traits struct traits { /// Mandatory functor to get hash value from data node /** This is the most important feature of \p FeldmanHashSet: the functor must return a reference to a fixed-sized hash value of the data node. The return type of the functor specifies the type of the hash value. Example: \code typedef uint8_t hash_type[32]; // 256-bit hash type struct foo { hash_type hash; // 256-bit hash value // ... other fields }; // Hash accessor struct foo_hash_accessor { hash_type const& operator()( foo const& d ) const { return d.hash; } }; \endcode */ typedef cds::opt::none hash_accessor; /// The size of hash value in bytes /** By default, the size of the hash value is sizeof( hash_type ). Sometimes that is not correct; for example, for the following 6-byte key the \p static_assert fires because alignment padding makes sizeof( key_type ) == 8: \code struct key_type { uint32_t key1; uint16_t subkey; }; static_assert( sizeof( key_type ) == 6, "Key type size mismatch" ); \endcode For that case you can specify \p hash_size explicitly; a configuration sketch follows the \p compare option below. The value \p 0 means sizeof( hash_type ). */ static constexpr size_t const hash_size = 0; /// Hash splitter /** This trait specifies the hash bit-string splitter algorithm. By default, \p cds::algo::number_splitter is used if \p HashType is a number, \p cds::algo::split_bitstring otherwise. */ typedef cds::opt::none hash_splitter; /// Disposer for removing data nodes typedef cds::intrusive::opt::v::empty_disposer disposer; /// Hash comparing functor /** No default functor is provided. If the option is not specified, the \p less option is used. */ typedef cds::opt::none compare;
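// Configuration sketch referenced from the \p hash_size description above
// (illustrative; \p key_type is the padded 6-byte key from that example and
// \p key_accessor is a hypothetical functor returning key_type const&):
//
//   typedef cds::intrusive::feldman_hashset::make_traits<
//       cds::intrusive::feldman_hashset::hash_accessor< key_accessor >,
//       cds::intrusive::feldman_hashset::hash_size< 6 >
//   >::type set_traits;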
/// Specifies binary predicate used for hash compare. /** If \p %less and \p %compare are not specified, a \p memcmp() -like @ref bitwise_compare "bit-wise hash comparator" is used because the hash value is treated as a fixed-sized bit-string. */ typedef cds::opt::none less; /// Item counter /** The item counting is an important part of the \p FeldmanHashSet algorithm: the \p empty() member function depends on correct item counting. Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option. Default is \p atomicity::item_counter. To avoid false sharing you can also use \p atomicity::cache_friendly_item_counter */ typedef cds::atomicity::item_counter item_counter; /// Array node allocator /** Allocator for array nodes. The allocator is used for creating \p headNode and \p arrayNode when the set grows. Default is \ref CDS_DEFAULT_ALLOCATOR */ typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef cds::opt::v::relaxed_ordering memory_model; /// Back-off strategy typedef cds::backoff::Default back_off; /// Internal statistics /** By default, internal statistics is disabled (\p feldman_hashset::empty_stat). Use \p feldman_hashset::stat to enable it. */ typedef empty_stat stat; /// RCU deadlock checking policy (only for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet") /** For the list of available policies see \p opt::rcu_check_deadlock */ typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to \p feldman_hashset::traits /** Supported \p Options are: - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor. @copydetails traits::hash_accessor - \p feldman_hashset::hash_size - the size of hash value in bytes. @copydetails traits::hash_size - \p feldman_hashset::hash_splitter - a hash splitter algorithm @copydetails traits::hash_splitter - \p opt::node_allocator - array node allocator. @copydetails traits::node_allocator - \p opt::compare - hash comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for hash comparison. If the option is not specified, a \p memcmp() -like bit-wise hash comparator is used because the hash value is treated as a fixed-sized bit-string. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::disposer - the functor used for disposing removed data nodes. Default is \p opt::v::empty_disposer. Due to the nature of the GC scheme the disposer may be called asynchronously. - \p opt::item_counter - the type of item counting feature. The item counting is an important part of the \p FeldmanHashSet algorithm: the \p empty() member function depends on correct item counting. Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option. Default is \p atomicity::item_counter. To avoid false sharing you can use \p atomicity::cache_friendly_item_counter - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat).
To enable it use \p feldman_hashset::stat - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet" Default is \p opt::v::rcu_throw_deadlock */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; /// Bit-wise memcmp-based comparator for hash value \p T template struct bitwise_compare { /// Compares \p lhs and \p rhs /** Returns: - < 0 if lhs < rhs - 0 if lhs == rhs - > 0 if lhs > rhs */ int operator()( T const& lhs, T const& rhs ) const { return memcmp( &lhs, &rhs, sizeof(T)); } }; /// One-level statistics, see \p FeldmanHashSet::get_level_statistics struct level_statistics { size_t array_node_count; ///< Count of array node at the level size_t node_capacity; ///< Array capacity size_t data_cell_count; ///< The number of data cells in all array node at this level size_t array_cell_count; ///< The number of array cells in all array node at this level size_t empty_cell_count; ///< The number of empty cells in all array node at this level //@cond level_statistics() : array_node_count(0) , data_cell_count(0) , array_cell_count(0) , empty_cell_count(0) {} //@endcond }; //@cond namespace details { template using hash_splitter = cds::algo::split_bitstring< HashType, HashSize >; struct metrics { size_t head_node_size; // power-of-two size_t head_node_size_log; // log2( head_node_size ) size_t array_node_size; // power-of-two size_t array_node_size_log;// log2( array_node_size ) static metrics make(size_t head_bits, size_t array_bits, size_t hash_size ) { size_t const hash_bits = hash_size * 8; if (array_bits < 2) array_bits = 2; if (head_bits < 4) head_bits = 4; if (head_bits > hash_bits) head_bits = hash_bits; if ((hash_bits - head_bits) % array_bits != 0) head_bits += (hash_bits - head_bits) % array_bits; assert((hash_bits - head_bits) % array_bits == 0); metrics m; m.head_node_size_log = head_bits; m.head_node_size = size_t(1) << head_bits; m.array_node_size_log = array_bits; m.array_node_size = size_t(1) << array_bits; return m; } }; } // namespace details //@endcond //@cond template class multilevel_array { public: typedef T value_type; typedef Traits traits; typedef typename Traits::node_allocator node_allocator; typedef typename traits::memory_model memory_model; typedef typename traits::back_off back_off; ///< Backoff strategy typedef typename traits::stat stat; ///< Internal statistics type typedef typename traits::hash_accessor hash_accessor; static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified"); /// Hash type deduced from \p hash_accessor return type typedef typename std::decay< typename std::remove_reference< decltype(hash_accessor()(std::declval())) >::type >::type hash_type; static_assert(!std::is_pointer::value, "hash_accessor should return a reference to hash value"); typedef typename cds::opt::details::make_comparator_from< hash_type, traits, feldman_hashset::bitwise_compare< hash_type > >::type hash_comparator; /// The size of hash_type in bytes, see \p traits::hash_size for explanation static constexpr size_t const c_hash_size = traits::hash_size == 0 ? 
sizeof( hash_type ) : static_cast( traits::hash_size ); typedef typename std::conditional< std::is_same< typename traits::hash_splitter, cds::opt::none >::value, typename cds::algo::select_splitter< hash_type, c_hash_size >::type, typename traits::hash_splitter >::type hash_splitter; enum node_flags { flag_array_converting = 1, ///< the cell is converting from data node to an array node flag_array_node = 2 ///< the cell is a pointer to an array node }; protected: typedef cds::details::marked_ptr< value_type, 3 > node_ptr; typedef atomics::atomic< node_ptr > atomic_node_ptr; struct array_node { array_node * const pParent; ///< parent array node size_t const idxParent; ///< index in parent array node atomic_node_ptr nodes[1]; ///< node array array_node(array_node * parent, size_t idx) : pParent(parent) , idxParent(idx) {} array_node() = delete; array_node(array_node const&) = delete; array_node(array_node&&) = delete; }; typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator; struct traverse_data { hash_splitter splitter; array_node * pArr; typename hash_splitter::uint_type nSlot; size_t nHeight; traverse_data( hash_type const& hash, multilevel_array& arr ) : splitter( hash ) { reset( arr ); } void reset( multilevel_array& arr ) { splitter.reset(); pArr = arr.head(); nSlot = splitter.cut( static_cast( arr.metrics().head_node_size_log )); assert( static_cast( nSlot ) < arr.metrics().head_node_size ); nHeight = 1; } }; protected: feldman_hashset::details::metrics const m_Metrics; array_node * m_Head; mutable stat m_Stat; public: multilevel_array(size_t head_bits, size_t array_bits ) : m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size )) , m_Head( alloc_head_node()) { assert( hash_splitter::is_correct( static_cast( metrics().head_node_size_log ))); assert( hash_splitter::is_correct( static_cast( metrics().array_node_size_log ))); } ~multilevel_array() { destroy_tree(); free_array_node( m_Head, head_size()); } node_ptr traverse(traverse_data& pos) { back_off bkoff; while (true) { node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire); if ( slot.bits() == flag_array_node ) { // array node, go down the tree assert(slot.ptr() != nullptr); assert( !pos.splitter.eos()); pos.nSlot = pos.splitter.cut( static_cast( metrics().array_node_size_log )); assert( static_cast( pos.nSlot ) < metrics().array_node_size ); pos.pArr = to_array(slot.ptr()); ++pos.nHeight; } else if (slot.bits() == flag_array_converting) { // the slot is converting to array node right now bkoff(); stats().onSlotConverting(); } else { // data node assert(slot.bits() == 0); return slot; } } // while } size_t head_size() const { return m_Metrics.head_node_size; } size_t array_node_size() const { return m_Metrics.array_node_size; } void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const { stat.clear(); gather_level_statistics(stat, 0, m_Head, head_size()); } protected: array_node * head() const { return m_Head; } stat& stats() const { return m_Stat; } feldman_hashset::details::metrics const& metrics() const { return m_Metrics; } void destroy_tree() { // The function is not thread-safe. 
For use in dtor only // Destroy all array nodes destroy_array_nodes(m_Head, head_size()); } void destroy_array_nodes(array_node * pArr, size_t nSize) { for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) { node_ptr slot = p->load(memory_model::memory_order_relaxed); if (slot.bits() == flag_array_node) { destroy_array_nodes( to_array(slot.ptr()), array_node_size()); free_array_node( to_array( slot.ptr()), array_node_size()); p->store(node_ptr(), memory_model::memory_order_relaxed); } } } static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent) { array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent); new (pNode->nodes) atomic_node_ptr[nSize]; return pNode; } array_node * alloc_head_node() const { return alloc_array_node(head_size(), nullptr, 0); } array_node * alloc_array_node(array_node * pParent, size_t idxParent) const { return alloc_array_node(array_node_size(), pParent, idxParent); } static void free_array_node( array_node * parr, size_t /*nSize*/ ) { cxx_array_node_allocator().Delete( parr, 1 ); } union converter { value_type * pData; array_node * pArr; converter(value_type * p) : pData(p) {} converter(array_node * p) : pArr(p) {} }; static array_node * to_array(value_type * p) { return converter(p).pArr; } static value_type * to_node(array_node * p) { return converter(p).pData; } void gather_level_statistics(std::vector& stat, size_t nLevel, array_node * pArr, size_t nSize) const { if (stat.size() <= nLevel) { stat.resize(nLevel + 1); stat[nLevel].node_capacity = nSize; } ++stat[nLevel].array_node_count; for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) { node_ptr slot = p->load(memory_model::memory_order_relaxed); if (slot.bits()) { ++stat[nLevel].array_cell_count; if (slot.bits() == flag_array_node) gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size()); } else if (slot.ptr()) ++stat[nLevel].data_cell_count; else ++stat[nLevel].empty_cell_count; } } bool expand_slot( traverse_data& pos, node_ptr current) { assert( !pos.splitter.eos()); return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset()); } private: bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset) { assert(current.bits() == 0); assert(current.ptr()); array_node * pArr = alloc_array_node(pParent, idxParent); node_ptr cur(current.ptr()); atomic_node_ptr& slot = pParent->nodes[idxParent]; if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed)) { stats().onExpandNodeFailed(); free_array_node( pArr, array_node_size()); return false; } typename hash_splitter::uint_type idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( static_cast( m_Metrics.array_node_size_log )); pArr->nodes[idx].store(current, memory_model::memory_order_release); cur = cur | flag_array_converting; CDS_VERIFY( slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed) ); stats().onExpandNodeSuccess(); stats().onArrayNodeCreated(); return true; } }; //@endcond } // namespace feldman_hashset //@cond // Forward declaration template < class GC, typename T, class Traits = feldman_hashset::traits > class FeldmanHashSet; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H 
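/*
    End-to-end usage sketch for the options above (illustrative only; the value
    type \p foo and accessor \p foo_hash_accessor mirror the example in
    \p traits::hash_accessor and are assumptions, not library types):

    \code
    typedef uint8_t hash_type[32];
    struct foo {
        hash_type hash;
        // ... user data
    };
    struct foo_hash_accessor {
        hash_type const& operator()( foo const& d ) const { return d.hash; }
    };
    typedef cds::intrusive::feldman_hashset::make_traits<
        cds::intrusive::feldman_hashset::hash_accessor< foo_hash_accessor >,
        cds::opt::stat< cds::intrusive::feldman_hashset::stat<> >
    >::type foo_set_traits;
    \endcode
*/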
libcds-2.3.3/cds/intrusive/details/iterable_list_base.h000066400000000000000000000300141341244201700231660ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H #include #include #include #include #include #include namespace cds { namespace intrusive { /// \p IterableList ordered list related definitions /** @ingroup cds_intrusive_helper */ namespace iterable_list { /// Node type template struct node { typedef T value_type; ///< Value type typedef cds::details::marked_ptr marked_data_ptr; ///< marked pointer to the value atomics::atomic< node* > next; ///< pointer to next node in the list atomics::atomic< marked_data_ptr > data; ///< pointer to user data, \p nullptr if the node is free //@cond node() { next.store( nullptr, atomics::memory_order_release ); data.store( marked_data_ptr(), atomics::memory_order_release ); } node( value_type * pVal ) { next.store( nullptr, atomics::memory_order_release ); data.store( marked_data_ptr( pVal ), atomics::memory_order_release ); } //@endcond }; /// \p IterableList internal statistics template struct stat { typedef EventCounter event_counter; ///< Event counter type event_counter m_nInsertSuccess; ///< Number of success \p insert() operations event_counter m_nInsertFailed; ///< Number of failed \p insert() operations event_counter m_nInsertRetry; ///< Number of attempts to insert new item event_counter m_nReuseNode; ///< Number of reusing empty node when inserting/updating event_counter m_nNodeMarkFailed; ///< Number of unsuccessful marking attempts when we try to insert new data event_counter m_nNodeSeqBreak; ///< Number of breaking sequence events of \p prev -> \p next node when we try to insert new data event_counter m_nNullPrevABA; ///< Number of ABA-problem for \p nullptr prev node event_counter m_nNewNodeCreated; ///< Number of new node created when we try to insert new data event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() event_counter m_nUpdateExisting; ///< Number of existing item updates event_counter m_nUpdateFailed; ///< Number of failed \p update() call event_counter m_nUpdateRetry; ///< Number of attempts to update the item event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations event_counter m_nNodeCreated; ///< Number of created internal nodes event_counter m_nNodeRemoved; ///< Number of removed internal nodes //@cond void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onInsertRetry() { ++m_nInsertRetry; } void onReuseNode() { ++m_nReuseNode; } void onNodeMarkFailed() { ++m_nNodeMarkFailed; } void onNodeSeqBreak() { ++m_nNodeSeqBreak; } void onNullPrevABA() { ++m_nNullPrevABA; } void onNewNodeCreated() { ++m_nNewNodeCreated; } void onUpdateNew() { ++m_nUpdateNew; } void onUpdateExisting() { ++m_nUpdateExisting; } void onUpdateFailed() { ++m_nUpdateFailed; } void onUpdateRetry() { ++m_nUpdateRetry; } 
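// Usage note (sketch, not part of the header): the statistics object above is
// activated through the \p opt::stat option, e.g.
//
//   typedef cds::intrusive::iterable_list::make_traits<
//       cds::opt::stat< cds::intrusive::iterable_list::stat<> >
//   >::type list_traits;
//
// after which counters such as \p m_nInsertSuccess accumulate events.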
void onEraseSuccess() { ++m_nEraseSuccess; } void onEraseFailed() { ++m_nEraseFailed; } void onEraseRetry() { ++m_nEraseRetry; } void onFindSuccess() { ++m_nFindSuccess; } void onFindFailed() { ++m_nFindFailed; } void onNodeCreated() { ++m_nNodeCreated; } void onNodeRemoved() { ++m_nNodeRemoved; } //@endcond }; /// \p IterableList empty internal statistics struct empty_stat { //@cond void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertRetry() const {} void onReuseNode() const {} void onNodeMarkFailed() const {} void onNodeSeqBreak() const {} void onNullPrevABA() const {} void onNewNodeCreated() const {} void onUpdateNew() const {} void onUpdateExisting() const {} void onUpdateFailed() const {} void onUpdateRetry() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onEraseRetry() const {} void onFindSuccess() const {} void onFindFailed() const {} void onNodeCreated() const {} void onNodeRemoved() const {} //@endcond }; //@cond template > struct wrapped_stat { typedef Stat stat_type; wrapped_stat( stat_type& st ) : m_stat( st ) {} void onInsertSuccess() { m_stat.onInsertSuccess(); } void onInsertFailed() { m_stat.onInsertFailed(); } void onInsertRetry() { m_stat.onInsertRetry(); } void onReuseNode() { m_stat.onReuseNode(); } void onNodeMarkFailed() { m_stat.onNodeMarkFailed();} void onNodeSeqBreak() { m_stat.onNodeSeqBreak(); } void onNullPrevABA() { m_stat.onNullPrevABA(); } void onNewNodeCreated() { m_stat.onNewNodeCreated();} void onUpdateNew() { m_stat.onUpdateNew(); } void onUpdateExisting() { m_stat.onUpdateExisting();} void onUpdateFailed() { m_stat.onUpdateFailed(); } void onUpdateRetry() { m_stat.onUpdateRetry(); } void onEraseSuccess() { m_stat.onEraseSuccess(); } void onEraseFailed() { m_stat.onEraseFailed(); } void onEraseRetry() { m_stat.onEraseRetry(); } void onFindSuccess() { m_stat.onFindSuccess(); } void onFindFailed() { m_stat.onFindFailed(); } void onNodeCreated() { m_stat.onNodeCreated(); } void onNodeRemoved() { m_stat.onNodeRemoved(); } stat_type& m_stat; }; //@endcond /// \p IterableList traits struct traits { /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for key compare. /** Default is \p std::less */ typedef opt::none less; /// Node allocator typedef CDS_DEFAULT_ALLOCATOR node_allocator; /// Back-off strategy typedef cds::backoff::Default back_off; /// Disposer for removing items typedef opt::v::empty_disposer disposer; /// Internal statistics /** By default, internal statistics is disabled (\p iterable_list::empty_stat). Use \p iterable_list::stat to enable it. */ typedef empty_stat stat; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ typedef opt::v::relaxed_ordering memory_model; }; /// Metafunction converting option list to \p iterable_list::traits /** Supported \p Options are: - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. 
- \p opt::node_allocator - node allocator, default is \p std::allocator. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. Due the nature of GC schema the disposer may be called asynchronously. - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::stat - internal statistics. By default, it is disabled (\p iterable_list::empty_stat). To enable it use \p iterable_list::stat - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond template struct select_stat_wrapper { typedef Stat stat; typedef iterable_list::wrapped_stat wrapped_stat; enum { empty = false }; }; template <> struct select_stat_wrapper< empty_stat > { typedef empty_stat stat; typedef empty_stat wrapped_stat; enum { empty = true }; }; template struct select_stat_wrapper< iterable_list::wrapped_stat>: public select_stat_wrapper {}; //@endcond } // namespace iterable_list //@cond // Forward declaration template < class GC, typename T, class Traits = iterable_list::traits > class IterableList; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H libcds-2.3.3/cds/intrusive/details/lazy_list_base.h000066400000000000000000000443331341244201700223670ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H #include #include #include #include #include #include namespace cds { namespace intrusive { /// LazyList ordered list related definitions /** @ingroup cds_intrusive_helper */ namespace lazy_list { /// Lazy list node /** Template parameters: - GC - garbage collector - Lock - lock type. 
Default is \p cds::sync::spin - Tag - a \ref cds_intrusive_hook_tag "tag" */ template < class GC ,typename Lock = cds::sync::spin ,typename Tag = opt::none > struct node { typedef GC gc ; ///< Garbage collector typedef Lock lock_type ; ///< Lock type typedef Tag tag ; ///< tag typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC atomic_marked_ptr m_pNext; ///< pointer to the next node in the list + logical deletion mark mutable lock_type m_Lock; ///< Node lock /// Checks if node is marked bool is_marked() const { return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; } /// Default ctor node() : m_pNext( nullptr ) {} }; //@cond template struct node_cleaner { void operator()( Node * p ) { typedef typename Node::marked_ptr marked_ptr; p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); } }; //@endcond //@cond struct undefined_gc; struct default_hook { typedef undefined_gc gc; typedef opt::none tag; typedef sync::spin lock_type; }; //@endcond //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef typename options::lock_type lock_type; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - opt::gc - garbage collector - opt::lock_type - lock type used for node locking. Default is sync::spin - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset defines offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - opt::gc - garbage collector - opt::lock_type - lock type used for node locking. Default is sync::spin - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - opt::gc - garbage collector used. - opt::lock_type - lock type used for node locking. Default is sync::spin - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... 
> { //@cond typedef NodeTraits node_traits; //@endcond }; /// Check link template struct link_checker { //@cond typedef Node node_type; //@endcond /// Checks if the link field of node \p pNode is \p nullptr /** An asserting is generated if \p pNode link field is not \p nullptr */ static void is_empty( node_type const * pNode ) { assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); CDS_UNUSED( pNode ); } }; //@cond template struct link_checker_selector; template struct link_checker_selector< GC, Node, opt::never_check_link > { typedef intrusive::opt::v::empty_link_checker type; }; template struct link_checker_selector< GC, Node, opt::debug_check_link > { # ifdef _DEBUG typedef link_checker type; # else typedef intrusive::opt::v::empty_link_checker type; # endif }; template struct link_checker_selector< GC, Node, opt::always_check_link > { typedef link_checker type; }; //@endcond /// Metafunction for selecting appropriate link checking policy template < typename Node, opt::link_check_type LinkType > struct get_link_checker { //@cond typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; //@endcond }; /// \p LazyList internal statistics template struct stat { typedef EventCounter event_counter; ///< Event counter type event_counter m_nInsertSuccess; ///< Number of success \p insert() operations event_counter m_nInsertFailed; ///< Number of failed \p insert() operations event_counter m_nInsertRetry; ///< Number of attempts to insert new item event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() event_counter m_nUpdateExisting; ///< Number of existing item updates event_counter m_nUpdateFailed; ///< Number of failed \p update() call event_counter m_nUpdateRetry; ///< Number of attempts to \p update() the item event_counter m_nUpdateMarked; ///< Number of attempts to \p update() logically deleted (marked) items event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations event_counter m_nValidationSuccess; ///< Number of successful validating of search result event_counter m_nValidationFailed; ///< Number of failed validating of search result //@cond void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onInsertRetry() { ++m_nInsertRetry; } void onUpdateNew() { ++m_nUpdateNew; } void onUpdateExisting() { ++m_nUpdateExisting; } void onUpdateFailed() { ++m_nUpdateFailed; } void onUpdateRetry() { ++m_nUpdateRetry; } void onUpdateMarked() { ++m_nUpdateMarked; } void onEraseSuccess() { ++m_nEraseSuccess; } void onEraseFailed() { ++m_nEraseFailed; } void onEraseRetry() { ++m_nEraseRetry; } void onFindSuccess() { ++m_nFindSuccess; } void onFindFailed() { ++m_nFindFailed; } void onValidationSuccess() { ++m_nValidationSuccess; } void onValidationFailed() { ++m_nValidationFailed; } //@endcond }; /// \p LazyList empty internal statistics struct empty_stat { //@cond void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertRetry() const {} void onUpdateNew() const {} void onUpdateExisting() const {} void onUpdateFailed() const {} void onUpdateRetry() const {} void onUpdateMarked() 
const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onEraseRetry() const {} void onFindSuccess() const {} void onFindFailed() const {} void onValidationSuccess() const {} void onValidationFailed() const {} //@endcond }; //@cond template <typename Stat = lazy_list::stat<>> struct wrapped_stat { typedef Stat stat_type; wrapped_stat( stat_type& st ) : m_stat( st ) {} void onInsertSuccess() { m_stat.onInsertSuccess(); } void onInsertFailed() { m_stat.onInsertFailed(); } void onInsertRetry() { m_stat.onInsertRetry(); } void onUpdateNew() { m_stat.onUpdateNew(); } void onUpdateExisting() { m_stat.onUpdateExisting(); } void onUpdateFailed() { m_stat.onUpdateFailed(); } void onUpdateRetry() { m_stat.onUpdateRetry(); } void onUpdateMarked() { m_stat.onUpdateMarked(); } void onEraseSuccess() { m_stat.onEraseSuccess(); } void onEraseFailed() { m_stat.onEraseFailed(); } void onEraseRetry() { m_stat.onEraseRetry(); } void onFindSuccess() { m_stat.onFindSuccess(); } void onFindFailed() { m_stat.onFindFailed(); } void onValidationSuccess() { m_stat.onValidationSuccess(); } void onValidationFailed() { m_stat.onValidationFailed(); } stat_type& m_stat; }; //@endcond /// LazyList traits struct traits { /// Hook used /** Possible values are: \p lazy_list::base_hook, \p lazy_list::member_hook, \p lazy_list::traits_hook. */ typedef base_hook<> hook; /// Key comparing functor /** No default functor is provided. If the functor is not specified, the \p less option is used. */ typedef opt::none compare; /// Specifies binary predicate used for comparing keys /** Default is \p std::less. */ typedef opt::none less; /// Specifies binary functor used for comparing keys for equality (for unordered list only) /** If the \p equal_to option is not specified, \p compare is used; if \p compare is not specified, \p less is used; if \p less is not specified, then \p std::equal_to is used. */ typedef opt::none equal_to; /// Specifies list ordering policy /** If \p sort is \p true, then the list maintains items in sorted order, otherwise the list is unordered. Default is \p true. Note that if \p sort is \p false, then lookup operations scan the entire list. */ static const bool sort = true; /// Back-off strategy typedef cds::backoff::Default back_off; /// Disposer for removing items typedef opt::v::empty_disposer disposer; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics /** By default, internal statistics is disabled (\p lazy_list::empty_stat). Use \p lazy_list::stat to enable it. */ typedef empty_stat stat; /// Link fields checking feature /** Default is \p opt::debug_check_link */ static const opt::link_check_type link_checker = opt::debug_check_link; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") /** For the list of available options see \p opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to \p lazy_list::traits /** Supported \p Options are: - \p opt::hook - hook used. Possible values are: \p lazy_list::base_hook, \p lazy_list::member_hook, \p lazy_list::traits_hook. If the option is not specified, \p %lazy_list::base_hook<> and \p gc::HP are used. - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - \p opt::equal_to - specifies binary functor for comparing keys for equality. This option is applicable only for unordered list. If \p equal_to is not specified, \p compare is used; if \p compare is not specified, \p less is used. - \p opt::sort - specifies ordering policy. Default value is \p true, i.e. the list is ordered. Note: the unordered mode is not fully supported yet. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. Due to the nature of the GC scheme the disposer may be called asynchronously. - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::stat - internal statistics. By default, it is disabled (\p lazy_list::empty_stat). To enable it use \p lazy_list::stat - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList". Default is \p opt::v::rcu_throw_deadlock */
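/*
    Usage sketch (illustrative assumptions: the comparison functor \p my_less
    and the \p cds::gc::HP garbage collector are example choices): a typical
    \p make_traits invocation for an intrusive \p LazyList.

    \code
    typedef cds::intrusive::lazy_list::make_traits<
        cds::intrusive::opt::hook<
            cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP >>
        >,
        cds::opt::less< my_less >
    >::type list_traits;
    \endcode
*/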
template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond template <typename Stat> struct select_stat_wrapper { typedef Stat stat; typedef lazy_list::wrapped_stat<Stat> wrapped_stat; enum { empty = false }; }; template <> struct select_stat_wrapper< empty_stat > { typedef empty_stat stat; typedef empty_stat wrapped_stat; enum { empty = true }; }; template <typename Stat> struct select_stat_wrapper< lazy_list::wrapped_stat<Stat>>: public select_stat_wrapper< Stat > {}; //@endcond } // namespace lazy_list //@cond // Forward declaration template < class GC, typename T, class Traits = lazy_list::traits > class LazyList; //@endcond //@cond template <typename List> struct is_lazy_list { enum { value = false }; }; template <typename GC, typename T, typename Traits> struct is_lazy_list< LazyList< GC, T, Traits >> { enum { value = true }; }; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H libcds-2.3.3/cds/intrusive/details/michael_list_base.h000066400000000000000000000405731341244201700230140ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_LIST_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_MICHAEL_LIST_BASE_H #include #include #include #include #include #include namespace cds { namespace intrusive { /// MichaelList ordered list related definitions /** @ingroup cds_intrusive_helper */ namespace michael_list { /// Michael's list node /** Template parameters: - \p GC - garbage collector - \p Tag - a \ref cds_intrusive_hook_tag "tag" */ template struct node { typedef GC gc ; ///< Garbage collector typedef Tag tag ; ///< tag typedef cds::details::marked_ptr marked_ptr; ///< marked pointer typedef typename gc::template atomic_marked_ptr atomic_marked_ptr; ///< atomic marked pointer specific for GC atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container constexpr node() noexcept : m_pNext( nullptr ) {} }; //@cond template struct node_cleaner { void operator()( Node * p ) { typedef typename Node::marked_ptr marked_ptr; p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); } }; //@endcond //@cond struct undefined_gc; struct default_hook { typedef undefined_gc gc; typedef opt::none tag; }; //@endcond //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset defines offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... 
> { //@cond typedef NodeTraits node_traits; //@endcond }; /// Checks link template <typename Node> struct link_checker { //@cond typedef Node node_type; //@endcond /// Checks if the link field of node \p pNode is \p nullptr /** An assertion is generated if the \p pNode link field is not \p nullptr */ static void is_empty( const node_type * pNode ) { assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); CDS_UNUSED( pNode ); } }; //@cond template <typename GC, typename Node, opt::link_check_type LinkType> struct link_checker_selector; template <typename GC, typename Node> struct link_checker_selector< GC, Node, opt::never_check_link > { typedef intrusive::opt::v::empty_link_checker type; }; template <typename GC, typename Node> struct link_checker_selector< GC, Node, opt::debug_check_link > { # ifdef _DEBUG typedef link_checker<Node> type; # else typedef intrusive::opt::v::empty_link_checker type; # endif }; template <typename GC, typename Node> struct link_checker_selector< GC, Node, opt::always_check_link > { typedef link_checker<Node> type; }; //@endcond /// Metafunction for selecting appropriate link checking policy template < typename Node, opt::link_check_type LinkType > struct get_link_checker { //@cond typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; //@endcond }; /// \p MichaelList internal statistics template <typename EventCounter = cds::atomicity::event_counter> struct stat { typedef EventCounter event_counter; ///< Event counter type event_counter m_nInsertSuccess; ///< Number of successful \p insert() operations event_counter m_nInsertFailed; ///< Number of failed \p insert() operations event_counter m_nInsertRetry; ///< Number of attempts to insert new item event_counter m_nUpdateNew; ///< Number of new items inserted by \p update() event_counter m_nUpdateExisting; ///< Number of existing item updates event_counter m_nUpdateFailed; ///< Number of failed \p update() calls event_counter m_nUpdateRetry; ///< Number of attempts to \p update() the item event_counter m_nUpdateMarked; ///< Number of attempts to \p update() logically deleted (marked) items event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations event_counter m_nHelpingSuccess; ///< Number of successful help attempts to remove a marked item during searching event_counter m_nHelpingFailed; ///< Number of failed help attempts to remove a marked item during searching //@cond void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onInsertRetry() { ++m_nInsertRetry; } void onUpdateNew() { ++m_nUpdateNew; } void onUpdateExisting() { ++m_nUpdateExisting; } void onUpdateFailed() { ++m_nUpdateFailed; } void onUpdateRetry() { ++m_nUpdateRetry; } void onUpdateMarked() { ++m_nUpdateMarked; } void onEraseSuccess() { ++m_nEraseSuccess; } void onEraseFailed() { ++m_nEraseFailed; } void onEraseRetry() { ++m_nEraseRetry; } void onFindSuccess() { ++m_nFindSuccess; } void onFindFailed() { ++m_nFindFailed; } void onHelpingSuccess() { ++m_nHelpingSuccess; } void onHelpingFailed() { ++m_nHelpingFailed; } //@endcond }; /// \p MichaelList empty internal statistics struct empty_stat { //@cond void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertRetry() const {} void onUpdateNew() const {} void onUpdateExisting() const {} void onUpdateFailed() const {}
void onUpdateRetry() const {} void onUpdateMarked() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onEraseRetry() const {} void onFindSuccess() const {} void onFindFailed() const {} void onHelpingSuccess() const {} void onHelpingFailed() const {} //@endcond }; //@cond template <typename Stat = michael_list::stat<>> struct wrapped_stat { typedef Stat stat_type; wrapped_stat( stat_type& st ) : m_stat( st ) {} void onInsertSuccess() { m_stat.onInsertSuccess(); } void onInsertFailed() { m_stat.onInsertFailed(); } void onInsertRetry() { m_stat.onInsertRetry(); } void onUpdateNew() { m_stat.onUpdateNew(); } void onUpdateExisting() { m_stat.onUpdateExisting(); } void onUpdateFailed() { m_stat.onUpdateFailed(); } void onUpdateRetry() { m_stat.onUpdateRetry(); } void onUpdateMarked() { m_stat.onUpdateMarked(); } void onEraseSuccess() { m_stat.onEraseSuccess(); } void onEraseFailed() { m_stat.onEraseFailed(); } void onEraseRetry() { m_stat.onEraseRetry(); } void onFindSuccess() { m_stat.onFindSuccess(); } void onFindFailed() { m_stat.onFindFailed(); } void onHelpingSuccess() { m_stat.onHelpingSuccess(); } void onHelpingFailed() { m_stat.onHelpingFailed(); } stat_type& m_stat; }; //@endcond /// MichaelList traits struct traits { /// Hook used /** Possible values are: \p michael_list::base_hook, \p michael_list::member_hook, \p michael_list::traits_hook. */ typedef base_hook<> hook; /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less option is used. */ typedef opt::none compare; /// Specifies binary predicate used for key compare. /** Default is \p std::less. */ typedef opt::none less; /// Back-off strategy typedef cds::backoff::Default back_off; /// Disposer for removing items typedef opt::v::empty_disposer disposer; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics /** By default, internal statistics is disabled (\p michael_list::empty_stat). Use \p michael_list::stat to enable it. */ typedef empty_stat stat; /// Link fields checking feature /** Default is \p opt::debug_check_link */ static const opt::link_check_type link_checker = opt::debug_check_link; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") /** For the list of available policies see \p opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; }; /// Metafunction converting option list to \p michael_list::traits /** Supported \p Options are: - \p opt::hook - hook used. Possible values are: \p michael_list::base_hook, \p michael_list::member_hook, \p michael_list::traits_hook. If the option is not specified, \p %michael_list::base_hook<> and \p gc::HP are used. - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. Due to the nature of the GC scheme the disposer may be called asynchronously. - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat). To enable it use \p michael_list::stat - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList". Default is \p opt::v::rcu_throw_deadlock */
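/*
    Usage sketch (the value type \p my_data and predicate \p my_less are
    assumptions for the example): \p MichaelList traits built with a member
    hook, using \p offsetof as the \p member_hook documentation prescribes.

    \code
    struct my_data {
        cds::intrusive::michael_list::node< cds::gc::HP > hook_field;
        int key;
    };
    typedef cds::intrusive::michael_list::make_traits<
        cds::intrusive::opt::hook<
            cds::intrusive::michael_list::member_hook<
                offsetof( my_data, hook_field ),
                cds::opt::gc< cds::gc::HP >
            >
        >,
        cds::opt::less< my_less >
    >::type list_traits;
    \endcode
*/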
Due the nature of GC schema the disposer may be called asynchronously. - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat). To enable it use \p michael_list::stat - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" Default is \p opt::v::rcu_throw_deadlock */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond template struct select_stat_wrapper { typedef Stat stat; typedef michael_list::wrapped_stat wrapped_stat; enum { empty = false }; }; template <> struct select_stat_wrapper< empty_stat > { typedef empty_stat stat; typedef empty_stat wrapped_stat; enum { empty = true }; }; template struct select_stat_wrapper< michael_list::wrapped_stat>: public select_stat_wrapper< Stat > {}; //@endcond } // namespace michael_list //@cond // Forward declaration template < class GC, typename T, class Traits = michael_list::traits > class MichaelList; //@endcond //@cond template struct is_michael_list { enum { value = false }; }; template struct is_michael_list< MichaelList< GC, T, Traits >> { enum { value = true }; }; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_LIST_BASE_H libcds-2.3.3/cds/intrusive/details/michael_set_base.h000066400000000000000000000170361341244201700226320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H #include #include #include #include #include namespace cds { namespace intrusive { /// MichaelHashSet related definitions /** @ingroup cds_intrusive_helper */ namespace michael_set { /// MichaelHashSet traits struct traits { /// Hash function /** Hash function converts the key fields of struct \p T stored in the hash-set into value of type \p size_t called hash value that is an index of hash table. This is mandatory type and has no predefined one. */ typedef opt::none hash; /// Item counter /** The item counting is an important part of \p MichaelHashSet algorithm: the \p empty() member function depends on correct item counting. You may use \p atomicity::empty_item_counter if don't need \p empty() and \p size() member functions. Default is \p atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter */ typedef cds::atomicity::item_counter item_counter; /// Bucket table allocator /** Allocator for bucket table. 
Default is \ref CDS_DEFAULT_ALLOCATOR.
                The allocator is used only in the constructor for allocating the bucket table
                and in the destructor for destroying it.
            */
            typedef CDS_DEFAULT_ALLOCATOR allocator;
        };

        /// Metafunction converting option list to traits struct
        /**
            Available \p Options:
            - \p opt::hash - mandatory option, specifies hash functor.
            - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter for default type.
            - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
        */
        template <typename... Options>
        struct make_traits {
            typedef typename cds::opt::make_options< traits, Options...>::type type; ///< Metafunction result
        };

        //@cond
        namespace details {
            static inline size_t init_hash_bitmask( size_t nMaxItemCount, size_t nLoadFactor )
            {
                if ( nLoadFactor == 0 )
                    nLoadFactor = 1;
                if ( nMaxItemCount == 0 )
                    nMaxItemCount = 4;
                const size_t nBucketCount = nMaxItemCount / nLoadFactor;
                const size_t exp2 = size_t( 1 ) << cds::bitop::MSB( nBucketCount );
                return ( exp2 < nBucketCount ? exp2 * 2 : exp2 ) - 1;
            }

            template <typename OrderedList, bool IsConst>
            struct list_iterator_selector;

            template <typename OrderedList>
            struct list_iterator_selector< OrderedList, false>
            {
                typedef OrderedList * bucket_ptr;
                typedef typename OrderedList::iterator type;
            };

            template <typename OrderedList>
            struct list_iterator_selector< OrderedList, true>
            {
                typedef OrderedList const * bucket_ptr;
                typedef typename OrderedList::const_iterator type;
            };

            template <typename OrderedList, bool IsConst>
            class iterator
            {
                friend class iterator< OrderedList, !IsConst >;

            protected:
                typedef OrderedList bucket_type;
                typedef typename list_iterator_selector< bucket_type, IsConst>::bucket_ptr bucket_ptr;
                typedef typename list_iterator_selector< bucket_type, IsConst>::type list_iterator;

                bucket_ptr      m_pCurBucket;
                list_iterator   m_itList;
                bucket_ptr      m_pEndBucket;

                void next()
                {
                    if ( m_pCurBucket < m_pEndBucket ) {
                        if ( ++m_itList != m_pCurBucket->end())
                            return;
                        while ( ++m_pCurBucket < m_pEndBucket ) {
                            m_itList = m_pCurBucket->begin();
                            if ( m_itList != m_pCurBucket->end())
                                return;
                        }
                    }
                    m_pCurBucket = m_pEndBucket - 1;
                    m_itList = m_pCurBucket->end();
                }

            public:
                typedef typename list_iterator::value_ptr value_ptr;
                typedef typename list_iterator::value_ref value_ref;

            public:
                iterator()
                    : m_pCurBucket( nullptr )
                    , m_itList()
                    , m_pEndBucket( nullptr )
                {}

                iterator( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast )
                    : m_pCurBucket( pFirst )
                    , m_itList( it )
                    , m_pEndBucket( pLast )
                {
                    if ( it == pFirst->end())
                        next();
                }

                iterator( iterator const& src )
                    : m_pCurBucket( src.m_pCurBucket )
                    , m_itList( src.m_itList )
                    , m_pEndBucket( src.m_pEndBucket )
                {}

                value_ptr operator ->() const
                {
                    assert( m_pCurBucket != nullptr );
                    return m_itList.operator ->();
                }

                value_ref operator *() const
                {
                    assert( m_pCurBucket != nullptr );
                    return m_itList.operator *();
                }

                /// Pre-increment
                iterator& operator ++()
                {
                    next();
                    return *this;
                }

                iterator& operator = (const iterator& src)
                {
                    m_pCurBucket = src.m_pCurBucket;
                    m_pEndBucket = src.m_pEndBucket;
                    m_itList = src.m_itList;
                    return *this;
                }

                bucket_ptr bucket() const
                {
                    return m_pCurBucket != m_pEndBucket ?
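            /*
                Worked example for init_hash_bitmask() above (illustration only):
                    nMaxItemCount = 1000, nLoadFactor = 2
                    nBucketCount  = 1000 / 2 = 500
                    MSB( 500 ) = 8, so exp2 = 1 << 8 = 256
                    256 < 500, so the result is 256 * 2 - 1 = 511
                i.e. the hash value is masked to 9 bits, giving 512 buckets - the smallest
                power of two that is not less than the computed bucket count.
                \code
                size_t const mask = init_hash_bitmask( 1000, 2 );
                assert( mask == 511 );
                size_t const bucket_idx = hash_value & mask;  // hash_value is an assumption
                \endcode
            */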
m_pCurBucket : nullptr; } list_iterator const& underlying_iterator() const { return m_itList; } template bool operator ==(iterator const& i) const { return m_pCurBucket == i.m_pCurBucket && m_itList == i.m_itList; } template bool operator !=(iterator const& i ) const { return !( *this == i ); } }; } //@endcond } // namespace michael_set //@cond // Forward declarations template class MichaelHashSet; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H libcds-2.3.3/cds/intrusive/details/node_traits.h000066400000000000000000000136001341244201700216670ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_NODE_TRAITS_H #define CDSLIB_INTRUSIVE_DETAILS_NODE_TRAITS_H #include namespace cds { namespace intrusive { #ifdef CDS_DOXYGEN_INVOKED /// Container's node traits /** @ingroup cds_intrusive_helper This traits is intended for converting between type \p T of value stored in the intrusive container and container's node type \p NodeType. There are separate specializations for each \p Hook type. */ template struct node_traits { typedef T value_type ; ///< Value type typedef NodeType node_type ; ///< Node type /// Convert value reference to node pointer static node_type * to_node_ptr( value_type& v ); /// Convert value pointer to node pointer static node_type * to_node_ptr( value_type * v ); /// Convert value reference to node pointer (const version) static const node_type * to_node_ptr( value_type const& v ); /// Convert value pointer to node pointer (const version) static const node_type * to_node_ptr( value_type const * v ); /// Convert node refernce to value pointer static value_type * to_value_ptr( node_type& n ); /// Convert node pointer to value pointer static value_type * to_value_ptr( node_type * n ); /// Convert node reference to value pointer (const version) static const value_type * to_value_ptr( node_type const & n ); /// Convert node pointer to value pointer (const version) static const value_type * to_value_ptr( node_type const * n ); }; #else template struct node_traits; #endif //@cond template struct node_traits { typedef T value_type; typedef NodeType node_type; static node_type * to_node_ptr( value_type& v ) { return static_cast( &v ); } static node_type * to_node_ptr( value_type * v ) { return v ? static_cast(v) : nullptr; } static const node_type * to_node_ptr( const value_type& v ) { return static_cast( &v ); } static const node_type * to_node_ptr( const value_type * v ) { return v ? static_cast(v) : nullptr; } static value_type * to_value_ptr( node_type& n ) { return static_cast( &n ); } static value_type * to_value_ptr( node_type * n ) { return n ? static_cast(n) : nullptr; } static const value_type * to_value_ptr( const node_type& n ) { return static_cast( &n ); } static const value_type * to_value_ptr( const node_type * n ) { return n ? static_cast(n) : nullptr; } }; template struct node_traits { typedef T value_type; typedef NodeType node_type; static node_type * to_node_ptr( value_type& v ) { return reinterpret_cast( reinterpret_cast(&v) + Hook::c_nMemberOffset ); } static node_type * to_node_ptr( value_type * v ) { return v ? 
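    /*
        Sketch of the member-hook address arithmetic implemented above
        (\p my_item and its \p hook member are assumptions, not library types):
        \code
        struct my_item {
            int key;
            cds::intrusive::michael_list::node< cds::gc::HP > hook;  // embedded node
        };
        // with c_nMemberOffset == offsetof( my_item, hook ):
        //   to_node_ptr( item )  == (node_type*)( (char*) &item + offsetof( my_item, hook ))
        //   to_value_ptr( node ) == (my_item*) ( (char*) &node - offsetof( my_item, hook ))
        // so a value -> node -> value conversion round-trips to the same address
        \endcode
    */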
to_node_ptr( *v ) : nullptr; } static const node_type * to_node_ptr( const value_type& v ) { return reinterpret_cast( reinterpret_cast(&v) + Hook::c_nMemberOffset ); } static const node_type * to_node_ptr( const value_type * v ) { return v ? to_node_ptr( *v ) : nullptr; } static value_type * to_value_ptr( node_type& n ) { return reinterpret_cast( reinterpret_cast(&n) - Hook::c_nMemberOffset ); } static value_type * to_value_ptr( node_type * n ) { return n ? to_value_ptr( *n ) : nullptr; } static const value_type * to_value_ptr( const node_type& n ) { return reinterpret_cast( reinterpret_cast(&n) - Hook::c_nMemberOffset ); } static const value_type * to_value_ptr( const node_type * n ) { return n ? to_value_ptr( *n ) : nullptr; } }; template struct node_traits: public Hook::node_traits {}; //@endcond /// Node traits selector metafunction /** @ingroup cds_intrusive_helper The metafunction selects appropriate \ref node_traits specialization based on value type \p T, node type \p NodeType, and hook type \p Hook. */ template struct get_node_traits { //@cond typedef node_traits type; //@endcond }; //@cond /// Functor converting container's node type to value type //TODO: delete template struct node_to_value { typename Container::value_type * operator()( typename Container::node_type * p ) const { typedef typename Container::node_traits node_traits; return node_traits::to_value_ptr( p ); } }; //@endcond }} // namespace cds::intrusuve #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_NODE_TRAITS_H libcds-2.3.3/cds/intrusive/details/raw_ptr_disposer.h000066400000000000000000000043341341244201700227460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_RAW_PTR_DISPOSER_H #define CDSLIB_INTRUSIVE_DETAILS_RAW_PTR_DISPOSER_H #include //@cond namespace cds { namespace intrusive { namespace details { template struct raw_ptr_disposer { typedef RCU gc; typedef NodeType node_type; typedef Disposer disposer; node_type * pReclaimedChain; raw_ptr_disposer() : pReclaimedChain( nullptr ) {} template explicit raw_ptr_disposer( Position& pos ) : pReclaimedChain( pos.pDelChain ) { pos.pDelChain = nullptr; } raw_ptr_disposer( raw_ptr_disposer&& d ) : pReclaimedChain( d.pReclaimedChain ) { d.pReclaimedChain = nullptr; } raw_ptr_disposer( raw_ptr_disposer const& ) = delete; ~raw_ptr_disposer() { apply(); } raw_ptr_disposer& combine(raw_ptr_disposer&& d) { if ( pReclaimedChain == nullptr ) pReclaimedChain = d.pReclaimedChain; else if ( d.pReclaimedChain ) { // union reclaimed chains node_type * pEnd = d.pReclaimedChain; for ( ; pEnd->m_pDelChain; pEnd = pEnd->m_pDelChain ); pEnd->m_pDelChain = pReclaimedChain; pReclaimedChain = d.pReclaimedChain; } d.pReclaimedChain = nullptr; return *this; } raw_ptr_disposer& operator=(raw_ptr_disposer const& d) = delete; raw_ptr_disposer& operator=( raw_ptr_disposer&& d ) = delete; void apply() { if ( pReclaimedChain ) { assert( !gc::is_locked()); disposer()( pReclaimedChain ); pReclaimedChain = nullptr; } } }; }}} // namespace cds::intrusive::details //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_RAW_PTR_DISPOSER_H libcds-2.3.3/cds/intrusive/details/single_link_struct.h000066400000000000000000000124301341244201700232560ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_SINGLE_LINK_STRUCT_H #define CDSLIB_INTRUSIVE_DETAILS_SINGLE_LINK_STRUCT_H #include #include #include namespace cds { namespace intrusive { /// Definitions common for single-linked data structures /** @ingroup cds_intrusive_helper */ namespace single_link { /// Container's node /** Template parameters: - GC - garbage collector used - Tag - a tag used to distinguish between different implementation */ template struct node { typedef GC gc ; ///< Garbage collector typedef Tag tag ; ///< tag typedef typename gc::template atomic_ref atomic_node_ptr; ///< atomic pointer /// Rebind node for other template parameters template struct rebind { typedef node other ; ///< Rebinding result }; atomic_node_ptr m_pNext ; ///< pointer to the next node in the container node() noexcept { m_pNext.store( nullptr, atomics::memory_order_release ); } }; //@cond struct default_hook { typedef cds::gc::default_gc gc; typedef opt::none tag; }; //@endcond //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - opt::gc - garbage collector used. - opt::tag - tag */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset defines offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - opt::gc - garbage collector used. - opt::tag - tag */ template < size_t MemberOffset, typename... Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - opt::gc - garbage collector used. - opt::tag - tag */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... 
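        /*
            Typical use of a single-linked node (a sketch; the item type and the choice
            of garbage collector are assumptions):
            \code
            struct my_item: public cds::intrusive::single_link::node< cds::gc::HP >
            {
                int data;
            };
            \endcode
            A container built on single-linked nodes then takes the hook via
            \p cds::intrusive::opt::hook< base_hook< cds::opt::gc< cds::gc::HP >>>.
        */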
> { //@cond typedef NodeTraits node_traits; //@endcond }; /// Check link template struct link_checker { //@cond typedef Node node_type; //@endcond /// Checks if the link field of node \p pNode is \p nullptr /** An asserting is generated if \p pNode link field is not \p nullptr */ static void is_empty( const node_type * pNode ) { assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); CDS_UNUSED( pNode ); } }; //@cond template struct link_checker_selector; template struct link_checker_selector< GC, Node, opt::never_check_link > { typedef intrusive::opt::v::empty_link_checker type; }; template struct link_checker_selector< GC, Node, opt::debug_check_link > { # ifdef _DEBUG typedef link_checker type; # else typedef intrusive::opt::v::empty_link_checker type; # endif }; template struct link_checker_selector< GC, Node, opt::always_check_link > { typedef link_checker type; }; //@endcond /// Metafunction for selecting appropriate link checking policy template < typename Node, opt::link_check_type LinkType > struct get_link_checker { //@cond typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; //@endcond }; } // namespace single_link }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SINGLE_LINK_STRUCT_H libcds-2.3.3/cds/intrusive/details/skip_list_base.h000066400000000000000000001006201341244201700223460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_SKIP_LIST_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_SKIP_LIST_BASE_H #include #include #include #include #include namespace cds { namespace intrusive { /// SkipListSet related definitions /** @ingroup cds_intrusive_helper */ namespace skip_list { /// The maximum possible height of any skip-list static unsigned int const c_nHeightLimit = 32; /// Skip list node /** Template parameters: - \p GC - garbage collector - \p Tag - a \ref cds_intrusive_hook_tag "tag" */ template class node { public: typedef GC gc; ///< Garbage collector typedef Tag tag; ///< tag typedef cds::details::marked_ptr marked_ptr; ///< marked pointer typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr; ///< atomic marked pointer specific for GC //@cond typedef atomic_marked_ptr tower_item_type; //@endcond protected: //@cond atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) unsigned int m_nHeight; ///< Node height (size of \p m_arrNext array). For node at level 0 the height is 1. atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p nullptr atomics::atomic m_nUnlink; ///< Unlink helper //@endcond public: node() : m_pNext( nullptr ) , m_nHeight( 1 ) , m_arrNext( nullptr ) { m_nUnlink.store( 1, atomics::memory_order_release ); } /// Constructs a node's tower of height \p nHeight void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { assert( nHeight > 0 ); assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; m_nHeight = nHeight; m_nUnlink.store( nHeight, atomics::memory_order_release ); } //@cond atomic_marked_ptr * release_tower() { atomic_marked_ptr * pTower = m_arrNext; m_arrNext = nullptr; m_nHeight = 1; return pTower; } atomic_marked_ptr * get_tower() const { return m_arrNext; } bool has_tower() const { return m_nHeight > 1; } //@endcond /// Access to element of next pointer array atomic_marked_ptr& next( unsigned int nLevel ) { assert( nLevel < height()); assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); if ( nLevel ) { // TSan: data race between m_arrNext[ nLevel - 1 ] and make_tower() // In fact, m_arrNext is a const array that is never changed CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &m_arrNext[ nLevel - 1 ] ); return m_arrNext[nLevel - 1]; } return m_pNext; } /// Access to element of next pointer array (const version) atomic_marked_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height()); assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); if ( nLevel ) { CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &m_arrNext[nLevel - 1] ); return m_arrNext[nLevel - 1]; } return m_pNext; } /// Access to element of next pointer array (synonym for \p next() function) atomic_marked_ptr& operator[]( unsigned int nLevel ) { return next( nLevel ); } /// Access to element of next pointer array (synonym for \p next() function) atomic_marked_ptr const& operator[]( unsigned int nLevel ) const { return next( nLevel ); } /// Height of the node unsigned int height() const { return m_nHeight; } /// Clears internal links void clear() { assert( m_arrNext == nullptr ); m_pNext.store( marked_ptr(), atomics::memory_order_release ); } //@cond bool is_cleared() const { return m_pNext == atomic_marked_ptr() && m_arrNext == nullptr && m_nHeight <= 1; } bool level_unlinked( unsigned nCount = 1 ) { return m_nUnlink.fetch_sub( nCount, atomics::memory_order_relaxed ) == 1; } bool is_upper_level( unsigned nLevel ) const { return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1; } //@endcond }; //@cond struct undefined_gc; struct default_hook { typedef undefined_gc gc; typedef opt::none tag; }; //@endcond //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - \p opt::gc - garbage collector - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset defines offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - \p opt::gc - garbage collector - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... 
Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - \p opt::gc - garbage collector - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... > { //@cond typedef NodeTraits node_traits; //@endcond }; /// Option specifying random level generator /** The random level generator is an important part of skip-list algorithm. The node height in the skip-list have a probabilistic distribution where half of the nodes that have level \p i pointers also have level i+1 pointers (i = 0..30). The random level generator should provide such distribution. The \p Type functor interface is: \code struct random_generator { static unsigned int const c_nUpperBound = 32; random_generator(); unsigned int operator()(); }; \endcode where - \p c_nUpperBound - constant that specifies the upper bound of random number generated. The generator produces a number from range [0 .. c_nUpperBound) (upper bound excluded). \p c_nUpperBound must be no more than 32. - random_generator() - the constructor of generator object initialises the generator instance (its internal state). - unsigned int operator()() - the main generating function. Returns random level from range [0 .. c_nUpperBound - 1] Stateful generators are supported. Available \p Type implementations: - \p skip_list::xor_shift - \p skip_list::turbo */ template struct random_level_generator { //@cond template struct pack: public Base { typedef Type random_level_generator; }; //@endcond }; /// Xor-shift random level generator /** The simplest of the generators described in George Marsaglia's "Xorshift RNGs" paper. This is not a high-quality generator but is acceptable for skip-list. The random generator should return numbers from range [0 .. MaxHeight - 1]. From Doug Lea's ConcurrentSkipListMap.java. */ template class xor_shift { //@cond atomics::atomic m_nSeed; static_assert( MaxHeight > 1, "MaxHeight" ); static_assert( MaxHeight <= c_nHeightLimit, "MaxHeight is too large" ); static unsigned int const c_nBitMask = (1u << ( MaxHeight - 1 )) - 1; //@endcond public: /// The upper bound of generator's return value. The generator produces random number in range [0..c_nUpperBound) static unsigned int const c_nUpperBound = MaxHeight; /// Initializes the generator instance xor_shift() { m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed ); } /// Main generator function unsigned int operator()() { /* ConcurrentSkipListMap.java private int randomLevel() { int x = randomSeed; x ^= x << 13; x ^= x >>> 17; randomSeed = x ^= x << 5; if ((x & 0x80000001) != 0) // test highest and lowest bits return 0; int level = 1; while (((x >>>= 1) & 1) != 0) ++level; return level; } */ unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ); x ^= x << 13; x ^= x >> 17; x ^= x << 5; m_nSeed.store( x, atomics::memory_order_relaxed ); unsigned int nLevel = ((x & 0x00000001) != 0) ? 
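        /*
            Example: bounding the tower height by selecting a generator in the traits
            (a sketch; \p my_traits is an assumption):
            \code
            struct my_traits: public cds::intrusive::skip_list::traits
            {
                // levels are drawn from [0..15] instead of the default [0..31]
                typedef cds::intrusive::skip_list::xorshift16 random_level_generator;
            };
            \endcode
            Level \p L is produced with probability roughly 2^-(L+1), so about half of all
            nodes stay at the bottom level.
        */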
0 : cds::bitop::LSB( (~(x >> 1)) & c_nBitMask ); assert( nLevel < c_nUpperBound ); return nLevel; } }; /// Xor-shift random level generator, max height 32 typedef xor_shift xorshift32; //@cond // For backward compatibility typedef xorshift32 xorshift; //@endcond /// \ref xor_shift generator, max height 24 typedef xor_shift< 24 > xorshift24; /// \ref xor_shift generator, max height = 16 typedef xor_shift< 16 > xorshift16; /// Turbo-pascal random level generator /** This uses a cheap pseudo-random function that was used in Turbo Pascal. The random generator should return numbers from range [0..31]. From Doug Lea's ConcurrentSkipListMap.java. */ template class turbo { //@cond atomics::atomic m_nSeed; static_assert( MaxHeight > 1, "MaxHeight" ); static_assert( MaxHeight <= c_nHeightLimit, "MaxHeight is too large" ); static unsigned int const c_nBitMask = (1u << ( MaxHeight - 1 )) - 1; //@endcond public: /// The upper bound of generator's return value. The generator produces random number in range [0..c_nUpperBound) static unsigned int const c_nUpperBound = MaxHeight; /// Initializes the generator instance turbo() { m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed ); } /// Main generator function unsigned int operator()() { /* private int randomLevel() { int level = 0; int r = randomSeed; randomSeed = r * 134775813 + 1; if (r < 0) { while ((r <<= 1) > 0) ++level; } return level; } */ /* The low bits are apparently not very random (the original used only upper 16 bits) so we traverse from highest bit down (i.e., test sign), thus hardly ever use lower bits. */ unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ) * 134775813 + 1; m_nSeed.store( x, atomics::memory_order_relaxed ); unsigned int nLevel = ( x & 0x80000000 ) ? 
( c_nUpperBound - 1 - cds::bitop::MSBnz( (x & c_nBitMask ) | 1 )) : 0; assert( nLevel < c_nUpperBound ); return nLevel; } }; /// Turbo-Pascal random level generator, max height 32 typedef turbo turbo32; //@cond // For backward compatibility typedef turbo32 turbo_pascal; //@endcond /// Turbo-Pascal generator, max height 24 typedef turbo< 24 > turbo24; /// Turbo-Pascal generator, max height 16 typedef turbo< 16 > turbo16; /// \p SkipListSet internal statistics template struct stat { typedef EventCounter event_counter ; ///< Event counter type event_counter m_nNodeHeightAdd[c_nHeightLimit] ; ///< Count of added node of each height event_counter m_nNodeHeightDel[c_nHeightLimit] ; ///< Count of deleted node of each height event_counter m_nInsertSuccess ; ///< Count of success insertion event_counter m_nInsertFailed ; ///< Count of failed insertion event_counter m_nInsertRetries ; ///< Count of unsuccessful retries of insertion event_counter m_nUpdateExist ; ///< Count of \p update() call for existed node event_counter m_nUpdateNew ; ///< Count of \p update() call for new node event_counter m_nUnlinkSuccess ; ///< Count of successful call of \p unlink event_counter m_nUnlinkFailed ; ///< Count of failed call of \p unlink event_counter m_nEraseSuccess ; ///< Count of successful call of \p erase event_counter m_nEraseFailed ; ///< Count of failed call of \p erase event_counter m_nEraseRetry ; ///< Count of retries while erasing node event_counter m_nFindFastSuccess ; ///< Count of successful call of \p find and all derivatives (via fast-path) event_counter m_nFindFastFailed ; ///< Count of failed call of \p find and all derivatives (via fast-path) event_counter m_nFindSlowSuccess ; ///< Count of successful call of \p find and all derivatives (via slow-path) event_counter m_nFindSlowFailed ; ///< Count of failed call of \p find and all derivatives (via slow-path) event_counter m_nRenewInsertPosition ; ///< Count of renewing position events while inserting event_counter m_nLogicDeleteWhileInsert; ///< Count of events "The node has been logically deleted while inserting" event_counter m_nRemoveWhileInsert ; ///< Count of evnts "The node is removing while inserting" event_counter m_nFastErase ; ///< Fast erase event counter event_counter m_nFastExtract ; ///< Fast extract event counter event_counter m_nSlowErase ; ///< Slow erase event counter event_counter m_nSlowExtract ; ///< Slow extract event counter event_counter m_nExtractSuccess ; ///< Count of successful call of \p extract event_counter m_nExtractFailed ; ///< Count of failed call of \p extract event_counter m_nExtractRetries ; ///< Count of retries of \p extract call event_counter m_nExtractMinSuccess ; ///< Count of successful call of \p extract_min event_counter m_nExtractMinFailed ; ///< Count of failed call of \p extract_min event_counter m_nExtractMinRetries ; ///< Count of retries of \p extract_min call event_counter m_nExtractMaxSuccess ; ///< Count of successful call of \p extract_max event_counter m_nExtractMaxFailed ; ///< Count of failed call of \p extract_max event_counter m_nExtractMaxRetries ; ///< Count of retries of \p extract_max call event_counter m_nEraseWhileFind ; ///< Count of erased item while searching event_counter m_nExtractWhileFind ; ///< Count of extracted item while searching (RCU only) event_counter m_nMarkFailed ; ///< Count of failed node marking (logical deletion mark) event_counter m_nEraseContention ; ///< Count of key erasing contention encountered //@cond void onAddNode( unsigned int nHeight ) { assert( 
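        /*
            Example: reading the node-height histogram gathered above (a sketch; it assumes
            a skip-list set declared elsewhere with \p skip_list::stat enabled in its traits,
            and that the event counter type provides \p get()):
            \code
            auto const& st = set.statistics();
            for ( unsigned h = 0; h < cds::intrusive::skip_list::c_nHeightLimit; ++h )
                std::cout << "height " << (h + 1) << ": added=" << st.m_nNodeHeightAdd[h].get()
                          << " removed=" << st.m_nNodeHeightDel[h].get() << '\n';
            \endcode
        */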
nHeight > 0 && nHeight <= sizeof(m_nNodeHeightAdd) / sizeof(m_nNodeHeightAdd[0])); ++m_nNodeHeightAdd[nHeight - 1]; } void onRemoveNode( unsigned int nHeight ) { assert( nHeight > 0 && nHeight <= sizeof(m_nNodeHeightDel) / sizeof(m_nNodeHeightDel[0])); ++m_nNodeHeightDel[nHeight - 1]; } void onInsertSuccess() { ++m_nInsertSuccess ; } void onInsertFailed() { ++m_nInsertFailed ; } void onInsertRetry() { ++m_nInsertRetries ; } void onUpdateExist() { ++m_nUpdateExist ; } void onUpdateNew() { ++m_nUpdateNew ; } void onUnlinkSuccess() { ++m_nUnlinkSuccess ; } void onUnlinkFailed() { ++m_nUnlinkFailed ; } void onEraseSuccess() { ++m_nEraseSuccess ; } void onEraseFailed() { ++m_nEraseFailed ; } void onEraseRetry() { ++m_nEraseRetry; } void onFindFastSuccess() { ++m_nFindFastSuccess ; } void onFindFastFailed() { ++m_nFindFastFailed ; } void onFindSlowSuccess() { ++m_nFindSlowSuccess ; } void onFindSlowFailed() { ++m_nFindSlowFailed ; } void onEraseWhileFind() { ++m_nEraseWhileFind ; } void onExtractWhileFind() { ++m_nExtractWhileFind ; } void onRenewInsertPosition() { ++m_nRenewInsertPosition; } void onLogicDeleteWhileInsert() { ++m_nLogicDeleteWhileInsert; } void onRemoveWhileInsert() { ++m_nRemoveWhileInsert; } void onFastErase() { ++m_nFastErase; } void onFastExtract() { ++m_nFastExtract; } void onSlowErase() { ++m_nSlowErase; } void onSlowExtract() { ++m_nSlowExtract; } void onExtractSuccess() { ++m_nExtractSuccess; } void onExtractFailed() { ++m_nExtractFailed; } void onExtractRetry() { ++m_nExtractRetries; } void onExtractMinSuccess() { ++m_nExtractMinSuccess; } void onExtractMinFailed() { ++m_nExtractMinFailed; } void onExtractMinRetry() { ++m_nExtractMinRetries; } void onExtractMaxSuccess() { ++m_nExtractMaxSuccess; } void onExtractMaxFailed() { ++m_nExtractMaxFailed; } void onExtractMaxRetry() { ++m_nExtractMaxRetries; } void onMarkFailed() { ++m_nMarkFailed; } void onEraseContention() { ++m_nEraseContention; } //@endcond }; /// \p SkipListSet empty internal statistics struct empty_stat { //@cond void onAddNode( unsigned int /*nHeight*/ ) const {} void onRemoveNode( unsigned int /*nHeight*/ ) const {} void onInsertSuccess() const {} void onInsertFailed() const {} void onInsertRetry() const {} void onUpdateExist() const {} void onUpdateNew() const {} void onUnlinkSuccess() const {} void onUnlinkFailed() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onEraseRetry() const {} void onFindFastSuccess() const {} void onFindFastFailed() const {} void onFindSlowSuccess() const {} void onFindSlowFailed() const {} void onEraseWhileFind() const {} void onExtractWhileFind() const {} void onRenewInsertPosition() const {} void onLogicDeleteWhileInsert() const {} void onRemoveWhileInsert() const {} void onFastErase() const {} void onFastExtract() const {} void onSlowErase() const {} void onSlowExtract() const {} void onExtractSuccess() const {} void onExtractFailed() const {} void onExtractRetry() const {} void onExtractMinSuccess() const {} void onExtractMinFailed() const {} void onExtractMinRetry() const {} void onExtractMaxSuccess() const {} void onExtractMaxFailed() const {} void onExtractMaxRetry() const {} void onMarkFailed() const {} void onEraseContention() const {} //@endcond }; //@cond // For internal use only!!! 
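        /*
            Example: a typical traits pack for an intrusive skip-list assembled from the
            options documented below (a sketch; \p my_compare is an assumption):
            \code
            typedef cds::intrusive::skip_list::make_traits<
                cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP >>>
                ,cds::opt::compare< my_compare >
                ,cds::opt::item_counter< cds::atomicity::item_counter >
            >::type my_traits;
            \endcode
        */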
template struct internal_node_builder { template struct pack: public Base { typedef Type internal_node_builder; }; }; //@endcond /// \p SkipListSet traits struct traits { /// Hook used /** Possible values are: \p skip_list::base_hook, \p skip_list::member_hook, \p skip_list::traits_hook. */ typedef base_hook<> hook; /// Key comparison functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// specifies binary predicate used for key compare. /** Default is \p std::less. */ typedef opt::none less; /// Disposer /** The functor used for dispose removed items. Default is \p opt::v::empty_disposer. */ typedef opt::v::empty_disposer disposer; /// Item counter /** The type for item counting feature. By default, item counting is disabled (\p atomicity::empty_item_counter), \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter enables it. */ typedef atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** List of available memory ordering see \p opt::memory_model */ typedef opt::v::relaxed_ordering memory_model; /// Random level generator /** The random level generator is an important part of skip-list algorithm. The node height in the skip-list have a probabilistic distribution where half of the nodes that have level \p i pointers also have level i+1 pointers (i = 0..30). So, the height of a node is in range [0..31]. See \p skip_list::random_level_generator option setter. */ typedef turbo32 random_level_generator; /// Allocator /** Although the skip-list is an intrusive container, an allocator should be provided to maintain variable randomly-calculated height of the node since the node can contain up to 32 next pointers. The allocator specified is used to allocate an array of next pointers for nodes which height is more than 1. */ typedef CDS_DEFAULT_ALLOCATOR allocator; /// back-off strategy /** If the option is not specified, the \p cds::backoff::Default is used. */ typedef cds::backoff::Default back_off; /// Internal statistics /** By default, internal statistics is disabled (\p skip_list::empty_stat). Use \p skip_list::stat to enable it. */ typedef empty_stat stat; /// RCU deadlock checking policy (only for \ref cds_intrusive_SkipListSet_rcu "RCU-based SkipListSet") /** List of available options see \p opt::rcu_check_deadlock */ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; //@cond // For internal use only!!! typedef opt::none internal_node_builder; //@endcond }; /// Metafunction converting option list to \p SkipListSet traits /** \p Options are: - \p opt::hook - hook used. Possible values are: \p skip_list::base_hook, \p skip_list::member_hook, \p skip_list::traits_hook. If the option is not specified, skip_list::base_hook<> and \p gc::HP is used. - \p opt::compare - key comparison functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. Due the nature of GC schema the disposer may be called asynchronously. - \p opt::item_counter - the type of item counting feature. Default is disabled, i.e. \p atomicity::empty_item_counter. To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). - \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo32 (the default) or user-provided one. See \p skip_list::random_level_generator option description for explanation. - \p opt::allocator - although the skip-list is an intrusive container, an allocator should be provided to maintain variable randomly-calculated height of the node since the node can contain up to 32 next pointers. The allocator option is used to allocate an array of next pointers for nodes which height is more than 1. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p opt::back_off - back-off strategy, default is \p cds::backoff::Default. - \p opt::stat - internal statistics. By default, it is disabled (\p skip_list::empty_stat). To enable it use \p skip_list::stat */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; //@cond namespace details { template class head_node: public Node { typedef Node node_type; typename node_type::atomic_marked_ptr m_Tower[skip_list::c_nHeightLimit]; public: head_node( unsigned int nHeight ) { for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed ); node_type::make_tower( nHeight, m_Tower ); } node_type * head() const { return const_cast( static_cast(this)); } }; template struct intrusive_node_builder { typedef NodeType node_type; typedef AtomicNodePtr atomic_node_ptr; typedef Alloc allocator_type; typedef cds::details::Allocator< atomic_node_ptr, allocator_type > tower_allocator; template static node_type * make_tower( node_type * pNode, RandomGen& gen ) { return make_tower( pNode, gen() + 1 ); } static node_type * make_tower( node_type * pNode, unsigned int nHeight ) { if ( nHeight > 1 ) pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, nullptr )); return pNode; } static void dispose_tower( node_type * pNode ) { unsigned int nHeight = pNode->height(); if ( nHeight > 1 ) tower_allocator().Delete( pNode->release_tower(), nHeight ); } struct node_disposer { void operator()( node_type * pNode ) { dispose_tower( pNode ); } }; }; // Forward declaration template class iterator; } // namespace details //@endcond } // namespace skip_list // Forward declaration template class SkipListSet; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SKIP_LIST_BASE_H libcds-2.3.3/cds/intrusive/details/split_list_base.h000066400000000000000000001526241341244201700225460ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H #include #include #include #include #include #include #include #include #include #include namespace cds { namespace intrusive { /// Split-ordered list related definitions /** @ingroup cds_intrusive_helper */ namespace split_list { //@cond struct hash_node { size_t m_nHash; ///< Hash value for node /// Default constructor hash_node() : m_nHash( 0 ) { assert( is_dummy()); } /// Initializes dummy node with \p nHash value explicit hash_node( size_t nHash ) : m_nHash( nHash ) { assert( is_dummy()); } /// Checks if the node is dummy node bool is_dummy() const { return (m_nHash & 1) == 0; } }; //@endcond /// Split-ordered list node /** Template parameter: - \p OrderedListNode - node type for underlying ordered list */ template struct node: public OrderedListNode, public hash_node { //@cond typedef OrderedListNode base_class; //@endcond /// Default constructor node() : hash_node(0) { assert( is_dummy()); } /// Initializes dummy node with \p nHash value explicit node( size_t nHash ) : hash_node( nHash ) { assert( is_dummy()); } /// Checks if the node is dummy node bool is_dummy() const { return hash_node::is_dummy(); } }; //@cond // for IterableList template <> struct node: public hash_node { // Default ctor node() : hash_node( 0 ) { assert( is_dummy()); } /// Initializes dummy node with \p nHash value explicit node( size_t nHash ) : hash_node( nHash ) { assert( is_dummy()); } /// Checks if the node is dummy node bool is_dummy() const { return hash_node::is_dummy(); } }; //@endcond /// \p SplitListSet internal statistics. May be used for debugging or profiling /** Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter. 
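        /*
            Example: enabling these statistics in a split-list based container (a sketch;
            the traits struct below is an assumption, not part of the library):
            \code
            struct my_traits: public cds::intrusive::split_list::traits
            {
                typedef cds::intrusive::split_list::stat<> stat;  // use counting statistics
            };
            \endcode
        */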
*/ template struct stat { typedef Counter counter_type; ///< Counter type counter_type m_nInsertSuccess; ///< Count of success inserting counter_type m_nInsertFailed; ///< Count of failed inserting counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item counter_type m_nEraseSuccess; ///< Count of success erasing of items counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item counter_type m_nExtractSuccess; ///< Count of success extracting of items counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item counter_type m_nFindSuccess; ///< Count of success finding counter_type m_nFindFailed; ///< Count of failed finding counter_type m_nHeadNodeAllocated; ///< Count of allocated head node counter_type m_nHeadNodeFreed; ///< Count of freed head node counter_type m_nBucketCount; ///< Current bucket count counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation //@cond void onInsertSuccess() { ++m_nInsertSuccess; } void onInsertFailed() { ++m_nInsertFailed; } void onUpdateNew() { ++m_nUpdateNew; } void onUpdateExist() { ++m_nUpdateExist; } void onEraseSuccess() { ++m_nEraseSuccess; } void onEraseFailed() { ++m_nEraseFailed; } void onExtractSuccess() { ++m_nExtractSuccess; } void onExtractFailed() { ++m_nExtractFailed; } void onFindSuccess() { ++m_nFindSuccess; } void onFindFailed() { ++m_nFindFailed; } bool onFind(bool bSuccess) { if ( bSuccess ) onFindSuccess(); else onFindFailed(); return bSuccess; } void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; } void onHeadNodeFreed() { ++m_nHeadNodeFreed; } void onNewBucket() { ++m_nBucketCount; } void onRecursiveInitBucket() { ++m_nInitBucketRecursive; } void onBucketInitContenton() { ++m_nInitBucketContention; } void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; } void onBucketsExhausted() { ++m_nBucketsExhausted; } //@endcond }; /// Dummy queue statistics - no counting is performed, no overhead. Support interface like \p split_list::stat struct empty_stat { //@cond void onInsertSuccess() const {} void onInsertFailed() const {} void onUpdateNew() const {} void onUpdateExist() const {} void onEraseSuccess() const {} void onEraseFailed() const {} void onExtractSuccess() const {} void onExtractFailed() const {} void onFindSuccess() const {} void onFindFailed() const {} bool onFind( bool bSuccess ) const { return bSuccess; } void onHeadNodeAllocated() const {} void onHeadNodeFreed() const {} void onNewBucket() const {} void onRecursiveInitBucket() const {} void onBucketInitContenton() const {} void onBusyWaitBucketInit() const {} void onBucketsExhausted() const {} //@endcond }; /// Option to control bit reversal algorithm /** Bit reversal is a significant part of split-list. \p Type can be one of predefined algorithm in \p cds::algo::bit_reversal namespace. */ template struct bit_reversal { //@cond template struct pack: public Base { typedef Type bit_reversal; }; //@endcond }; /// SplitListSet traits struct traits { /// Hash function /** Hash function converts the key fields of struct \p T stored in the split list into hash value of type \p size_t that is an index in hash table. By default, \p std::hash is used. 
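        /*
            Example hash functor for a split-list (a sketch; \p my_item and its \p key
            field are assumptions). Note the overloads: the functor must accept both the
            stored type and any key type used for lookup:
            \code
            struct my_hash {
                size_t operator()( my_item const& i ) const { return std::hash<int>()( i.key ); }
                size_t operator()( int k ) const            { return std::hash<int>()( k ); }
            };
            // passed to the container via cds::opt::hash< my_hash >
            \endcode
        */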
        */
            typedef opt::none hash;

            /// Bit reversal algorithm
            /**
                Bit reversal is a significant part of split-list.
                There are several predefined algorithms in \p cds::algo::bit_reversal namespace;
                \p cds::algo::bit_reversal::lookup is the best general-purpose one.
                There are more efficient bit reversal algorithms for particular processor
                architectures, for example, based on the x86 SIMD/AVX instruction set, see here
            */
            typedef cds::algo::bit_reversal::lookup bit_reversal;

            /// Item counter
            /**
                The item counting is an important part of \p SplitListSet algorithm:
                the empty() member function depends on correct item counting.
                Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.

                Default is \p cds::atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter
            */
            typedef cds::atomicity::item_counter item_counter;

            /// Bucket table allocator
            /**
                Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            */
            typedef CDS_DEFAULT_ALLOCATOR allocator;

            /// Internal statistics (by default, disabled)
            /**
                Possible statistics types are: \p split_list::stat (enable internal statistics),
                \p split_list::empty_stat (the default, internal statistics disabled),
                user-provided class that supports \p %split_list::stat interface.
            */
            typedef split_list::empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// What type of bucket table is used
            /**
                \p true - use \p split_list::expandable_bucket_table that can be expanded
                    if the load factor of the set is exhausted.
                \p false - use \p split_list::static_bucket_table that cannot be expanded
                    and is allocated in \p SplitListSet constructor.

                Default is \p true.
            */
            static const bool dynamic_bucket_table = true;

            /// Back-off strategy
            typedef cds::backoff::Default back_off;

            /// Padding; default is cache-line padding
            enum { padding = cds::opt::cache_line_padding };

            /// Free-list of auxiliary nodes
            /**
                The split-list contains auxiliary nodes marking the start of buckets.
                To increase performance, there is a pool of preallocated aux nodes;
                part of that pool is a free-list of aux nodes.

                Default is:
                - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive
                - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive
            */
            typedef FreeListImpl free_list;
        };

        /// [value-option] Split-list dynamic bucket table option
        /**
            The option is used to select bucket table implementation.
            Possible values of \p Value are:
            - \p true - select \p expandable_bucket_table
            - \p false - select \p static_bucket_table
        */
        template <bool Value>
        struct dynamic_bucket_table
        {
            //@cond
            template <typename Base> struct pack: public Base
            {
                enum { dynamic_bucket_table = Value };
            };
            //@endcond
        };

        /// Metafunction converting option list to \p split_list::traits
        /**
            Available \p Options:
            - \p opt::hash - mandatory option, specifies hash functor.
            - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation;
                default is \p cds::algo::bit_reversal::lookup
            - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter for default type.
            - \p opt::memory_model - C++ memory model for atomic operations.
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation. Dynamic bucket table expands its size up to maximum bucket count when necessary - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default. - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled). To enable internal statistics use \p split_list::stat. - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding - \p opt::free_list - a free-list implementation, see \p traits::free_list */ template struct make_traits { typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction }; /// Static bucket table /** Non-resizeable bucket table for \p SplitListSet class. The capacity of table (max bucket count) is defined in the constructor call. Template parameter: - \p GC - garbage collector - \p Node - node type, must be a type based on \p split_list::node - \p Options... - options \p Options are: - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS otherwise \p FreeList. */ template class static_bucket_table { //@cond struct default_options { typedef CDS_DEFAULT_ALLOCATOR allocator; typedef opt::v::relaxed_ordering memory_model; typedef FreeListImpl free_list; }; typedef typename opt::make_options< default_options, Options... >::type options; //@endcond public: typedef GC gc; ///< Garbage collector typedef Node node_type; ///< Bucket node type typedef typename options::allocator allocator; ///< allocator typedef typename options::memory_model memory_model; ///< Memory model for atomic operations typedef typename options::free_list free_list; ///< Free-list /// Auxiliary node type struct aux_node_type: public node_type, public free_list::node { # ifdef CDS_DEBUG atomics::atomic m_busy; aux_node_type() { m_busy.store( false, atomics::memory_order_release ); } # endif }; typedef atomics::atomic table_entry; ///< Table entry type typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator protected: //@cond const size_t m_nLoadFactor; ///< load factor (average count of items per bucket) const size_t m_nCapacity; ///< Bucket table capacity table_entry * m_Table; ///< Bucket table typedef typename std::allocator_traits::template rebind_alloc< aux_node_type > aux_node_allocator; aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes atomics::atomic m_nAuxNodeAllocated; ///< how many auxiliary node allocated free_list m_freeList; ///< Free list //@endcond protected: //@cond void allocate_table() { m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr ); m_auxNode = aux_node_allocator().allocate( m_nCapacity ); } void destroy_table() { m_freeList.clear( []( typename free_list::node* ) {} ); aux_node_allocator().deallocate( m_auxNode, m_nCapacity ); bucket_table_allocator().Delete( m_Table, m_nCapacity ); } //@endcond public: /// Constructs bucket table for 512K buckets. 
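        /*
            Selecting this static table instead of the default expandable one is done through
            the split-list traits (a sketch):
            \code
            struct my_traits: public cds::intrusive::split_list::traits
            {
                static const bool dynamic_bucket_table = false;  // use static_bucket_table
            };
            \endcode
            or, equivalently, with \p cds::intrusive::split_list::dynamic_bucket_table< false >
            passed to \p make_traits.
        */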
Load factor is 1. static_bucket_table() : m_nLoadFactor(1) , m_nCapacity( 512 * 1024 ) , m_nAuxNodeAllocated( 0 ) { allocate_table(); } /// Creates the table with specified size rounded up to nearest power-of-two static_bucket_table( size_t nItemCount, ///< Max expected item count in split-ordered list size_t nLoadFactor ///< Load factor ) : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ) , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor )) , m_nAuxNodeAllocated( 0 ) { // m_nCapacity must be power of 2 assert( cds::beans::is_power2( m_nCapacity )); allocate_table(); } /// Destroys bucket table ~static_bucket_table() { destroy_table(); } /// Returns head node of bucket \p nBucket aux_node_type * bucket( size_t nBucket ) const { assert( nBucket < capacity()); return m_Table[ nBucket ].load(memory_model::memory_order_acquire); } /// Set \p pNode as a head of bucket \p nBucket void bucket( size_t nBucket, aux_node_type * pNode ) { assert( nBucket < capacity()); assert( bucket( nBucket ) == nullptr ); m_Table[ nBucket ].store( pNode, memory_model::memory_order_release ); } /// Allocates auxiliary node; can return \p nullptr if the table exhausted aux_node_type* alloc_aux_node() { if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) { // alloc next free node from m_auxNode size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed ); if ( idx < capacity()) { CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type )); return new( &m_auxNode[idx] ) aux_node_type(); } } // get from free-list auto pFree = m_freeList.get(); if ( pFree ) return static_cast( pFree ); // table exhausted return nullptr; } /// Places node type to free-list void free_aux_node( aux_node_type* p ) { m_freeList.put( static_cast( p )); } /// Returns the capacity of the bucket table size_t capacity() const { return m_nCapacity; } /// Returns the load factor, i.e. average count of items per bucket size_t load_factor() const { return m_nLoadFactor; } }; /// Expandable bucket table /** This bucket table can dynamically grow its capacity when necessary up to maximum bucket count. Template parameter: - \p GC - garbage collector - \p Node - node type, must be derived from \p split_list::node - \p Options... - options \p Options are: - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS otherwise \p FreeList. */ template class expandable_bucket_table { //@cond struct default_options { typedef CDS_DEFAULT_ALLOCATOR allocator; typedef opt::v::relaxed_ordering memory_model; typedef FreeListImpl free_list; }; typedef typename opt::make_options< default_options, Options... 
>::type options; //@endcond public: typedef GC gc; ///< Garbage collector typedef Node node_type; ///< Bucket node type typedef typename options::allocator allocator; ///< allocator /// Memory model for atomic operations typedef typename options::memory_model memory_model; /// Free-list typedef typename options::free_list free_list; /// Auxiliary node type struct aux_node_type: public node_type, public free_list::node { # ifdef CDS_DEBUG atomics::atomic m_busy; aux_node_type() { m_busy.store( false, atomics::memory_order_release ); } # endif }; protected: //@cond typedef atomics::atomic table_entry; ///< Table entry type typedef atomics::atomic segment_type; ///< Bucket table segment type struct aux_node_segment { atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment aux_node_segment* next_segment; // aux_node_type nodes[]; aux_node_segment() : next_segment( nullptr ) { aux_node_count.store( 0, atomics::memory_order_release ); } aux_node_type* segment() { return reinterpret_cast( this + 1 ); } }; /// Bucket table metrics struct metrics { size_t nSegmentCount; ///< max count of segments in bucket table size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two. size_t nSegmentSizeLog2; ///< log2( m_nSegmentSize ) size_t nLoadFactor; ///< load factor size_t nCapacity; ///< max capacity of bucket table metrics() : nSegmentCount( 1024 ) , nSegmentSize( 512 ) , nSegmentSizeLog2( cds::beans::log2( nSegmentSize )) , nLoadFactor( 1 ) , nCapacity( nSegmentCount * nSegmentSize ) {} }; /// Bucket table allocator typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator; /// Bucket table segment allocator typedef cds::details::Allocator< table_entry, allocator > segment_allocator; // Aux node segment allocator typedef typename std::allocator_traits< allocator >::template rebind_alloc raw_allocator; //@endcond public: /// Constructs bucket table for 512K buckets. Load factor is 1. 
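        /*
            Worked example for calc_metrics() below (illustration only):
                nItemCount = 1000000, nLoadFactor = 2
                nBucketCount = (1000000 + 1) / 2 = 500000      -> greater than 1024
                log2ceil( 500000 ) = 19
                nSegmentCount = nSegmentSize = 1 << (19 / 2) = 512
                19 is odd                                      -> nSegmentSize = 1024
                512 * 1024 * 2 >= 1000000                      -> no further doubling
                nCapacity = 512 * 1024 = 524288
            so this table can grow, segment by segment, up to 524288 buckets.
        */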
expandable_bucket_table() : m_metrics( calc_metrics( 512 * 1024, 1 )) { init(); } /// Creates the table with specified capacity rounded up to the nearest power-of-two expandable_bucket_table( size_t nItemCount, ///< Max expected item count in split-ordered list size_t nLoadFactor ///< Load factor ) : m_metrics( calc_metrics( nItemCount, nLoadFactor )) { init(); } /// Destroys bucket table ~expandable_bucket_table() { m_freeList.clear( []( typename free_list::node* ) {} ); for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) { auto next_segment = aux_segment->next_segment; free_aux_segment( aux_segment ); aux_segment = next_segment; } segment_type * pSegments = m_Segments; for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) { table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed); if ( pEntry != nullptr ) destroy_segment( pEntry ); } destroy_table( pSegments ); } /// Returns head node of the bucket \p nBucket aux_node_type * bucket( size_t nBucket ) const { size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2; assert( nSegment < m_metrics.nSegmentCount ); table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire); if ( pSegment == nullptr ) return nullptr; // uninitialized bucket return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire); } /// Sets \p pNode as the head of bucket \p nBucket void bucket( size_t nBucket, aux_node_type * pNode ) { size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2; assert( nSegment < m_metrics.nSegmentCount ); segment_type& segment = m_Segments[nSegment]; if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) { table_entry* pNewSegment = allocate_segment(); table_entry * pNull = nullptr; if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) destroy_segment( pNewSegment ); } assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr ); segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release ); } /// Allocates auxiliary node; can return \p nullptr if the table is exhausted aux_node_type* alloc_aux_node() { aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire ); for ( ;; ) { assert( aux_segment != nullptr ); // try to allocate from current aux segment if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) { size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed ); if ( idx < m_metrics.nSegmentSize ) { CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type )); return new( aux_segment->segment() + idx ) aux_node_type(); } } // try to allocate from free-list auto pFree = m_freeList.get(); if ( pFree ) return static_cast<aux_node_type*>( pFree ); // free-list is empty, current segment is full // try to allocate new aux segment // We can allocate more aux segments than we need but it is not a problem in this context aux_node_segment* new_aux_segment = allocate_aux_segment(); new_aux_segment->next_segment = aux_segment; new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed ); if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire )) {
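// CAS succeeded: this thread has published new_aux_segment as the head of the aux
// segment list. Slot 0 of the new segment was already reserved by the fetch_add
// above, so the first aux node can be constructed in-place right away.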
CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type )); return new( new_aux_segment->segment()) aux_node_type(); } free_aux_segment( new_aux_segment ); } } /// Places auxiliary node type to free-list void free_aux_node( aux_node_type* p ) { m_freeList.put( static_cast<typename free_list::node*>( p )); } /// Returns the capacity of the bucket table size_t capacity() const { return m_metrics.nCapacity; } /// Returns the load factor, i.e. average count of items per bucket size_t load_factor() const { return m_metrics.nLoadFactor; } protected: //@cond metrics calc_metrics( size_t nItemCount, size_t nLoadFactor ) { metrics m; // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1; size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor; if ( nBucketCount <= 2 ) { m.nSegmentCount = 1; m.nSegmentSize = 2; } else if ( nBucketCount <= 1024 ) { m.nSegmentCount = 1; m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount ); } else { nBucketCount = beans::log2ceil( nBucketCount ); m.nSegmentCount = m.nSegmentSize = ((size_t)1) << (nBucketCount / 2); if ( nBucketCount & 1 ) m.nSegmentSize *= 2; if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount ) m.nSegmentSize *= 2; } m.nCapacity = m.nSegmentCount * m.nSegmentSize; m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize ); assert( m.nSegmentSizeLog2 != 0 ); return m; } segment_type * allocate_table() { return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr ); } void destroy_table( segment_type * pTable ) { bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount ); } table_entry* allocate_segment() { return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr ); } void destroy_segment( table_entry* pSegment ) { segment_allocator().Delete( pSegment, m_metrics.nSegmentSize ); } aux_node_segment* allocate_aux_segment() { char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize ); CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment )); return new(p) aux_node_segment(); } void free_aux_segment( aux_node_segment* p ) { raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize ); } void init() { // m_nSegmentSize must be 2**N assert( cds::beans::is_power2( m_metrics.nSegmentSize )); assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize ); // m_nSegmentCount must be 2**K assert( cds::beans::is_power2( m_metrics.nSegmentCount )); m_Segments = allocate_table(); m_auxNodeList = allocate_aux_segment(); } //@endcond protected: //@cond metrics const m_metrics; ///< Dynamic bucket table metrics segment_type* m_Segments; ///< bucket table - array of segments atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes free_list m_freeList; ///< List of free aux nodes //@endcond }; //@cond namespace details { template <bool Expandable, typename GC, typename Node, typename... Options> struct bucket_table_selector; template <typename GC, typename Node, typename... Options> struct bucket_table_selector< true, GC, Node, Options...> { typedef expandable_bucket_table<GC, Node, Options...> type; }; template <typename GC, typename Node, typename... Options> struct bucket_table_selector< false, GC, Node, Options...> { typedef static_bucket_table<GC, Node, Options...> type; }; template <typename Q> struct search_value_type { Q& val; size_t nHash; search_value_type( Q& v, size_t h ) : val( v ) , nHash( h ) {} }; template <class OrderedList, class Traits, bool Iterable> class ordered_list_adapter; template <class OrderedList, class Traits> class ordered_list_adapter< OrderedList, Traits, false > { typedef OrderedList native_ordered_list; typedef Traits traits; typedef typename
native_ordered_list::gc gc; typedef typename native_ordered_list::key_comparator native_key_comparator; typedef typename native_ordered_list::node_type node_type; typedef typename native_ordered_list::value_type value_type; typedef typename native_ordered_list::node_traits native_node_traits; typedef typename native_ordered_list::disposer native_disposer; typedef split_list::node splitlist_node_type; struct key_compare { int operator()( value_type const& v1, value_type const& v2 ) const { splitlist_node_type const * n1 = static_cast(native_node_traits::to_node_ptr( v1 )); splitlist_node_type const * n2 = static_cast(native_node_traits::to_node_ptr( v2 )); if ( n1->m_nHash != n2->m_nHash ) return n1->m_nHash < n2->m_nHash ? -1 : 1; if ( n1->is_dummy()) { assert( n2->is_dummy()); return 0; } assert( !n1->is_dummy() && !n2->is_dummy()); return native_key_comparator()(v1, v2); } template int operator()( value_type const& v, search_value_type const& q ) const { splitlist_node_type const * n = static_cast(native_node_traits::to_node_ptr( v )); if ( n->m_nHash != q.nHash ) return n->m_nHash < q.nHash ? -1 : 1; assert( !n->is_dummy()); return native_key_comparator()(v, q.val); } template int operator()( search_value_type const& q, value_type const& v ) const { return -operator()( v, q ); } }; struct wrapped_disposer { void operator()( value_type * v ) { splitlist_node_type * p = static_cast(native_node_traits::to_node_ptr( v )); if ( !p->is_dummy()) native_disposer()(v); } }; public: typedef node_type ordered_list_node_type; typedef splitlist_node_type aux_node; struct node_traits: private native_node_traits { typedef native_node_traits base_class; ///< Base ordered list node type typedef typename base_class::value_type value_type; ///< Value type typedef typename base_class::node_type base_node_type; ///< Ordered list node type typedef node node_type; ///< Split-list node type /// Convert value reference to node pointer static node_type * to_node_ptr( value_type& v ) { return static_cast(base_class::to_node_ptr( v )); } /// Convert value pointer to node pointer static node_type * to_node_ptr( value_type * v ) { return static_cast(base_class::to_node_ptr( v )); } /// Convert value reference to node pointer (const version) static node_type const * to_node_ptr( value_type const& v ) { return static_cast(base_class::to_node_ptr( v )); } /// Convert value pointer to node pointer (const version) static node_type const * to_node_ptr( value_type const * v ) { return static_cast(base_class::to_node_ptr( v )); } /// Convert node reference to value pointer static value_type * to_value_ptr( node_type& n ) { return base_class::to_value_ptr( static_cast(n)); } /// Convert node pointer to value pointer static value_type * to_value_ptr( node_type * n ) { return base_class::to_value_ptr( static_cast(n)); } /// Convert node reference to value pointer (const version) static const value_type * to_value_ptr( node_type const & n ) { return base_class::to_value_ptr( static_cast(n)); } /// Convert node pointer to value pointer (const version) static const value_type * to_value_ptr( node_type const * n ) { return base_class::to_value_ptr( static_cast(n)); } }; template struct make_compare_from_less: public cds::opt::details::make_comparator_from_less { typedef cds::opt::details::make_comparator_from_less base_class; template int operator()( value_type const& v, search_value_type const& q ) const { splitlist_node_type const * n = static_cast(native_node_traits::to_node_ptr( v )); if ( n->m_nHash != q.nHash ) return 
n->m_nHash < q.nHash ? -1 : 1; assert( !n->is_dummy()); return base_class()(v, q.val); } template int operator()( search_value_type const& q, value_type const& v ) const { splitlist_node_type const * n = static_cast(native_node_traits::to_node_ptr( v )); if ( n->m_nHash != q.nHash ) return q.nHash < n->m_nHash ? -1 : 1; assert( !n->is_dummy()); return base_class()(q.val, v); } int operator()( value_type const& lhs, value_type const& rhs ) const { splitlist_node_type const * n1 = static_cast(native_node_traits::to_node_ptr( lhs )); splitlist_node_type const * n2 = static_cast(native_node_traits::to_node_ptr( rhs )); if ( n1->m_nHash != n2->m_nHash ) return n1->m_nHash < n2->m_nHash ? -1 : 1; if ( n1->is_dummy()) { assert( n2->is_dummy()); return 0; } assert( !n1->is_dummy() && !n2->is_dummy()); return native_key_comparator()( lhs, rhs ); } }; typedef typename native_ordered_list::template rebind_traits< opt::compare< key_compare > , opt::disposer< wrapped_disposer > , opt::boundary_node_type< splitlist_node_type > >::type result; }; template class ordered_list_adapter< OrderedList, Traits, true > { typedef OrderedList native_ordered_list; typedef Traits traits; typedef typename native_ordered_list::gc gc; typedef typename native_ordered_list::key_comparator native_key_comparator; typedef typename native_ordered_list::value_type value_type; typedef typename native_ordered_list::disposer native_disposer; struct key_compare { int operator()( value_type const& v1, value_type const& v2 ) const { hash_node const& n1 = static_cast( v1 ); hash_node const& n2 = static_cast( v2 ); if ( n1.m_nHash != n2.m_nHash ) return n1.m_nHash < n2.m_nHash ? -1 : 1; if ( n1.is_dummy()) { assert( n2.is_dummy()); return 0; } assert( !n1.is_dummy() && !n2.is_dummy()); return native_key_comparator()(v1, v2); } template int operator()( value_type const& v, search_value_type const& q ) const { hash_node const& n = static_cast( v ); if ( n.m_nHash != q.nHash ) return n.m_nHash < q.nHash ? -1 : 1; assert( !n.is_dummy()); return native_key_comparator()(v, q.val); } template int operator()( search_value_type const& q, value_type const& v ) const { return -operator()( v, q ); } }; struct wrapped_disposer { void operator()( value_type * v ) { if ( !static_cast( v )->is_dummy()) native_disposer()( v ); } }; public: typedef void ordered_list_node_type; struct aux_node: public native_ordered_list::node_type, public hash_node { aux_node() { typedef typename native_ordered_list::node_type list_node_type; list_node_type::data.store( typename list_node_type::marked_data_ptr( static_cast( static_cast( this ))), atomics::memory_order_release ); } }; struct node_traits { static hash_node * to_node_ptr( value_type& v ) { return static_cast( &v ); } static hash_node * to_node_ptr( value_type * v ) { return static_cast( v ); } static hash_node const * to_node_ptr( value_type const& v ) { return static_cast( &v ); } static hash_node const * to_node_ptr( value_type const * v ) { return static_cast( v ); } }; template struct make_compare_from_less: public cds::opt::details::make_comparator_from_less { typedef cds::opt::details::make_comparator_from_less base_class; template int operator()( value_type const& v, search_value_type const& q ) const { hash_node const& n = static_cast( v ); if ( n.m_nHash != q.nHash ) return n.m_nHash < q.nHash ? 
-1 : 1; assert( !n.is_dummy()); return base_class()(v, q.val); } template int operator()( search_value_type const& q, value_type const& v ) const { hash_node const& n = static_cast( v ); if ( n.m_nHash != q.nHash ) return q.nHash < n.m_nHash ? -1 : 1; assert( !n.is_dummy()); return base_class()(q.val, v); } int operator()( value_type const& lhs, value_type const& rhs ) const { hash_node const& n1 = static_cast( lhs ); hash_node const& n2 = static_cast( rhs ); if ( n1.m_nHash != n2.m_nHash ) return n1.m_nHash < n2.m_nHash ? -1 : 1; if ( n1.is_dummy()) { assert( n2.is_dummy()); return 0; } assert( !n1.is_dummy() && !n2.is_dummy()); return base_class()( lhs, rhs ); } }; typedef typename native_ordered_list::template rebind_traits< opt::compare< key_compare > , opt::disposer< wrapped_disposer > , opt::boundary_node_type< aux_node > >::type result; }; template using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list::value >; template struct select_list_iterator; template struct select_list_iterator { typedef typename OrderedList::iterator type; }; template struct select_list_iterator { typedef typename OrderedList::const_iterator type; }; template class iterator_type { typedef OrderedList ordered_list_type; friend class iterator_type ; protected: typedef typename select_list_iterator::type list_iterator; typedef NodeTraits node_traits; private: list_iterator m_itCur; list_iterator m_itEnd; public: typedef typename list_iterator::value_ptr value_ptr; typedef typename list_iterator::value_ref value_ref; public: iterator_type() {} iterator_type( iterator_type const& src ) : m_itCur( src.m_itCur ) , m_itEnd( src.m_itEnd ) {} // This ctor should be protected... iterator_type( list_iterator itCur, list_iterator itEnd ) : m_itCur( itCur ) , m_itEnd( itEnd ) { // skip dummy nodes while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy()) ++m_itCur; } value_ptr operator ->() const { return m_itCur.operator->(); } value_ref operator *() const { return m_itCur.operator*(); } /// Pre-increment iterator_type& operator ++() { if ( m_itCur != m_itEnd ) { do { ++m_itCur; } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy()); } return *this; } iterator_type& operator = (iterator_type const& src) { m_itCur = src.m_itCur; m_itEnd = src.m_itEnd; return *this; } template bool operator ==(iterator_type const& i ) const { return m_itCur == i.m_itCur; } template bool operator !=(iterator_type const& i ) const { return m_itCur != i.m_itCur; } protected: list_iterator const& underlying_iterator() const { return m_itCur; } }; } // namespace details //@endcond //@cond // Helper functions template static inline size_t regular_hash( size_t nHash ) { return static_cast( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1); } template static inline size_t dummy_hash( size_t nHash ) { return static_cast( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1); } //@endcond } // namespace split_list //@cond // Forward declaration template class SplitListSet; //@endcond }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H libcds-2.3.3/cds/intrusive/ellen_bintree_dhp.h000066400000000000000000000006331341244201700213730ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H #define CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H #include #include #endif // #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H libcds-2.3.3/cds/intrusive/ellen_bintree_hp.h000066400000000000000000000006271341244201700212320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H #define CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H #include #include #endif // #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H libcds-2.3.3/cds/intrusive/ellen_bintree_rcu.h000066400000000000000000002327011341244201700214140ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H #define CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H #include #include #include #include #include #include namespace cds { namespace intrusive { //@cond namespace ellen_bintree { template <class RCU> struct base_node< cds::urcu::gc<RCU> >: public basic_node { typedef basic_node base_class; base_node * m_pNextRetired; typedef cds::urcu::gc<RCU> gc; ///< Garbage collector /// Constructs leaf (bInternal == false) or internal (bInternal == true) node explicit base_node( bool bInternal ) : basic_node( bInternal ) , m_pNextRetired( nullptr ) {} }; } // namespace ellen_bintree //@endcond /// Ellen's et al binary search tree (RCU specialization) /** @ingroup cds_intrusive_map @ingroup cds_intrusive_tree @anchor cds_intrusive_EllenBinTree_rcu Source: - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" %EllenBinTree is an unbalanced leaf-oriented binary search tree that implements the set abstract data type. Nodes maintain child pointers but not parent pointers. Every internal node has exactly two children, and all data of type \p T currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct the \p find operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes may or may not be in the set. \p Key type is a subset of \p T type. A key-extracting functor for converting an object of type \p T to an object of type \p Key must be defined. Due to the \p extract_min and \p extract_max member functions the \p %EllenBinTree can act as a priority queue. In this case you should provide a unique compound key, for example, the priority value plus some uniformly distributed random value. @attention Recall the tree is unbalanced. The complexity of operations is O(log N) for uniformly distributed random keys, but in the worst case the complexity is O(N). @note In the current implementation we do not use the helping technique described in the original paper. Instead of helping, when a thread encounters a concurrent operation it just spins waiting for the operation to complete. This solution greatly simplifies the tree implementation. Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p Key - key type, a subset of \p T - \p T - type to be stored in the tree's leaf nodes. The type must be based on \p ellen_bintree::node (for \p ellen_bintree::base_hook) or it must have a member of type \p ellen_bintree::node (for \p ellen_bintree::member_hook).
- \p Traits - tree traits, default is \p ellen_bintree::traits It is possible to declare an option-based tree with the \p ellen_bintree::make_traits metafunction instead of the \p Traits template argument. @note Before including this header you should include the appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files. @anchor cds_intrusive_EllenBinTree_rcu_less Predicate requirements \p Traits::less, \p Traits::compare and other predicates used with member functions should accept at least parameters of type \p T and \p Key in any combination. For example, for a \p Foo struct with a \p std::string key field the appropriate \p less functor is: \code struct Foo: public cds::intrusive::ellen_bintree::node< ... > { std::string m_strKey; ... }; struct less { bool operator()( Foo const& v1, Foo const& v2 ) const { return v1.m_strKey < v2.m_strKey ; } bool operator()( Foo const& v, std::string const& s ) const { return v.m_strKey < s ; } bool operator()( std::string const& s, Foo const& v ) const { return s < v.m_strKey ; } // Support comparing std::string and char const * bool operator()( std::string const& s, char const * p ) const { return s.compare(p) < 0 ; } bool operator()( Foo const& v, char const * p ) const { return v.m_strKey.compare(p) < 0 ; } bool operator()( char const * p, std::string const& s ) const { return s.compare(p) > 0; } bool operator()( char const * p, Foo const& v ) const { return v.m_strKey.compare(p) > 0; } }; \endcode @anchor cds_intrusive_EllenBinTree_usage Usage Suppose we have the following \p Foo struct with a string key: \code struct Foo { std::string m_strKey ; // The key //... // other non-key data }; \endcode We want to use an RCU-based \p %cds::intrusive::EllenBinTree set for \p Foo data. We may use a base hook or a member hook. Consider the base hook variant. First, we derive the \p Foo struct from \p cds::intrusive::ellen_bintree::node: \code #include #include // RCU type we use typedef cds::urcu::gc< cds::urcu::general_buffered<> > gpb_rcu; struct Foo: public cds::intrusive::ellen_bintree::node< gpb_rcu > { std::string m_strKey ; // The key //... // other non-key data }; \endcode Second, we need to implement auxiliary structures and functors: - a key extractor functor for extracting the key from a \p Foo object. Such a functor is necessary because the tree's internal nodes store the keys. - a \p less predicate. We want our set to accept \p std::string and char const * parameters for searching, so our \p less predicate will not be trivial, see below. - the item counting feature: we want our set's \p size() member function to return the actual item count.
\code // Key extractor functor struct my_key_extractor { void operator ()( std::string& key, Foo const& src ) const { key = src.m_strKey; } }; // Less predicate struct my_less { bool operator()( Foo const& v1, Foo const& v2 ) const { return v1.m_strKey < v2.m_strKey ; } bool operator()( Foo const& v, std::string const& s ) const { return v.m_strKey < s ; } bool operator()( std::string const& s, Foo const& v ) const { return s < v.m_strKey ; } // Support comparing std::string and char const * bool operator()( std::string const& s, char const * p ) const { return s.compare(p) < 0 ; } bool operator()( Foo const& v, char const * p ) const { return v.m_strKey.compare(p) < 0 ; } bool operator()( char const * p, std::string const& s ) const { return s.compare(p) > 0; } bool operator()( char const * p, Foo const& v ) const { return v.m_strKey.compare(p) > 0; } }; // Tree traits for our set // It is necessary to specify only those typedefs that differ from // cds::intrusive::ellen_bintree::traits defaults. struct set_traits: public cds::intrusive::ellen_bintree::traits { typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc<gpb_rcu> > hook; typedef my_key_extractor key_extractor; typedef my_less less; typedef cds::atomicity::item_counter item_counter; }; \endcode Now we declare the \p %EllenBinTree set and use it: \code typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, set_traits > set_type; set_type theSet; // ... \endcode Instead of declaring the \p set_traits type traits we can use option-based syntax with the \p ellen_bintree::make_traits metafunction, for example: \code typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, typename cds::intrusive::ellen_bintree::make_traits< cds::opt::hook< cds::intrusive::ellen_bintree::base_hook< cds::opt::gc<gpb_rcu> > > ,cds::intrusive::ellen_bintree::key_extractor< my_key_extractor > ,cds::opt::less< my_less > ,cds::opt::item_counter< cds::atomicity::item_counter > >::type > set_type2; \endcode Functionally, \p set_type and \p set_type2 are equivalent. Member-hooked tree Sometimes we cannot use a base hook, for example, when the \p Foo structure is external. In that case we can use the member hook feature. \code #include #include // Struct Foo is external and its declaration cannot be modified. struct Foo { std::string m_strKey ; // The key //...
// other non-key data }; // RCU type we use typedef cds::urcu::gc< cds::urcu::general_buffered<> > gpb_rcu; // Foo wrapper struct MyFoo { Foo m_foo; cds::intrusive::ellen_bintree::node< gpb_rcu > set_hook; // member hook }; // Key extractor functor struct member_key_extractor { void operator ()( std::string& key, MyFoo const& src ) const { key = src.m_foo.m_strKey; } }; // Less predicate struct member_less { bool operator()( MyFoo const& v1, MyFoo const& v2 ) const { return v1.m_foo.m_strKey < v2.m_foo.m_strKey ; } bool operator()( MyFoo const& v, std::string const& s ) const { return v.m_foo.m_strKey < s ; } bool operator()( std::string const& s, MyFoo const& v ) const { return s < v.m_foo.m_strKey ; } // Support comparing std::string and char const * bool operator()( std::string const& s, char const * p ) const { return s.compare(p) < 0 ; } bool operator()( MyFoo const& v, char const * p ) const { return v.m_foo.m_strKey.compare(p) < 0 ; } bool operator()( char const * p, std::string const& s ) const { return s.compare(p) > 0; } bool operator()( char const * p, MyFoo const& v ) const { return v.m_foo.m_strKey.compare(p) > 0; } }; // Tree traits for our member-based set struct member_set_traits: public cds::intrusive::ellen_bintree::traits { typedef cds::intrusive::ellen_bintree::member_hook< offsetof(MyFoo, set_hook), cds::opt::gc<gpb_rcu> > hook; typedef member_key_extractor key_extractor; typedef member_less less; typedef cds::atomicity::item_counter item_counter; }; // Tree containing MyFoo objects typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, MyFoo, member_set_traits > member_set_type; member_set_type theMemberSet; \endcode Multiple containers Sometimes our \p Foo struct must be used in several different containers. Suppose the \p Foo struct has two key fields: \code struct Foo { std::string m_strKey ; // string key int m_nKey ; // int key //... // other non-key data fields }; \endcode We want to build two intrusive \p %EllenBinTree sets: one indexed on the \p Foo::m_strKey field, another indexed on the \p Foo::m_nKey field. To handle such a case we use a tag option for the tree's hook: \code #include #include // RCU type we use typedef cds::urcu::gc< cds::urcu::general_buffered<> > gpb_rcu; // Declare tag structs struct int_tag ; // int key tag struct string_tag ; // string key tag // Foo struct is derived from two ellen_bintree::node classes // with different tags struct Foo : public cds::intrusive::ellen_bintree::node< gpb_rcu, cds::opt::tag< string_tag >> , public cds::intrusive::ellen_bintree::node< gpb_rcu, cds::opt::tag< int_tag >> { std::string m_strKey ; // string key int m_nKey ; // int key //...
// other non-key data fields }; // String key extractor functor struct string_key_extractor { void operator ()( std::string& key, Foo const& src ) const { key = src.m_strKey; } }; // Int key extractor functor struct int_key_extractor { void operator ()( int& key, Foo const& src ) const { key = src.m_nKey; } }; // String less predicate struct string_less { bool operator()( Foo const& v1, Foo const& v2 ) const { return v1.m_strKey < v2.m_strKey ; } bool operator()( Foo const& v, std::string const& s ) const { return v.m_strKey < s ; } bool operator()( std::string const& s, Foo const& v ) const { return s < v.m_strKey ; } // Support comparing std::string and char const * bool operator()( std::string const& s, char const * p ) const { return s.compare(p) < 0 ; } bool operator()( Foo const& v, char const * p ) const { return v.m_strKey.compare(p) < 0 ; } bool operator()( char const * p, std::string const& s ) const { return s.compare(p) > 0; } bool operator()( char const * p, Foo const& v ) const { return v.m_strKey.compare(p) > 0; } }; // Int less predicate struct int_less { bool operator()( Foo const& v1, Foo const& v2 ) const { return v1.m_nKey < v2.m_nKey ; } bool operator()( Foo const& v, int n ) const { return v.m_nKey < n ; } bool operator()( int n, Foo const& v ) const { return n < v.m_nKey ; } }; // Type traits for string-indexed set struct string_set_traits: public cds::intrusive::ellen_bintree::traits { typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc<gpb_rcu>, cds::opt::tag< string_tag > > hook; typedef string_key_extractor key_extractor; typedef string_less less; typedef cds::atomicity::item_counter item_counter; }; // Type traits for int-indexed set struct int_set_traits: public cds::intrusive::ellen_bintree::traits { typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc<gpb_rcu>, cds::opt::tag< int_tag > > hook; typedef int_key_extractor key_extractor; typedef int_less less; typedef cds::atomicity::item_counter item_counter; }; // Declare string-indexed set typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, string_set_traits > string_set_type; string_set_type theStringSet; // Declare int-indexed set typedef cds::intrusive::EllenBinTree< gpb_rcu, int, Foo, int_set_traits > int_set_type; int_set_type theIntSet; // Now we can use theStringSet and theIntSet in our program // ...
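// For instance (an illustrative sketch, not part of the original example;
// error handling and object ownership are omitted):
Foo * p = new Foo;
p->m_strKey = "apple";
p->m_nKey = 10;
// the same object can be linked into both sets because it carries
// two differently-tagged hooks
theStringSet.insert( *p );
theIntSet.insert( *p );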
\endcode */ template < class RCU, typename Key, typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = ellen_bintree::traits #else class Traits #endif > class EllenBinTree< cds::urcu::gc<RCU>, Key, T, Traits > { public: typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector typedef Key key_type; ///< type of a key stored in internal nodes; key is a part of \p value_type typedef T value_type; ///< type of value stored in the binary tree typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type typedef typename traits::disposer disposer; ///< leaf node disposer typedef typename traits::back_off back_off; ///< back-off strategy protected: //@cond typedef ellen_bintree::base_node< gc > tree_node; ///< Base type of tree node typedef node_type leaf_node; ///< Leaf node type typedef ellen_bintree::internal_node< key_type, leaf_node > internal_node; ///< Internal node type typedef ellen_bintree::update_desc< leaf_node, internal_node> update_desc; ///< Update descriptor typedef typename update_desc::update_ptr update_ptr; ///< Marked pointer to update descriptor //@endcond public: using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node public: # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< Node traits # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; struct node_traits: public get_node_traits< value_type, node_type, hook>::type { static internal_node const& to_internal_node( tree_node const& n ) { assert( n.is_internal()); return static_cast<internal_node const&>( n ); } static leaf_node const& to_leaf_node( tree_node const& n ) { assert( n.is_leaf()); return static_cast<leaf_node const&>( n ); } }; # endif typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< Memory ordering.
See \p cds::opt::memory_model option typedef typename traits::stat stat; ///< internal statistics type typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename traits::key_extractor key_extractor; ///< key extracting functor typedef typename traits::node_allocator node_allocator; ///< Internal node allocator typedef typename traits::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking protected: //@cond typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare; typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy; typedef cds::details::Allocator< internal_node, node_allocator > cxx_node_allocator; typedef cds::details::Allocator< update_desc, update_desc_allocator > cxx_update_desc_allocator; struct search_result { internal_node * pGrandParent; internal_node * pParent; leaf_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf ; // true if pLeaf is right child of pParent, false otherwise bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise search_result() :pGrandParent( nullptr ) , pParent( nullptr ) , pLeaf( nullptr ) ,bRightLeaf( false ) ,bRightParent( false ) {} }; //@endcond protected: //@cond internal_node m_Root; ///< Tree root node (key= Infinite2) leaf_node m_LeafInf1; leaf_node m_LeafInf2; //@endcond item_counter m_ItemCounter; ///< item counter mutable stat m_Stat; ///< internal statistics protected: //@cond static void free_leaf_node( value_type* p ) { disposer()( p ); } static void free_leaf_node_void( void* p ) { free_leaf_node( reinterpret_cast( p )); } internal_node * alloc_internal_node() const { m_Stat.onInternalNodeCreated(); internal_node * pNode = cxx_node_allocator().New(); //pNode->clean(); return pNode; } static void free_internal_node( internal_node* pNode ) { cxx_node_allocator().Delete( pNode ); } static void free_internal_node_void( void* pNode ) { free_internal_node( reinterpret_cast( pNode )); } struct internal_node_deleter { void operator()( internal_node * p) const { free_internal_node( p ); } }; typedef std::unique_ptr< internal_node, internal_node_deleter> unique_internal_node_ptr; update_desc * alloc_update_desc() const { m_Stat.onUpdateDescCreated(); return cxx_update_desc_allocator().New(); } static void free_update_desc( update_desc* pDesc ) { cxx_update_desc_allocator().Delete( pDesc ); } static void free_update_desc_void( void* pDesc ) { free_update_desc( reinterpret_cast( pDesc )); } class retired_list { update_desc * pUpdateHead; tree_node * pNodeHead; private: class forward_iterator { update_desc * m_pUpdate; tree_node * m_pNode; public: forward_iterator( retired_list const& l ) : m_pUpdate( l.pUpdateHead ) , m_pNode( l.pNodeHead ) {} forward_iterator() : m_pUpdate( nullptr ) , m_pNode( nullptr ) {} cds::urcu::retired_ptr operator *() { if ( m_pUpdate ) { return cds::urcu::retired_ptr( reinterpret_cast( m_pUpdate ), free_update_desc_void ); } if ( m_pNode ) { if ( m_pNode->is_leaf()) { return cds::urcu::retired_ptr( reinterpret_cast( node_traits::to_value_ptr( static_cast( m_pNode ))), free_leaf_node_void ); } else { return cds::urcu::retired_ptr( reinterpret_cast( static_cast( m_pNode )), free_internal_node_void ); } } return 
cds::urcu::retired_ptr( nullptr, free_update_desc_void ); } void operator ++() { if ( m_pUpdate ) { m_pUpdate = m_pUpdate->pNextRetire; return; } if ( m_pNode ) m_pNode = m_pNode->m_pNextRetired; } friend bool operator ==( forward_iterator const& i1, forward_iterator const& i2 ) { return i1.m_pUpdate == i2.m_pUpdate && i1.m_pNode == i2.m_pNode; } friend bool operator !=( forward_iterator const& i1, forward_iterator const& i2 ) { return !( i1 == i2 ); } }; public: retired_list() : pUpdateHead( nullptr ) , pNodeHead( nullptr ) {} ~retired_list() { gc::batch_retire( forward_iterator(*this), forward_iterator()); } void push( update_desc * p ) { p->pNextRetire = pUpdateHead; pUpdateHead = p; } void push( tree_node * p ) { p->m_pNextRetired = pNodeHead; pNodeHead = p; } }; void retire_node( tree_node * pNode, retired_list& rl ) const { if ( pNode->is_leaf()) { assert( static_cast( pNode ) != &m_LeafInf1 ); assert( static_cast( pNode ) != &m_LeafInf2 ); } else { assert( static_cast( pNode ) != &m_Root ); m_Stat.onInternalNodeDeleted(); } rl.push( pNode ); } void retire_update_desc( update_desc * p, retired_list& rl, bool bDirect ) const { m_Stat.onUpdateDescDeleted(); if ( bDirect ) free_update_desc( p ); else rl.push( p ); } void make_empty_tree() { m_Root.infinite_key( 2 ); m_LeafInf1.infinite_key( 1 ); m_LeafInf2.infinite_key( 2 ); m_Root.m_pLeft.store( &m_LeafInf1, memory_model::memory_order_relaxed ); m_Root.m_pRight.store( &m_LeafInf2, memory_model::memory_order_release ); } //@endcond public: /// Default constructor EllenBinTree() { static_assert( !std::is_same< key_extractor, opt::none >::value, "The key extractor option must be specified" ); make_empty_tree(); } /// Clears the tree ~EllenBinTree() { unsafe_clear(); } /// Inserts new node /** The function inserts \p val in the tree if it does not contain an item with key equal to \p val. The function applies RCU lock internally. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the tree - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this tree's item by concurrent threads. The user-defined functor is called only if the inserting is success. RCU \p synchronize method can be called. RCU should not be locked. 
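For example, a two-phase creation sketch (illustrative only; it assumes the \p Foo
and \p set_type definitions from the usage section above):
\code
Foo * p = new Foo;
p->m_strKey = "key";        // initialize the key part before inserting
bool ok = theSet.insert( *p, []( Foo& item ) {
    // called only if the insertion succeeded; initialize non-key fields here
});
\endcode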
*/ template <typename Func> bool insert( value_type& val, Func f ) { check_deadlock_policy::check(); unique_internal_node_ptr pNewInternal; retired_list updRetire; back_off bkoff; { rcu_lock l; search_result res; for ( ;; ) { if ( search( res, val, node_compare())) { if ( pNewInternal.get()) m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node m_Stat.onInsertFailed(); return false; } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !pNewInternal.get()) pNewInternal.reset( alloc_internal_node()); if ( try_insert( val, pNewInternal.get(), res, updRetire )) { f( val ); pNewInternal.release() ; // internal node is linked into the tree and should not be deleted break; } } else help( res.updParent, updRetire ); bkoff(); m_Stat.onInsertRetry(); } } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor \p func signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If a new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor can change non-key fields of the \p item; however, \p func must guarantee that while it is changing the item no other modifications could be made on this item by concurrent threads. RCU \p synchronize method can be called. RCU should not be locked. Returns \p std::pair<bool, bool> where \p first is \p true if the operation is successful, i.e. the node has been inserted or updated, \p second is \p true if a new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template <typename Func> std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true ) { check_deadlock_policy::check(); unique_internal_node_ptr pNewInternal; retired_list updRetire; back_off bkoff; { rcu_lock l; search_result res; for ( ;; ) { if ( search( res, val, node_compare())) { func( false, *node_traits::to_value_ptr( res.pLeaf ), val ); if ( pNewInternal.get()) m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node m_Stat.onUpdateExist(); return std::make_pair( true, false ); } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !bAllowInsert ) return std::make_pair( false, false ); if ( !pNewInternal.get()) pNewInternal.reset( alloc_internal_node()); if ( try_insert( val, pNewInternal.get(), res, updRetire )) { func( true, val, val ); pNewInternal.release() ; // internal node is linked into the tree and should not be deleted break; } } else help( res.updParent, updRetire ); bkoff(); m_Stat.onUpdateRetry(); } } ++m_ItemCounter; m_Stat.onUpdateNew(); return std::make_pair( true, true ); } //@cond template <typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the tree /** The function searches the item \p val in the tree and unlinks it from the tree if it is found and is equal to \p val.
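For example (illustrative; \p p is assumed to point to the item inserted in the sketch above):
\code
// unlink() succeeds only for the very object that is linked into the tree;
// an equal-keyed copy of the item cannot be unlinked through it
bool ok = theSet.unlink( *p );
\endcode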
Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key and deletes the item found. \p %unlink() finds an item by key and deletes it only if \p val is an item of the tree, i.e. the pointer to the item found is equal to &val. RCU \p synchronize method can be called. RCU should not be locked. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC asynchronously. The function returns \p true on success and \p false otherwise. */ bool unlink( value_type& val ) { return erase_( val, node_compare(), []( value_type const& v, leaf_node const& n ) -> bool { return &v == node_traits::to_value_ptr( n ); }, [](value_type const&) {} ); } /// Deletes the item from the tree /** \anchor cds_intrusive_EllenBinTree_rcu_erase The function searches an item with key equal to \p key in the tree, unlinks it from the tree, and returns \p true. If the item with key equal to \p key is not found the function returns \p false. Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q that may differ from \p value_type. RCU \p synchronize method can be called. RCU should not be locked. */ template <typename Q> bool erase( const Q& key ) { return erase_( key, node_compare(), []( Q const&, leaf_node const& ) -> bool { return true; }, [](value_type const&) {} ); } /// Deletes the item from the tree with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_erase "erase(Q const&)" but \p pred predicate is used for key comparison. \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. */ template <typename Q, typename Less> bool erase_with( const Q& key, Less pred ) { CDS_UNUSED( pred ); typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less<Less>, node_traits > compare_functor; return erase_( key, compare_functor(), []( Q const&, leaf_node const& ) -> bool { return true; }, [](value_type const&) {} ); } /// Deletes the item from the tree /** \anchor cds_intrusive_EllenBinTree_rcu_erase_func The function searches an item with key equal to \p key in the tree, calls the \p f functor with the item found, unlinks it from the tree, and returns \p true. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p key is not found the function returns \p false. Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q that may differ from \p value_type. RCU \p synchronize method can be called. RCU should not be locked. */ template <typename Q, typename Func> bool erase( Q const& key, Func f ) { return erase_( key, node_compare(), []( Q const&, leaf_node const& ) -> bool { return true; }, f ); } /// Deletes the item from the tree with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_erase_func "erase(Q const&, Func)" but \p pred predicate is used for key comparison. \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree.
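For example (an illustrative sketch reusing \p my_less and \p theSet from the usage
section above):
\code
bool removed = theSet.erase_with( "key", my_less(), []( Foo const& item ) {
    // the functor is called with the unlinked item;
    // the disposer is invoked asynchronously later
    std::cout << "erasing " << item.m_strKey << std::endl;
});
\endcode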
*/ template <typename Q, typename Less, typename Func> bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less<Less>, node_traits > compare_functor; return erase_( key, compare_functor(), []( Q const&, leaf_node const& ) -> bool { return true; }, f ); } /// Extracts an item with minimal key from the tree /** The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item. If the tree is empty the function returns an empty \p exempt_ptr. @note Due to the concurrent nature of the tree, the function extracts an item with a nearly minimal key. It means that the function gets the leftmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key less than the leftmost item's key. So, the function returns the item with minimum key at the moment of traversal. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_min() { return exempt_ptr( extract_min_()); } /// Extracts an item with maximal key from the tree /** The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item. If the tree is empty the function returns an empty \p exempt_ptr. @note Due to the concurrent nature of the tree, the function extracts an item with a nearly maximal key. It means that the function gets the rightmost leaf of the tree and tries to unlink it. During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key. So, the function returns the item with maximum key at the moment of traversal. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ exempt_ptr extract_max() { return exempt_ptr( extract_max_()); } /// Extracts an item from the tree /** \anchor cds_intrusive_EllenBinTree_rcu_extract The function searches an item with key equal to \p key in the tree, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. */ template <typename Q> exempt_ptr extract( Q const& key ) { return exempt_ptr( extract_( key, node_compare())); } /// Extracts an item from the set using \p pred for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key comparison. \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. */ template <typename Q, typename Less> exempt_ptr extract_with( Q const& key, Less pred ) { return exempt_ptr( extract_with_( key, pred )); } /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise.
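For example (illustrative; \p my_less from the usage section accepts \p std::string
and \p char const* keys):
\code
bool found = theSet.contains( std::string( "key" ));
\endcode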
The function applies RCU lock internally. */ template <typename Q> bool contains( Q const& key ) const { rcu_lock l; search_result res; if ( search( res, key, node_compare())) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparison. \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "Predicate requirements". \p Less must imply the same element order as the comparator used for building the set. \p pred should accept arguments of type \p Q, \p key_type, \p value_type in any combination. */ template <typename Q, typename Less> bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less<Less>, node_traits > compare_functor; rcu_lock l; search_result res; if ( search( res, key, compare_functor())) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } //@cond template <typename Q, typename Less> CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Finds the key \p key /** @anchor cds_intrusive_EllenBinTree_rcu_find_func The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the tree \p item. If such access is possible you must provide your own synchronization scheme at the item level to exclude unsafe item modifications. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q& key, Func f ) const { return find_( key, f ); } //@cond template <typename Q, typename Func> bool find( Q const& key, Func f ) const { return find_( key, f ); } //@endcond /// Finds the key \p key with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparison. \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. */ template <typename Q, typename Less, typename Func> bool find_with( Q& key, Less pred, Func f ) const { return find_with_( key, pred, f ); } //@cond template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) const { return find_with_( key, pred, f ); } //@endcond /// Finds \p key and returns the item found /** \anchor cds_intrusive_EllenBinTree_rcu_get The function searches the item with key equal to \p key and returns the pointer to the item found. If \p key is not found it returns \p nullptr. RCU should be locked before calling the function. The returned pointer is valid while RCU is locked.
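For example, a typical \p get() pattern (an illustrative sketch):
\code
{
    set_type::rcu_lock guard;    // RCU must be locked before calling get()
    Foo * p = theSet.get( std::string( "key" ));
    if ( p ) {
        // p may be dereferenced only while the RCU lock is held
    }
}   // after the lock is released the pointer must not be used
\endcode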
*/ template value_type * get( Q const& key ) const { return get_( key, node_compare()); } /// Finds \p key with \p pred predicate and return the item found /** The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the tree. */ template value_type * get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; return get_( key, compare_functor()); } /// Checks if the tree is empty bool empty() const { return m_Root.m_pLeft.load( memory_model::memory_order_relaxed )->is_leaf(); } /// Clears the tree (thread safe, not atomic) /** The function unlink all items from the tree. The function is thread safe but not atomic: in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each leaf the \ref disposer will be called after unlinking. RCU \p synchronize method can be called. RCU should not be locked. */ void clear() { for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min()) ep.release(); } /// Clears the tree (not thread safe) /** This function is not thread safe and may be called only when no other thread deals with the tree. The function is used in the tree destructor. */ void unsafe_clear() { rcu_lock l; while ( true ) { internal_node * pParent = nullptr; internal_node * pGrandParent = nullptr; tree_node * pLeaf = const_cast( &m_Root ); // Get leftmost leaf while ( pLeaf->is_internal()) { pGrandParent = pParent; pParent = static_cast( pLeaf ); pLeaf = pParent->m_pLeft.load( memory_model::memory_order_relaxed ); } if ( pLeaf->infinite_key()) { // The tree is empty return; } // Remove leftmost leaf and its parent node assert( pGrandParent ); assert( pParent ); assert( pLeaf->is_leaf()); pGrandParent->m_pLeft.store( pParent->m_pRight.load( memory_model::memory_order_relaxed ), memory_model::memory_order_relaxed ); free_leaf_node( node_traits::to_value_ptr( static_cast( pLeaf ))); free_internal_node( pParent ); } } /// Returns item count in the tree /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use \p empty() member function for that. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. 
*/ bool check_consistency() const { return check_consistency( &m_Root ); } protected: //@cond bool check_consistency( internal_node const * pRoot ) const { tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed ); tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed ); assert( pLeft ); assert( pRight ); if ( node_compare()( *pLeft, *pRoot ) < 0 && node_compare()( *pRoot, *pRight ) <= 0 && node_compare()( *pLeft, *pRight ) < 0 ) { bool bRet = true; if ( pLeft->is_internal()) bRet = check_consistency( static_cast( pLeft )); assert( bRet ); if ( bRet && pRight->is_internal()) bRet = bRet && check_consistency( static_cast( pRight )); assert( bRet ); return bRet; } return false; } void help( update_ptr /*pUpdate*/, retired_list& /*rl*/ ) { /* switch ( pUpdate.bits()) { case update_desc::IFlag: help_insert( pUpdate.ptr()); m_Stat.onHelpInsert(); break; case update_desc::DFlag: //help_delete( pUpdate.ptr(), rl ); //m_Stat.onHelpDelete(); break; case update_desc::Mark: //help_marked( pUpdate.ptr()); //m_Stat.onHelpMark(); break; } */ } void help_insert( update_desc * pOp ) { assert( gc::is_locked()); tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); if ( pOp->iInfo.bRightLeaf ) { pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed ); } else { pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed ); } update_ptr cur( pOp, update_desc::IFlag ); pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), memory_model::memory_order_release, atomics::memory_order_relaxed ); } bool check_delete_precondition( search_result& res ) { assert( res.pGrandParent != nullptr ); return static_cast( res.pGrandParent->get_child( res.bRightParent, memory_model::memory_order_relaxed )) == res.pParent && static_cast( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_relaxed )) == res.pLeaf; } bool help_delete( update_desc * pOp, retired_list& rl ) { assert( gc::is_locked()); update_ptr pUpdate( pOp->dInfo.pUpdateParent ); update_ptr pMark( pOp, update_desc::Mark ); if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { help_marked( pOp ); retire_node( pOp->dInfo.pParent, rl ); // For extract operations the leaf should NOT be disposed if ( pOp->dInfo.bDisposeLeaf ) retire_node( pOp->dInfo.pLeaf, rl ); retire_update_desc( pOp, rl, false ); return true; } else if ( pUpdate == pMark ) { // some other thread is processing help_marked() help_marked( pOp ); m_Stat.onHelpMark(); return true; } else { // pUpdate has been changed by CAS help( pUpdate, rl ); // Undo grandparent dInfo update_ptr pDel( pOp, update_desc::DFlag ); if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), memory_model::memory_order_release, atomics::memory_order_relaxed )) { retire_update_desc( pOp, rl, false ); } return false; } } void help_marked( update_desc * pOp ) { assert( gc::is_locked()); tree_node * p = pOp->dInfo.pParent; if ( pOp->dInfo.bRightParent ) { pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( p, pOp->dInfo.pParent->get_child( !pOp->dInfo.bRightLeaf, memory_model::memory_order_acquire ), memory_model::memory_order_release, atomics::memory_order_relaxed ); } else { 
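// the parent being removed is the left child of its grandparent:
// swing the grandparent's left pointer directly to the sibling of the removed leaf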
pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( p, pOp->dInfo.pParent->get_child( !pOp->dInfo.bRightLeaf, memory_model::memory_order_acquire ), memory_model::memory_order_release, atomics::memory_order_relaxed ); } update_ptr upd( pOp, update_desc::DFlag ); pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), memory_model::memory_order_release, atomics::memory_order_relaxed ); } template bool search( search_result& res, KeyValue const& key, Compare cmp ) const { assert( gc::is_locked()); internal_node * pParent; internal_node * pGrandParent = nullptr; tree_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf; bool bRightParent = false; int nCmp = 0; retry: pParent = nullptr; pLeaf = const_cast( &m_Root ); updParent = nullptr; bRightLeaf = false; while ( pLeaf->is_internal()) { pGrandParent = pParent; pParent = static_cast( pLeaf ); bRightParent = bRightLeaf; updGrandParent = updParent; updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); switch ( updParent.bits()) { case update_desc::DFlag: case update_desc::Mark: m_Stat.onSearchRetry(); goto retry; } nCmp = cmp( key, *pParent ); bRightLeaf = nCmp >= 0; pLeaf = pParent->get_child( nCmp >= 0, memory_model::memory_order_acquire ); } assert( pLeaf->is_leaf()); nCmp = cmp( key, *static_cast(pLeaf)); res.pGrandParent = pGrandParent; res.pParent = pParent; res.pLeaf = static_cast( pLeaf ); res.updParent = updParent; res.updGrandParent = updGrandParent; res.bRightParent = bRightParent; res.bRightLeaf = bRightLeaf; return nCmp == 0; } bool search_min( search_result& res ) const { assert( gc::is_locked()); internal_node * pParent; internal_node * pGrandParent = nullptr; tree_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; retry: pParent = nullptr; pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal()) { pGrandParent = pParent; pParent = static_cast( pLeaf ); updGrandParent = updParent; updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); switch ( updParent.bits()) { case update_desc::DFlag: case update_desc::Mark: m_Stat.onSearchRetry(); goto retry; } pLeaf = pParent->m_pLeft.load( memory_model::memory_order_acquire ); } if ( pLeaf->infinite_key()) return false; res.pGrandParent = pGrandParent; res.pParent = pParent; assert( pLeaf->is_leaf()); res.pLeaf = static_cast( pLeaf ); res.updParent = updParent; res.updGrandParent = updGrandParent; res.bRightParent = false; res.bRightLeaf = false; return true; } bool search_max( search_result& res ) const { assert( gc::is_locked()); internal_node * pParent; internal_node * pGrandParent = nullptr; tree_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf; bool bRightParent = false; retry: pParent = nullptr; pLeaf = const_cast( &m_Root ); bRightLeaf = false; while ( pLeaf->is_internal()) { pGrandParent = pParent; pParent = static_cast( pLeaf ); bRightParent = bRightLeaf; updGrandParent = updParent; updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); switch ( updParent.bits()) { case update_desc::DFlag: case update_desc::Mark: m_Stat.onSearchRetry(); goto retry; } bRightLeaf = !pParent->infinite_key(); pLeaf = pParent->get_child( bRightLeaf, memory_model::memory_order_acquire ); } if ( pLeaf->infinite_key()) return false; res.pGrandParent = pGrandParent; res.pParent = pParent; assert( pLeaf->is_leaf()); res.pLeaf = static_cast( pLeaf ); res.updParent = updParent; res.updGrandParent = updGrandParent; 
res.bRightParent = bRightParent; res.bRightLeaf = bRightLeaf; return true; } template bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) { check_deadlock_policy::check(); retired_list updRetire; update_desc * pOp = nullptr; search_result res; back_off bkoff; { rcu_lock l; for ( ;; ) { if ( !search( res, val, cmp ) || !eq( val, *res.pLeaf )) { if ( pOp ) retire_update_desc( pOp, updRetire, false ); m_Stat.onEraseFailed(); return false; } if ( res.updGrandParent.bits() != update_desc::Clean ) help( res.updGrandParent, updRetire ); else if ( res.updParent.bits() != update_desc::Clean ) help( res.updParent, updRetire ); else { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.bDisposeLeaf = true; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) { if ( help_delete( pOp, updRetire )) { // res.pLeaf is not deleted yet since RCU is blocked f( *node_traits::to_value_ptr( res.pLeaf )); break; } pOp = nullptr; } else { // updGP has been changed by CAS help( updGP, updRetire ); } } } bkoff(); m_Stat.onEraseRetry(); } } --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } template value_type * extract_with_( Q const& val, Less /*pred*/ ) { typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; return extract_( val, compare_functor()); } template value_type * extract_( Q const& val, Compare cmp ) { check_deadlock_policy::check(); retired_list updRetire; update_desc * pOp = nullptr; search_result res; back_off bkoff; value_type * pResult; { rcu_lock l; for ( ;; ) { if ( !search( res, val, cmp )) { if ( pOp ) retire_update_desc( pOp, updRetire, false ); m_Stat.onEraseFailed(); return nullptr; } if ( res.updGrandParent.bits() != update_desc::Clean ) help( res.updGrandParent, updRetire ); else if ( res.updParent.bits() != update_desc::Clean ) help( res.updParent, updRetire ); else { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.bDisposeLeaf = false; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) { if ( help_delete( pOp, updRetire )) { pResult = node_traits::to_value_ptr( res.pLeaf ); break; } pOp = nullptr; } else { // updGP has been changed by CAS help( updGP, updRetire ); } } } bkoff(); m_Stat.onEraseRetry(); } } --m_ItemCounter; m_Stat.onEraseSuccess(); return pResult; } value_type * extract_max_() { check_deadlock_policy::check(); retired_list updRetire; update_desc * pOp = nullptr; search_result res; back_off bkoff; value_type * pResult; { rcu_lock l; for ( ;; ) { if ( !search_max( res )) { // Tree is empty if ( pOp ) retire_update_desc( pOp, updRetire, false ); m_Stat.onExtractMaxFailed(); return nullptr; } if ( 
res.updGrandParent.bits() != update_desc::Clean ) help( res.updGrandParent, updRetire ); else if ( res.updParent.bits() != update_desc::Clean ) help( res.updParent, updRetire ); else { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.bDisposeLeaf = false; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) { if ( help_delete( pOp, updRetire )) { pResult = node_traits::to_value_ptr( res.pLeaf ); break; } pOp = nullptr; } else { // updGP has been changed by CAS help( updGP, updRetire ); } } } bkoff(); m_Stat.onExtractMaxRetry(); } } --m_ItemCounter; m_Stat.onExtractMaxSuccess(); return pResult; } value_type * extract_min_() { check_deadlock_policy::check(); retired_list updRetire; update_desc * pOp = nullptr; search_result res; back_off bkoff; value_type * pResult; { rcu_lock l; for ( ;; ) { if ( !search_min( res )) { // Tree is empty if ( pOp ) retire_update_desc( pOp, updRetire, false ); m_Stat.onExtractMinFailed(); return nullptr; } if ( res.updGrandParent.bits() != update_desc::Clean ) help( res.updGrandParent, updRetire ); else if ( res.updParent.bits() != update_desc::Clean ) help( res.updParent, updRetire ); else { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.bDisposeLeaf = false; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) { if ( help_delete( pOp, updRetire )) { pResult = node_traits::to_value_ptr( res.pLeaf ); break; } pOp = nullptr; } else { // updGP has been changed by CAS help( updGP, updRetire ); } } } bkoff(); m_Stat.onExtractMinRetry(); } } --m_ItemCounter; m_Stat.onExtractMinSuccess(); return pResult; } template bool find_with_( Q& val, Less /*pred*/, Func f ) const { typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; rcu_lock l; search_result res; if ( search( res, val, compare_functor())) { assert( res.pLeaf ); f( *node_traits::to_value_ptr( res.pLeaf ), val ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template bool find_( Q& key, Func f ) const { rcu_lock l; search_result res; if ( search( res, key, node_compare())) { assert( res.pLeaf ); f( *node_traits::to_value_ptr( res.pLeaf ), key ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template value_type * get_( Q const& key, Compare cmp ) const { assert( gc::is_locked()); search_result res; if ( search( res, key, cmp )) { m_Stat.onFindSuccess(); return node_traits::to_value_ptr( res.pLeaf ); } m_Stat.onFindFailed(); return nullptr; } bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res, retired_list& updRetire ) { assert( gc::is_locked()); assert( 
res.updParent.bits() == update_desc::Clean ); // check search result if ( static_cast( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_relaxed )) == res.pLeaf ) { leaf_node * pNewLeaf = node_traits::to_node_ptr( val ); int nCmp = node_compare()( val, *res.pLeaf ); if ( nCmp < 0 ) { if ( res.pGrandParent ) { pNewInternal->infinite_key( 0 ); key_extractor()( pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf )); assert( !res.pLeaf->infinite_key()); } else { assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 ); pNewInternal->infinite_key( 1 ); } pNewInternal->m_pLeft.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); pNewInternal->m_pRight.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); } else { assert( !res.pLeaf->is_internal()); pNewInternal->infinite_key( 0 ); key_extractor()( pNewInternal->m_Key, val ); pNewInternal->m_pLeft.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); pNewInternal->m_pRight.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); assert( !res.pLeaf->infinite_key()); } update_desc * pOp = alloc_update_desc(); pOp->iInfo.pParent = res.pParent; pOp->iInfo.pNew = pNewInternal; pOp->iInfo.pLeaf = res.pLeaf; pOp->iInfo.bRightLeaf = res.bRightLeaf; update_ptr updCur( res.updParent.ptr()); if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) { // do insert help_insert( pOp ); retire_update_desc( pOp, updRetire, false ); return true; } else { // updCur has been updated by CAS help( updCur, updRetire ); retire_update_desc( pOp, updRetire, true ); } } return false; } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H libcds-2.3.3/cds/intrusive/fcqueue.h000066400000000000000000000342071341244201700173720ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FCQUEUE_H #define CDSLIB_INTRUSIVE_FCQUEUE_H #include #include #include #include namespace cds { namespace intrusive { /// \p FCQueue related definitions namespace fcqueue { /// \p FCQueue internal statistics template struct stat: public cds::algo::flat_combining::stat { typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type counter_type m_nEnqueue ; ///< Count of push operations counter_type m_nDequeue ; ///< Count of success pop operations counter_type m_nFailedDeq ; ///< Count of failed pop operations (pop from empty queue) counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled //@cond void onEnqueue() { ++m_nEnqueue; } void onDequeue( bool bFailed ) { if ( bFailed ) ++m_nFailedDeq; else ++m_nDequeue; } void onCollide() { ++m_nCollided; } //@endcond }; /// FCQueue dummy statistics, no overhead struct empty_stat: public cds::algo::flat_combining::empty_stat { //@cond void onEnqueue() {} void onDequeue(bool) {} void onCollide() {} //@endcond }; /// \p FCQueue type traits struct traits: public cds::algo::flat_combining::traits { typedef cds::intrusive::opt::v::empty_disposer disposer ; ///< Disposer to erase removed elements. 
Used only in \p FCQueue::clear() function typedef empty_stat stat; ///< Internal statistics static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" }; /// Metafunction converting option list to traits /** \p Options are: - any \p cds::algo::flat_combining::make_traits options - \p opt::disposer - the functor used to dispose removed items. Default is \p opt::intrusive::v::empty_disposer. This option is used only in \p FCQueue::clear() function. - \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default) - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled (\p false) */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace fcqueue /// Flat-combining intrusive queue /** @ingroup cds_intrusive_queue @ingroup cds_flat_combining_intrusive \ref cds_flat_combining_description "Flat combining" sequential intrusive queue. Template parameters: - \p T - a value type stored in the queue - \p Container - sequential intrusive container with \p push_back and \p pop_front functions. Default is \p boost::intrusive::list - \p Traits - type traits of flat combining, default is \p fcqueue::traits. \p fcqueue::make_traits metafunction can be used to construct \p %fcqueue::traits specialization */ template ,typename Traits = fcqueue::traits > class FCQueue #ifndef CDS_DOXYGEN_INVOKED : public cds::algo::flat_combining::container #endif { public: typedef T value_type; ///< Value type typedef Container container_type; ///< Sequential container type typedef Traits traits; ///< Queue traits typedef typename traits::disposer disposer; ///< The disposer functor. The disposer is used only in \ref clear() function typedef typename traits::stat stat; ///< Internal statistics type static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled protected: //@cond /// Queue operation IDs enum fc_operation { op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue op_deq, ///< Dequeue op_clear, ///< Clear op_clear_and_dispose ///< Clear and dispose }; /// Flat combining publication list record struct fc_record: public cds::algo::flat_combining::publication_record { value_type * pVal; ///< Value to enqueue or dequeue bool bEmpty; ///< \p true if the queue is empty }; //@endcond /// Flat combining kernel typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; protected: //@cond mutable fc_kernel m_FlatCombining; container_type m_Queue; //@endcond public: /// Initializes empty queue object FCQueue() {} /// Initializes empty queue object and gives flat combining parameters FCQueue( unsigned int nCompactFactor ///< Flat combining: publication list compacting factor ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread ) : m_FlatCombining( nCompactFactor, nCombinePassCount ) {} /// Inserts a new element at the end of the queue /** The function always returns \p true. 
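A minimal usage sketch (the \p item type and \p myQueue instance below are illustrative, not part of the library):
\code
#include <cds/intrusive/fcqueue.h>

// the value type must provide a hook for the default boost::intrusive::list container
struct item: public boost::intrusive::list_base_hook<>
{
    int payload;
};

typedef cds::intrusive::FCQueue< item > queue_type;

queue_type myQueue;
item i;
myQueue.enqueue( i );         // always returns true
item * p = myQueue.dequeue(); // p == &i; nullptr if the queue is empty
\endcode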
*/ bool enqueue( value_type& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_enq, pRec, *this ); else m_FlatCombining.combine( op_enq, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onEnqueue(); return true; } /// Inserts a new element at the end of the queue (a synonym for \ref enqueue) bool push( value_type& val ) { return enqueue( val ); } /// Removes the next element from the queue /** If the queue is empty the function returns \p nullptr */ value_type * dequeue() { auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = nullptr; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_deq, pRec, *this ); else m_FlatCombining.combine( op_deq, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onDequeue( pRec->bEmpty ); return pRec->pVal; } /// Removes the next element from the queue (a synonym for \ref dequeue) value_type * pop() { return dequeue(); } /// Clears the queue /** If \p bDispose is \p true, the disposer provided in \p Traits class' template parameter will be called for each removed element. */ void clear( bool bDispose = false ) { auto pRec = m_FlatCombining.acquire_record(); constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); else m_FlatCombining.combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); } /// Exclusive access to underlying queue object /** The functor \p f can do any operation with underlying \p container_type in exclusive mode. For example, you can iterate over the queue. \p Func signature is: \code void f( container_type& queue ); \endcode */ template void apply( Func f ) { auto& queue = m_Queue; m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); } /// Exclusive access to underlying queue object /** The functor \p f can do any operation with underlying \p container_type in exclusive mode. For example, you can iterate over the queue. \p Func signature is: \code void f( container_type const& queue ); \endcode */ template void apply( Func f ) const { auto const& queue = m_Queue; m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); } /// Returns the number of elements in the queue. /** Note that size() == 0 does not mean that the queue is empty, because a combining record can be in process. To check emptiness use the \ref empty function. */ size_t size() const { return m_Queue.size(); } /// Checks if the queue is empty /** If combining is in process the function waits until it is done. */ bool empty() const { bool bRet = false; auto const& queue = m_Queue; m_FlatCombining.invoke_exclusive([&queue, &bRet]() { bRet = queue.empty(); }); return bRet; } /// Internal statistics stat const& statistics() const { return m_FlatCombining.statistics(); } public: // flat combining cooperation, not for direct use! //@cond /// Flat combining supporting function. Do not call it directly! /** The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" object if the current thread becomes a combiner. Invocation of the function means that the queue should perform an action recorded in \p pRec.
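A summary of the dispatch performed by this function (illustrative shorthand for the \p switch statement in the body below):
\code
// op_enq               -> m_Queue.push_back( *pRec->pVal )
// op_deq               -> pRec->pVal = &m_Queue.front(); m_Queue.pop_front()
// op_clear             -> m_Queue.clear()
// op_clear_and_dispose -> m_Queue.clear_and_dispose( disposer())
\endcode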
*/ void fc_apply( fc_record * pRec ) { assert( pRec ); // this function is called under FC mutex, so switch TSan off // All TSan warnings are false positive //CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; switch ( pRec->op()) { case op_enq: assert( pRec->pVal ); m_Queue.push_back( *(pRec->pVal )); break; case op_deq: pRec->bEmpty = m_Queue.empty(); if ( !pRec->bEmpty ) { pRec->pVal = &m_Queue.front(); m_Queue.pop_front(); } break; case op_clear: m_Queue.clear(); break; case op_clear_and_dispose: m_Queue.clear_and_dispose( disposer()); break; default: assert(false); break; } //CDS_TSAN_ANNOTATE_IGNORE_RW_END; } /// Batch-processing flat combining void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) { // this function is called under FC mutex, so switch TSan off // All TSan warnings are false positive //CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; typedef typename fc_kernel::iterator fc_iterator; for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { switch ( it->op( atomics::memory_order_acquire )) { case op_enq: case op_deq: if ( m_Queue.empty()) { if ( itPrev != itEnd && collide( *itPrev, *it )) itPrev = itEnd; else itPrev = it; } break; } } //CDS_TSAN_ANNOTATE_IGNORE_RW_END; } //@endcond private: //@cond bool collide( fc_record& rec1, fc_record& rec2 ) { assert( m_Queue.empty()); switch ( rec1.op()) { case op_enq: if ( rec2.op() == op_deq ) { assert(rec1.pVal); rec2.pVal = rec1.pVal; rec2.bEmpty = false; m_FlatCombining.operation_done( rec1 ); m_FlatCombining.operation_done( rec2 ); m_FlatCombining.internal_statistics().onCollide(); return true; } break; case op_deq: if ( rec2.op() == op_enq ) { assert(rec2.pVal); rec1.pVal = rec2.pVal; rec1.bEmpty = false; m_FlatCombining.operation_done( rec1 ); m_FlatCombining.operation_done( rec2 ); m_FlatCombining.internal_statistics().onCollide(); return true; } break; } return false; } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_FCQUEUE_H libcds-2.3.3/cds/intrusive/fcstack.h000066400000000000000000000322371341244201700173540ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FCSTACK_H #define CDSLIB_INTRUSIVE_FCSTACK_H #include #include #include #include namespace cds { namespace intrusive { /// FCStack related definitions namespace fcstack { /// FCStack internal statistics template struct stat: public cds::algo::flat_combining::stat { typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type counter_type m_nPush ; ///< Count of push operations counter_type m_nPop ; ///< Count of successful pop operations counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty stack) counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled //@cond void onPush() { ++m_nPush; } void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } void onCollide() { ++m_nCollided; } //@endcond }; /// FCStack dummy statistics, no overhead struct empty_stat: public cds::algo::flat_combining::empty_stat { //@cond void onPush() {} void onPop(bool) {} void onCollide() {} //@endcond }; /// FCStack type traits struct traits: public cds::algo::flat_combining::traits { typedef cds::intrusive::opt::v::empty_disposer disposer ; ///< Disposer to erase removed elements. Used only in \p FCStack::clear() function typedef empty_stat stat; ///< Internal statistics static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" }; /// Metafunction converting option list to traits /** \p Options are: - any \p cds::algo::flat_combining::make_traits options - \p opt::disposer - the functor used to dispose removed items. Default is \p opt::intrusive::v::empty_disposer. This option is used only in \p FCStack::clear() function. - \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default) - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace fcstack /// Flat-combining intrusive stack /** @ingroup cds_intrusive_stack @ingroup cds_flat_combining_intrusive \ref cds_flat_combining_description "Flat combining" sequential intrusive stack. Template parameters: - \p T - a value type stored in the stack - \p Container - sequential intrusive container with \p push_front and \p pop_front functions. Possible containers are \p boost::intrusive::slist (the default), \p boost::intrusive::list - \p Traits - type traits of flat combining, default is \p fcstack::traits. \p fcstack::make_traits metafunction can be used to construct specialized \p %traits */ template ,typename Traits = fcstack::traits > class FCStack #ifndef CDS_DOXYGEN_INVOKED : public cds::algo::flat_combining::container #endif { public: typedef T value_type; ///< Value type typedef Container container_type; ///< Sequential container type typedef Traits traits; ///< Stack traits typedef typename traits::disposer disposer; ///< The disposer functor.
The disposer is used only in \ref clear() function typedef typename traits::stat stat; ///< Internal statistics type static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled protected: //@cond /// Stack operation IDs enum fc_operation { op_push = cds::algo::flat_combining::req_Operation, ///< Push op_pop, ///< Pop op_clear, ///< Clear op_clear_and_dispose ///< Clear and dispose }; /// Flat combining publication list record struct fc_record: public cds::algo::flat_combining::publication_record { value_type * pVal; ///< Value to push or pop bool bEmpty; ///< \p true if the stack is empty }; //@endcond /// Flat combining kernel typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; protected: //@cond mutable fc_kernel m_FlatCombining; container_type m_Stack; //@endcond public: /// Initializes empty stack object FCStack() {} /// Initializes empty stack object and gives flat combining parameters FCStack( unsigned int nCompactFactor ///< Flat combining: publication list compacting factor ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread ) : m_FlatCombining( nCompactFactor, nCombinePassCount ) {} /// Inserts a new element at the top of stack /** The content of the new element initialized to a copy of \p val. */ bool push( value_type& val ) { auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = &val; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_push, pRec, *this ); else m_FlatCombining.combine( op_push, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPush(); return true; } /// Removes the element on top of the stack value_type * pop() { auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = nullptr; constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_pop, pRec, *this ); else m_FlatCombining.combine( op_pop, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); return pRec->pVal; } /// Clears the stack /** If \p bDispose is \p true, the disposer provided in \p Traits class' template parameter will be called for each removed element. */ void clear( bool bDispose = false ) { auto pRec = m_FlatCombining.acquire_record(); constexpr_if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); else m_FlatCombining.combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); assert( pRec->is_done()); m_FlatCombining.release_record( pRec ); } /// Exclusive access to underlying stack object /** The functor \p f can do any operation with underlying \p container_type in exclusive mode. For example, you can iterate over the stack. \p Func signature is: \code void f( container_type& stack ); \endcode */ template void apply( Func f ) { auto& stack = m_Stack; m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); } /// Exclusive access to underlying stack object /** The functor \p f can do any operation with underlying \p container_type in exclusive mode. For example, you can iterate over the stack. \p Func signature is: \code void f( container_type const& stack ); \endcode */ template void apply( Func f ) const { auto const& stack = m_Stack; m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); } /// Returns the number of elements in the stack. 
/** Note that size() == 0 does not mean that the stack is empty, because a combining record can be in process. To check emptiness use the \ref empty function. */ size_t size() const { return m_Stack.size(); } /// Checks if the stack is empty /** If combining is in process the function waits until it is done. */ bool empty() const { bool bRet = false; auto const& stack = m_Stack; m_FlatCombining.invoke_exclusive( [&stack, &bRet]() { bRet = stack.empty(); } ); return bRet; } /// Internal statistics stat const& statistics() const { return m_FlatCombining.statistics(); } public: // flat combining cooperation, not for direct use! //@cond /// Flat combining supporting function. Do not call it directly! /** The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" object if the current thread becomes a combiner. Invocation of the function means that the stack should perform an action recorded in \p pRec. */ void fc_apply( fc_record* pRec ) { assert( pRec ); switch ( pRec->op()) { case op_push: assert( pRec->pVal ); m_Stack.push_front( *(pRec->pVal )); break; case op_pop: pRec->bEmpty = m_Stack.empty(); if ( !pRec->bEmpty ) { pRec->pVal = &m_Stack.front(); m_Stack.pop_front(); } break; case op_clear: m_Stack.clear(); break; case op_clear_and_dispose: m_Stack.clear_and_dispose( disposer()); break; default: assert(false); break; } } /// Batch-processing flat combining void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) { typedef typename fc_kernel::iterator fc_iterator; for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { switch ( it->op( atomics::memory_order_acquire )) { case op_push: case op_pop: if ( itPrev != itEnd && collide( *itPrev, *it )) itPrev = itEnd; else itPrev = it; break; } } } //@endcond private: //@cond bool collide( fc_record& rec1, fc_record& rec2 ) { switch ( rec1.op()) { case op_push: if ( rec2.op() == op_pop ) { assert(rec1.pVal); rec2.pVal = rec1.pVal; rec2.bEmpty = false; m_FlatCombining.operation_done( rec1 ); m_FlatCombining.operation_done( rec2 ); m_FlatCombining.internal_statistics().onCollide(); return true; } break; case op_pop: if ( rec2.op() == op_push ) { assert(rec2.pVal); rec1.pVal = rec2.pVal; rec1.bEmpty = false; m_FlatCombining.operation_done( rec1 ); m_FlatCombining.operation_done( rec2 ); m_FlatCombining.internal_statistics().onCollide(); return true; } break; } return false; } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_FCSTACK_H libcds-2.3.3/cds/intrusive/feldman_hashset_dhp.h000066400000000000000000000006421341244201700217110ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_DHP_H #define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_DHP_H #include #include #endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_DHP_H libcds-2.3.3/cds/intrusive/feldman_hashset_hp.h000066400000000000000000000006361341244201700215500ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_HP_H #define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_HP_H #include #include #endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_HP_H libcds-2.3.3/cds/intrusive/feldman_hashset_rcu.h000066400000000000000000001343701341244201700217350ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H #define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H #include // std::ref #include // std::iterator_traits #include #include #include #include #include #include namespace cds { namespace intrusive { /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization /** @ingroup cds_intrusive_map @anchor cds_intrusive_FeldmanHashSet_rcu Source: - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: Wait-free Extensible Hash Maps" See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here" @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. Instead, for the strings you should use well-known hashing algorithms like SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash and so on, which converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key, it maintains its fixed-size hash value. Template parameters: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - a value type to be stored in the set - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" to hash value of \p T. The set algorithm does not calculate that hash value. @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. The set supports @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators" with some restrictions. 
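A brief usage sketch (all type names below - \p foo, \p foo_traits and so on - are illustrative; see also the iterator example in the member documentation):
\code
#include <cds/urcu/general_buffered.h>  // the RCU flavor must be included first
#include <cds/intrusive/feldman_hashset_rcu.h>

struct foo {
    uint32_t hash;  // the fixed-size hash is the key
    // ... user data
};

struct foo_traits: public cds::intrusive::feldman_hashset::traits {
    struct hash_accessor {
        uint32_t operator()( foo const& f ) const { return f.hash; }
    };
};

typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
typedef cds::intrusive::FeldmanHashSet< rcu, foo, foo_traits > set_type;

set_type s;
foo item{ 42 };
s.insert( item );         // true
s.contains( item.hash );  // true
\endcode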
*/ template < class RCU, class T, #ifdef CDS_DOXYGEN_INVOKED class Traits = feldman_hashset::traits #else class Traits #endif > class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits >: protected feldman_hashset::multilevel_array { //@cond typedef feldman_hashset::multilevel_array base_class; //@endcond public: typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector typedef T value_type; ///< type of value stored in the set typedef Traits traits; ///< Traits template parameter typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type typedef typename traits::disposer disposer; ///< data node disposer typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::node_allocator node_allocator; ///< Array node allocator typedef typename traits::memory_model memory_model; ///< Memory model typedef typename traits::back_off back_off; ///< Backoff strategy typedef typename traits::stat stat; ///< Internal statistics type typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node /// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation static constexpr size_t const c_hash_size = base_class::c_hash_size; //@cond typedef feldman_hashset::level_statistics level_statistics; //@endcond protected: //@cond typedef typename base_class::node_ptr node_ptr; typedef typename base_class::atomic_node_ptr atomic_node_ptr; typedef typename base_class::array_node array_node; typedef typename base_class::traverse_data traverse_data; using base_class::to_array; using base_class::to_node; using base_class::stats; using base_class::head; using base_class::metrics; typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; //@endcond private: //@cond item_counter m_ItemCounter; ///< Item counter //@endcond public: /// Creates empty set /** @param head_bits - 2head_bits specifies the size of head array, minimum is 4. @param array_bits - 2array_bits specifies the size of array node, minimum is 2. Equation for \p head_bits and \p array_bits: \code sizeof(hash_type) * 8 == head_bits + N * array_bits \endcode where \p N is multi-level array depth. */ FeldmanHashSet(size_t head_bits = 8, size_t array_bits = 4) : base_class(head_bits, array_bits) {} /// Destructs the set and frees all data ~FeldmanHashSet() { clear(); } /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with that hash. Returns \p true if \p val is placed into the set, \p false otherwise. The function locks RCU internally. */ bool insert( value_type& val ) { return insert( val, [](value_type&) {} ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize \p val. 
The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the inserting is success. The function locks RCU internally. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting". */ template bool insert( value_type& val, Func f ) { hash_type const& hash = hash_accessor()( val ); traverse_data pos( hash, *this ); hash_comparator cmp; while (true) { rcu_lock rcuLock; node_ptr slot = base_class::traverse( pos ); assert(slot.bits() == 0); if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) { if (slot.ptr()) { if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) { // the item with that hash value already exists stats().onInsertFailed(); return false; } // the slot must be expanded base_class::expand_slot( pos, slot ); } else { // the slot is empty, try to insert data node node_ptr pNull; if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) { // the new data node has been inserted f(val); ++m_ItemCounter; stats().onInsertSuccess(); stats().height( pos.nHeight ); return true; } // insert failed - slot has been changed by another thread // retry inserting stats().onInsertRetry(); } } else stats().onSlotChanged(); } } /// Updates the node /** Performs inserting or updating the item with hash value equal to \p val. - If hash value is found then existing item is replaced with \p val, old item is disposed with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously. The function returns std::pair - If hash value is not found and \p bInsert is \p true then \p val is inserted, the function returns std::pair - If hash value is not found and \p bInsert is \p false then the set is unchanged, the function returns std::pair Returns std::pair where \p first is \p true if operation is successful (i.e. the item has been inserted or updated), \p second is \p true if new item has been added or \p false if the set contains that hash. The function locks RCU internally. */ std::pair update( value_type& val, bool bInsert = true ) { return do_update(val, [](value_type&, value_type *) {}, bInsert ); } /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlink it if it is found and its address is equal to &val. The function returns \p true if success and \p false otherwise. RCU should not be locked. The function locks RCU internally. */ bool unlink( value_type const& val ) { check_deadlock_policy::check(); auto pred = [&val](value_type const& item) -> bool { return &item == &val; }; value_type * p; { rcu_lock rcuLock; p = do_erase( hash_accessor()( val ), std::ref( pred )); } if ( p ) { gc::template retire_ptr( p ); return true; } return false; } /// Deletes the item from the set /** The function searches \p hash in the set, unlinks the item found, and returns \p true. If that item is not found the function returns \p false. The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously. RCU should not be locked. The function locks RCU internally. */ bool erase( hash_type const& hash ) { return erase(hash, [](value_type const&) {} ); } /// Deletes the item from the set /** The function searches \p hash in the set, call \p f functor with item found, and unlinks it from the set. The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously. 
The \p Func interface is \code struct functor { void operator()( value_type& item ); }; \endcode If \p hash is not found the function returns \p false. RCU should not be locked. The function locks RCU internally. */ template bool erase( hash_type const& hash, Func f ) { check_deadlock_policy::check(); value_type * p; { rcu_lock rcuLock; p = do_erase( hash, []( value_type const&) -> bool { return true; } ); } // p is unlinked but not yet disposed, so it can be safely accessed here if ( p ) { f( *p ); gc::template retire_ptr(p); return true; } return false; } /// Extracts the item with specified \p hash /** The function searches \p hash in the set, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with hash value equal to \p hash is not found the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. Example: \code typedef cds::intrusive::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type; set_type theSet; // ... typename set_type::exempt_ptr ep( theSet.extract( 5 )); if ( ep ) { // Deal with ep //... // Dispose returned item. ep.release(); } \endcode */ exempt_ptr extract( hash_type const& hash ) { check_deadlock_policy::check(); value_type * p; { rcu_lock rcuLock; p = do_erase( hash, []( value_type const&) -> bool {return true;} ); } return exempt_ptr( p ); } /// Finds an item by its \p hash /** The function searches the item by \p hash and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item found. The functor may change non-key fields of \p item. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization scheme at the item level to prevent unsafe item modifications. The function returns \p true if \p hash is found, \p false otherwise. The function applies RCU lock internally. */ template bool find( hash_type const& hash, Func f ) { rcu_lock rcuLock; value_type * p = search( hash ); if ( p ) { f( *p ); return true; } return false; } /// Checks whether the set contains \p hash /** The function searches the item by its \p hash and returns \p true if it is found, or \p false otherwise. The function applies RCU lock internally. */ bool contains( hash_type const& hash ) { return find( hash, [](value_type&) {} ); } /// Finds an item by its \p hash and returns the item found /** The function searches the item by its \p hash and returns the pointer to the item found. If \p hash is not found the function returns \p nullptr. RCU should be locked before the function invocation. Returned pointer is valid only while RCU is locked. Usage: \code typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set; my_set theSet; // ... { // lock RCU my_set::rcu_lock l; foo * p = theSet.get( 5 ); if ( p ) { // Deal with p //... } } \endcode */ value_type * get( hash_type const& hash ) { assert( gc::is_locked()); return search( hash ); } /// Clears the set (non-atomic) /** The function unlinks all data nodes from the set. The function is not atomic but is thread-safe.
After \p %clear() the set may not be empty because another threads may insert items. For each item the \p disposer is called after unlinking. */ void clear() { clear_array( head(), head_size()); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of the set implementation. */ bool empty() const { return size() == 0; } /// Returns item count in the set size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return stats(); } /// Returns the size of head node using base_class::head_size; /// Returns the size of the array node using base_class::array_node_size; /// Collects tree level statistics into \p stat /** The function traverses the set and collects statistics for each level of the tree into \p feldman_hashset::level_statistics struct. The element of \p stat[i] represents statistics for level \p i, level 0 is head array. The function is thread-safe and may be called in multi-threaded environment. Result can be useful for estimating efficiency of hash functor you use. */ void get_level_statistics(std::vector& stat) const { base_class::get_level_statistics(stat); } protected: //@cond class iterator_base { friend class FeldmanHashSet; protected: array_node * m_pNode; ///< current array node size_t m_idx; ///< current position in m_pNode value_type * m_pValue; ///< current value FeldmanHashSet const* m_set; ///< Hash set public: iterator_base() noexcept : m_pNode(nullptr) , m_idx(0) , m_pValue(nullptr) , m_set(nullptr) {} iterator_base(iterator_base const& rhs) noexcept : m_pNode(rhs.m_pNode) , m_idx(rhs.m_idx) , m_pValue(rhs.m_pValue) , m_set(rhs.m_set) {} iterator_base& operator=(iterator_base const& rhs) noexcept { m_pNode = rhs.m_pNode; m_idx = rhs.m_idx; m_pValue = rhs.m_pValue; m_set = rhs.m_set; return *this; } iterator_base& operator++() { forward(); return *this; } iterator_base& operator--() { backward(); return *this; } bool operator ==(iterator_base const& rhs) const noexcept { return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set; } bool operator !=(iterator_base const& rhs) const noexcept { return !(*this == rhs); } protected: iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool) : m_pNode(pNode) , m_idx(idx) , m_pValue(nullptr) , m_set(&set) {} iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx) : m_pNode(pNode) , m_idx(idx) , m_pValue(nullptr) , m_set(&set) { forward(); } value_type * pointer() const noexcept { return m_pValue; } void forward() { assert(m_set != nullptr); assert(m_pNode != nullptr); size_t const arrayNodeSize = m_set->array_node_size(); size_t const headSize = m_set->head_size(); array_node * pNode = m_pNode; size_t idx = m_idx + 1; size_t nodeSize = m_pNode->pParent ? 
arrayNodeSize : headSize; for (;;) { if (idx < nodeSize) { node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire); if (slot.bits() == base_class::flag_array_node ) { // array node, go down the tree assert(slot.ptr() != nullptr); pNode = to_array(slot.ptr()); idx = 0; nodeSize = arrayNodeSize; } else if (slot.bits() == base_class::flag_array_converting ) { // the slot is converting to array node right now - skip the node ++idx; } else { if (slot.ptr()) { // data node m_pNode = pNode; m_idx = idx; m_pValue = slot.ptr(); return; } ++idx; } } else { // up to parent node if (pNode->pParent) { idx = pNode->idxParent + 1; pNode = pNode->pParent; nodeSize = pNode->pParent ? arrayNodeSize : headSize; } else { // end() assert(pNode == m_set->head()); assert(idx == headSize); m_pNode = pNode; m_idx = idx; m_pValue = nullptr; return; } } } } void backward() { assert(m_set != nullptr); assert(m_pNode != nullptr); size_t const arrayNodeSize = m_set->array_node_size(); size_t const headSize = m_set->head_size(); size_t const endIdx = size_t(0) - 1; array_node * pNode = m_pNode; size_t idx = m_idx - 1; size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize; for (;;) { if (idx != endIdx) { node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire); if (slot.bits() == base_class::flag_array_node ) { // array node, go down the tree assert(slot.ptr() != nullptr); pNode = to_array(slot.ptr()); nodeSize = arrayNodeSize; idx = nodeSize - 1; } else if (slot.bits() == base_class::flag_array_converting ) { // the slot is converting to array node right now - skip the node --idx; } else { if (slot.ptr()) { // data node m_pNode = pNode; m_idx = idx; m_pValue = slot.ptr(); return; } --idx; } } else { // up to parent node if (pNode->pParent) { idx = pNode->idxParent - 1; pNode = pNode->pParent; nodeSize = pNode->pParent ? 
arrayNodeSize : headSize; } else { // rend() assert(pNode == m_set->head()); assert(idx == endIdx); m_pNode = pNode; m_idx = idx; m_pValue = nullptr; return; } } } } }; template Iterator init_begin() const { return Iterator(*this, head(), size_t(0) - 1); } template Iterator init_end() const { return Iterator(*this, head(), head_size(), false); } template Iterator init_rbegin() const { return Iterator(*this, head(), head_size()); } template Iterator init_rend() const { return Iterator(*this, head(), size_t(0) - 1, false); } /// Bidirectional iterator class template class bidirectional_iterator : protected iterator_base { friend class FeldmanHashSet; protected: static constexpr bool const c_bConstantIterator = IsConst; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: bidirectional_iterator() noexcept {} bidirectional_iterator(bidirectional_iterator const& rhs) noexcept : iterator_base(rhs) {} bidirectional_iterator& operator=(bidirectional_iterator const& rhs) noexcept { iterator_base::operator=(rhs); return *this; } bidirectional_iterator& operator++() { iterator_base::operator++(); return *this; } bidirectional_iterator& operator--() { iterator_base::operator--(); return *this; } value_ptr operator ->() const noexcept { return iterator_base::pointer(); } value_ref operator *() const noexcept { value_ptr p = iterator_base::pointer(); assert(p); return *p; } template bool operator ==(bidirectional_iterator const& rhs) const noexcept { return iterator_base::operator==(rhs); } template bool operator !=(bidirectional_iterator const& rhs) const noexcept { return !(*this == rhs); } protected: bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool) : iterator_base(set, pNode, idx, false) {} bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx) : iterator_base(set, pNode, idx) {} }; /// Reverse bidirectional iterator template class reverse_bidirectional_iterator : public iterator_base { friend class FeldmanHashSet; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: reverse_bidirectional_iterator() noexcept : iterator_base() {} reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) noexcept : iterator_base(rhs) {} reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) noexcept { iterator_base::operator=(rhs); return *this; } reverse_bidirectional_iterator& operator++() { iterator_base::operator--(); return *this; } reverse_bidirectional_iterator& operator--() { iterator_base::operator++(); return *this; } value_ptr operator ->() const noexcept { return iterator_base::pointer(); } value_ref operator *() const noexcept { value_ptr p = iterator_base::pointer(); assert(p); return *p; } template bool operator ==(reverse_bidirectional_iterator const& rhs) const { return iterator_base::operator==(rhs); } template bool operator !=(reverse_bidirectional_iterator const& rhs) { return !(*this == rhs); } private: reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool) : iterator_base(set, pNode, idx, false) {} reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx) : 
iterator_base(set, pNode, idx, false) { iterator_base::backward(); } }; //@endcond public: #ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional iterator" type typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type #else typedef bidirectional_iterator iterator; typedef bidirectional_iterator const_iterator; typedef reverse_bidirectional_iterator reverse_iterator; typedef reverse_bidirectional_iterator const_reverse_iterator; #endif ///@name Thread-safe iterators /** @anchor cds_intrusive_FeldmanHashSet_rcu_iterators The set supports thread-safe iterators: you may iterate over the set in a multi-threaded environment under explicit RCU lock. RCU lock requirement means that inserting or searching is allowed for the iterating thread but you must not erase the items from the set because erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely while your thread is iterating. A typical example is: \code struct foo { uint32_t hash; // ... other fields uint32_t payload; // only for example }; struct set_traits: cds::intrusive::feldman_hashset::traits { struct hash_accessor { uint32_t operator()( foo const& src ) const { return src.hash; } }; }; typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu; typedef cds::intrusive::FeldmanHashSet< rcu, foo, set_traits > set_type; set_type s; // ... // iterate over the set { // lock the RCU. typename set_type::rcu_lock l; // scoped RCU lock // traverse the set for ( auto i = s.begin(); i != s.end(); ++i ) { // deal with i. Remember, erasing is prohibited here! i->payload++; } } // at this point RCU lock is released \endcode Each iterator object supports the common interface: - dereference operators: @code value_type [const] * operator ->() noexcept value_type [const] & operator *() noexcept @endcode - pre-increment and pre-decrement. Post-operators are not supported - equality operators == and !=. Iterators are equal iff they point to the same cell of the same array node. Note that for two iterators \p it1 and \p it2 the condition it1 == it2 does not entail &(*it1) == &(*it2) : welcome to concurrent containers @note It is possible the item can be iterated more than once, for example, if an iterator points to the item in an array node that is being split. */ ///@{ /// Returns an iterator to the beginning of the set iterator begin() { return iterator(*this, head(), size_t(0) - 1); } /// Returns a const iterator to the beginning of the set const_iterator begin() const { return const_iterator(*this, head(), size_t(0) - 1); } /// Returns a const iterator to the beginning of the set const_iterator cbegin() { return const_iterator(*this, head(), size_t(0) - 1); } /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. iterator end() { return iterator(*this, head(), head_size(), false); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
const_iterator end() const { return const_iterator(*this, head(), head_size(), false); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator cend() { return const_iterator(*this, head(), head_size(), false); } /// Returns a reverse iterator to the first element of the reversed set reverse_iterator rbegin() { return reverse_iterator(*this, head(), head_size()); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator rbegin() const { return const_reverse_iterator(*this, head(), head_size()); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator crbegin() { return const_reverse_iterator(*this, head(), head_size()); } /// Returns a reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ reverse_iterator rend() { return reverse_iterator(*this, head(), size_t(0) - 1, false); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ const_reverse_iterator rend() const { return const_reverse_iterator(*this, head(), size_t(0) - 1, false); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. 
*/ const_reverse_iterator crend() { return const_reverse_iterator(*this, head(), size_t(0) - 1, false); } ///@} protected: //@cond template std::pair do_update(value_type& val, Func f, bool bInsert = true) { hash_type const& hash = hash_accessor()(val); traverse_data pos( hash, *this ); hash_comparator cmp; value_type * pOld; while ( true ) { rcu_lock rcuLock; node_ptr slot = base_class::traverse( pos ); assert(slot.bits() == 0); pOld = nullptr; if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) { if ( slot.ptr()) { if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) { // the item with that hash value already exists // Replace it with val if ( slot.ptr() == &val ) { stats().onUpdateExisting(); return std::make_pair(true, false); } if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) { // slot can be disposed f( val, slot.ptr()); pOld = slot.ptr(); stats().onUpdateExisting(); goto update_existing_done; } stats().onUpdateRetry(); } else { if ( bInsert ) { // the slot must be expanded base_class::expand_slot( pos, slot ); } else { stats().onUpdateFailed(); return std::make_pair(false, false); } } } else { // the slot is empty, try to insert data node if (bInsert) { node_ptr pNull; if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) { // the new data node has been inserted f(val, nullptr); ++m_ItemCounter; stats().onUpdateNew(); stats().height( pos.nHeight ); return std::make_pair(true, true); } } else { stats().onUpdateFailed(); return std::make_pair(false, false); } // insert failed - slot has been changed by another thread // retry updating stats().onUpdateRetry(); } } else stats().onSlotChanged(); } // while // update success // retire_ptr must be called only outside of RCU lock update_existing_done: if (pOld) gc::template retire_ptr(pOld); return std::make_pair(true, false); } template value_type * do_erase( hash_type const& hash, Predicate pred) { assert(gc::is_locked()); traverse_data pos( hash, *this ); hash_comparator cmp; while ( true ) { node_ptr slot = base_class::traverse( pos ); assert( slot.bits() == 0 ); if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) == slot ) { if ( slot.ptr()) { if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 && pred( *slot.ptr())) { // item found - replace it with nullptr if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { --m_ItemCounter; stats().onEraseSuccess(); return slot.ptr(); } stats().onEraseRetry(); continue; } stats().onEraseFailed(); return nullptr; } else { // the slot is empty stats().onEraseFailed(); return nullptr; } } else stats().onSlotChanged(); } } value_type * search(hash_type const& hash ) { assert( gc::is_locked()); traverse_data pos( hash, *this ); hash_comparator cmp; while ( true ) { node_ptr slot = base_class::traverse( pos ); assert( slot.bits() == 0 ); if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) != slot ) { // slot value has been changed - retry stats().onSlotChanged(); continue; } else if ( slot.ptr() && cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) { // item found stats().onFindSuccess(); return slot.ptr(); } stats().onFindFailed(); return nullptr; } } //@endcond private: //@cond void clear_array(array_node * pArrNode, size_t nSize) { back_off 
bkoff; for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) { while (true) { node_ptr slot = pArr->load(memory_model::memory_order_acquire); if (slot.bits() == base_class::flag_array_node ) { // array node, go down the tree assert(slot.ptr() != nullptr); clear_array(to_array(slot.ptr()), array_node_size()); break; } else if (slot.bits() == base_class::flag_array_converting ) { // the slot is converting to array node right now while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == base_class::flag_array_converting ) { bkoff(); stats().onSlotConverting(); } bkoff.reset(); assert(slot.ptr() != nullptr); assert(slot.bits() == base_class::flag_array_node ); clear_array(to_array(slot.ptr()), array_node_size()); break; } else { // data node if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { if (slot.ptr()) { gc::template retire_ptr(slot.ptr()); --m_ItemCounter; stats().onEraseSuccess(); } break; } } } } } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H libcds-2.3.3/cds/intrusive/free_list.h000066400000000000000000000214431341244201700177070ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FREE_LIST_H #define CDSLIB_INTRUSIVE_FREE_LIST_H #include namespace cds { namespace intrusive { /// Lock-free free list /** @ingroup cds_intrusive_freelist Free list is a helper class intended for reusing objects instead of freeing them completely; this avoids the overhead of \p malloc(), and also avoids its worst-case behavior of taking an operating system lock. So, the free list can be considered as a specialized allocator for objects of some type. The algorithm is taken from this article. The algo does not require any SMR like Hazard Pointer to prevent ABA problem. There is \ref TaggedFreeList "tagged pointers" variant of free list for processors with double-width CAS support. \b How to use \code #include // Your struct should be derived from FreeList::node struct Foo: public cds::intrusive::FreeList::node { // Foo fields }; // Simplified Foo allocator class FooAllocator { public: // free-list clear() must be explicitly called before destroying the free-list object ~FooAllocator() { m_FreeList.clear( []( freelist_node * p ) { delete static_cast( p ); }); } Foo * alloc() { freelist_node * p = m_FreeList.get(); if ( p ) return static_cast( p ); return new Foo; }; void dealloc( Foo * p ) { m_FreeList.put( static_cast( p )); }; private: typedef cds::intrusive::FreeList::node freelist_node; cds::intrusive::FreeList m_FreeList; }; \endcode */ class FreeList { public: /// Free list node struct node { //@cond atomics::atomic m_freeListRefs; atomics::atomic m_freeListNext; node() : m_freeListRefs( 0 ) { m_freeListNext.store( nullptr, atomics::memory_order_release ); } //@endcond }; public: /// Creates empty free list FreeList() : m_Head( nullptr ) {} /// Destroys the free list. Free-list must be empty. /** @warning dtor does not free elements of the list. To free elements you should manually call \p clear() with an appropriate disposer. 
*/ ~FreeList() { assert( empty()); } /// Puts \p pNode to the free list void put( node * pNode ) { // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to // set it using a fetch_add if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList, atomics::memory_order_release ) == 0 ) { // Oh look! We were the last ones referencing this node, and we know // we want to add it to the free list, so let's do it! add_knowing_refcount_is_zero( pNode ); } } /// Gets a node from the free list. If the list is empty, returns \p nullptr node * get() { auto head = m_Head.load( atomics::memory_order_acquire ); while ( head != nullptr ) { auto prevHead = head; auto refs = head->m_freeListRefs.load( atomics::memory_order_relaxed ); if ( cds_unlikely( (refs & c_RefsMask) == 0 || !head->m_freeListRefs.compare_exchange_strong( refs, refs + 1, atomics::memory_order_acquire, atomics::memory_order_relaxed ))) { head = m_Head.load( atomics::memory_order_acquire ); continue; } // Good, reference count has been incremented (it wasn't at zero), which means // we can read the next and not worry about it changing between now and the time // we do the CAS node * next = head->m_freeListNext.load( atomics::memory_order_relaxed ); if ( cds_likely( m_Head.compare_exchange_strong( head, next, atomics::memory_order_acquire, atomics::memory_order_relaxed ))) { // Yay, got the node. This means it was on the list, which means // shouldBeOnFreeList must be false no matter the refcount (because // nobody else knows it's been taken off yet, it can't have been put back on). assert( (head->m_freeListRefs.load( atomics::memory_order_relaxed ) & c_ShouldBeOnFreeList) == 0 ); // Decrease refcount twice, once for our ref, and once for the list's ref head->m_freeListRefs.fetch_sub( 2, atomics::memory_order_relaxed ); return head; } // OK, the head must have changed on us, but we still need to decrease the refcount we // increased refs = prevHead->m_freeListRefs.fetch_sub( 1, atomics::memory_order_acq_rel ); if ( refs == c_ShouldBeOnFreeList + 1 ) add_knowing_refcount_is_zero( prevHead ); } return nullptr; } /// Checks whether the free list is empty bool empty() const { return m_Head.load( atomics::memory_order_relaxed ) == nullptr; } /// Clears the free list (not atomic) /** For each element \p disp disposer is called to free memory. The \p Disposer interface: \code struct disposer { void operator()( FreeList::node * node ); }; \endcode This method must be explicitly called before the free list destructor. */ template void clear( Disposer disp ) { node * head = m_Head.load( atomics::memory_order_relaxed ); m_Head.store( nullptr, atomics::memory_order_relaxed ); while ( head ) { node * next = head->m_freeListNext.load( atomics::memory_order_relaxed ); disp( head ); head = next; } } private: //@cond void add_knowing_refcount_is_zero( node * pNode ) { // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we // run only one copy of this method per node at a time, i.e. 
the single thread case), then we
        // know we can safely change the next pointer of the node; however, once the refcount is back
        // above zero, then other threads could increase it (happens under heavy contention, when the
        // refcount goes to zero in between a load and a refcount increment of a node in try_get, then
        // back up to something non-zero, then the refcount increment is done by the other thread) --
        // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave
        // the add operation to the next thread who puts the refcount back at zero (which could be us,
        // hence the loop).
        node * head = m_Head.load( atomics::memory_order_relaxed );
        while ( true ) {
            pNode->m_freeListNext.store( head, atomics::memory_order_relaxed );
            pNode->m_freeListRefs.store( 1, atomics::memory_order_release );

            if ( cds_unlikely( !m_Head.compare_exchange_strong( head, pNode, atomics::memory_order_release, atomics::memory_order_relaxed ))) {
                // Hmm, the add failed, but we can only try again when the refcount goes back to zero
                if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList - 1, atomics::memory_order_release ) == 1 )
                    continue;
            }
            return;
        }
    }
    //@endcond

private:
    //@cond
    static constexpr uint32_t const c_RefsMask = 0x7FFFFFFF;
    static constexpr uint32_t const c_ShouldBeOnFreeList = 0x80000000;

    // Implemented like a stack, but where node order doesn't matter (nodes are
    // inserted out of order under contention)
    atomics::atomic< node* > m_Head;
    //@endcond
};

}} // namespace cds::intrusive

#endif // CDSLIB_INTRUSIVE_FREE_LIST_H
libcds-2.3.3/cds/intrusive/free_list_cached.h000066400000000000000000000133421341244201700211750ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_INTRUSIVE_FREE_LIST_CACHED_H
#define CDSLIB_INTRUSIVE_FREE_LIST_CACHED_H

#include #include #include #include #include #include

namespace cds { namespace intrusive {

    /// Cached free list
    /** @ingroup cds_intrusive_freelist
        The class is a wrapper over another \p FreeList that maintains a small cache of free elements.
        Before placing a new item into the underlying \p FreeList, the cached free list tries to put
        that item into the cache if the corresponding slot is empty. The slot is calculated from
        the current thread id:
        \code
        int slot = std::hash<std::thread::id>()( std::this_thread::get_id()) & (CacheSize - 1);
        \endcode

        When getting an item, the cached free list first checks the corresponding cache slot.
        If it is not empty, its content is returned.

        In some cases such a simple algorithm significantly reduces \p FreeList contention.
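        A minimal usage sketch (\p Foo is a hypothetical node type; the cached free list is used
        exactly like the underlying free list):
        \code
        #include <cds/intrusive/free_list_cached.h>
        #include <cds/intrusive/free_list.h>

        struct Foo: public cds::intrusive::FreeList::node {
            // Foo fields
        };

        // Wrap the lock-free FreeList with a thread-id-indexed cache (16 slots by default)
        typedef cds::intrusive::CachedFreeList< cds::intrusive::FreeList > free_list_type;

        free_list_type theList;
        // theList.put( &foo ) and theList.get() have the same contract as the
        // underlying free list; clear() must still be called with a disposer
        // before theList is destroyed.
        \endcode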
Template parameters:
        - \p FreeList - a free-list implementation: \p FreeList, \p TaggedFreeList
        - \p CacheSize - size of the cache, a small power-of-two number, default is 16
        - \p Padding - padding of cache elements to avoid false sharing, default is \p cds::c_nCacheLineSize
    */
    template <typename FreeList, size_t CacheSize = 16, unsigned Padding = cds::c_nCacheLineSize>
    class CachedFreeList
    {
    public:
        typedef FreeList free_list_type;    ///< Underlying free-list type
        typedef typename free_list_type::node node; ///< Free-list node

        static size_t const c_cache_size = CacheSize;   ///< Cache size
        static unsigned const c_padding = Padding;      ///< Cache element padding

        static_assert( c_cache_size >= 4, "Cache size is too small" );
        static_assert( (c_cache_size & (c_cache_size - 1)) == 0, "CacheSize must be power of two" );
        static_assert( (c_padding & (c_padding - 1)) == 0, "Padding must be power-of-two");

    public:
        /// Creates empty free list
        CachedFreeList()
        {
            for ( auto& i: m_cache )
                i.store( nullptr, atomics::memory_order_relaxed );
        }

        /// Destroys the free list. Free-list must be empty.
        /**
            @warning dtor does not free elements of the list.
            To free elements you should manually call \p clear() with an appropriate disposer.
        */
        ~CachedFreeList()
        {
            assert( empty());
        }

        /// Puts \p pNode to the free list
        void put( node* pNode )
        {
            // try to put into a free cell of the cache
            node* expect = nullptr;
            if ( m_cache[ get_hash() ].compare_exchange_weak( expect, pNode, atomics::memory_order_release, atomics::memory_order_relaxed ))
                return;

            // cache cell is not empty - use the free-list
            m_freeList.put( pNode );
        }

        /// Gets a node from the free list. If the list is empty, returns \p nullptr
        node * get()
        {
            // try to get from the cache
            atomics::atomic<node*>& cell = m_cache[ get_hash() ];
            node* p = cell.load( atomics::memory_order_relaxed );
            if ( p && cell.compare_exchange_weak( p, nullptr, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                return p;

            // try to read from the free-list
            p = m_freeList.get();
            if ( p )
                return p;

            // iterate the cache
            for ( auto& item : m_cache ) {
                p = item.load( atomics::memory_order_relaxed );
                if ( p && item.compare_exchange_weak( p, nullptr, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                    return p;
            }

            return m_freeList.get();
        }

        /// Checks whether the free list is empty
        bool empty() const
        {
            if ( !m_freeList.empty())
                return false;

            for ( auto& cell : m_cache ) {
                node* p = cell.load( atomics::memory_order_relaxed );
                if ( p )
                    return false;
            }
            return true;
        }

        /// Clears the free list (not atomic)
        /**
            For each element \p disp disposer is called to free memory.
            The \p Disposer interface:
            \code
            struct disposer
            {
                void operator()( FreeList::node * node );
            };
            \endcode

            This method must be explicitly called before the free list destructor.
        */
        template <typename Disposer>
        void clear( Disposer disp )
        {
            m_freeList.clear( disp );
            for ( auto& cell : m_cache ) {
                node* p = cell.load( atomics::memory_order_relaxed );
                if ( p ) {
                    disp( p );
                    cell.store( nullptr, atomics::memory_order_relaxed );
                }
            }
        }

    private:
        //@cond
        size_t get_hash()
        {
            return std::hash<std::thread::id>()( std::this_thread::get_id()) & (c_cache_size - 1);
        }
        //@endcond

    private:
        //@cond
        typedef typename cds::details::type_padding< atomics::atomic<node*>, c_padding >::type array_item;

        array_item m_cache[ c_cache_size ];
        free_list_type m_freeList;
        //@endcond
    };

}} // namespace cds::intrusive
//@endcond

#endif // CDSLIB_INTRUSIVE_FREE_LIST_CACHED_H
libcds-2.3.3/cds/intrusive/free_list_selector.h000066400000000000000000000012761341244201700216110ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FREE_LIST_SELECTOR_H #define CDSLIB_INTRUSIVE_FREE_LIST_SELECTOR_H #include #ifdef CDS_DCAS_SUPPORT # include #else # include #endif //@cond namespace cds { namespace intrusive { #ifdef CDS_DCAS_SUPPORT typedef TaggedFreeList FreeListImpl; #else typedef FreeList FreeListImpl; #endif }} // namespace cds::intrusive //@endcond #endif // CDSLIB_INTRUSIVE_FREE_LIST_SELECTOR_H libcds-2.3.3/cds/intrusive/free_list_tagged.h000066400000000000000000000132251341244201700212210ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H #define CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H #include namespace cds { namespace intrusive { /// Lock-free free list based on tagged pointers (required double-width CAS) /** @ingroup cds_intrusive_freelist This variant of \p FreeList is intended for processor architectures that support double-width CAS. It uses tagged pointer technique to solve ABA problem. \b How to use \code #include // Your struct should be derived from TaggedFreeList::node struct Foo: public cds::intrusive::TaggedFreeList::node { // Foo fields }; // Simplified Foo allocator class FooAllocator { public: // free-list clear() must be explicitly called before destroying the free-list object ~FooAllocator() { m_FreeList.clear( []( freelist_node * p ) { delete static_cast( p ); }); } Foo * alloc() { freelist_node * p = m_FreeList.get(); if ( p ) return static_cast( p ); return new Foo; }; void dealloc( Foo * p ) { m_FreeList.put( static_cast( p )); }; private: typedef cds::intrusive::TaggedFreeList::node freelist_node; cds::intrusive::TaggedFreeList m_FreeList; }; \endcode */ class TaggedFreeList { public: struct node { //@cond atomics::atomic m_freeListNext; node() { m_freeListNext.store( nullptr, atomics::memory_order_release ); } //@endcond }; private: //@cond struct tagged_ptr { node * ptr; uintptr_t tag; tagged_ptr() : ptr( nullptr ) , tag( 0 ) {} tagged_ptr( node* p ) : ptr( p ) , tag( 0 ) {} }; static_assert(sizeof( tagged_ptr ) == sizeof( void * ) * 2, "sizeof( tagged_ptr ) violation"); //@endcond public: /// Creates empty free-list TaggedFreeList() : m_Head( tagged_ptr()) { // Your platform must support double-width CAS assert( m_Head.is_lock_free()); } /// Destroys the free list. Free-list must be empty. /** @warning dtor does not free elements of the list. To free elements you should manually call \p clear() with an appropriate disposer. */ ~TaggedFreeList() { assert( empty()); } /// Puts \p pNode to the free list void put( node * pNode ) { assert( m_Head.is_lock_free()); tagged_ptr currentHead = m_Head.load( atomics::memory_order_relaxed ); tagged_ptr newHead = { pNode }; do { newHead.tag = currentHead.tag + 1; pNode->m_freeListNext.store( currentHead.ptr, atomics::memory_order_relaxed ); CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_freeListNext ); } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire ))); } /// Gets a node from the free list. 
If the list is empty, returns \p nullptr
    node * get()
    {
        tagged_ptr currentHead = m_Head.load( atomics::memory_order_acquire );
        tagged_ptr newHead;
        while ( currentHead.ptr != nullptr ) {
            CDS_TSAN_ANNOTATE_HAPPENS_AFTER( &currentHead.ptr->m_freeListNext );
            newHead.ptr = currentHead.ptr->m_freeListNext.load( atomics::memory_order_relaxed );
            newHead.tag = currentHead.tag + 1;
            if ( cds_likely( m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire )))
                break;
        }
        return currentHead.ptr;
    }

    /// Checks whether the free list is empty
    bool empty() const
    {
        return m_Head.load( atomics::memory_order_relaxed ).ptr == nullptr;
    }

    /// Clears the free list (not atomic)
    /**
        For each element \p disp disposer is called to free memory.
        The \p Disposer interface:
        \code
        struct disposer
        {
            void operator()( FreeList::node * node );
        };
        \endcode

        This method must be explicitly called before the free list destructor.
    */
    template <typename Disposer>
    void clear( Disposer disp )
    {
        node * head = m_Head.load( atomics::memory_order_relaxed ).ptr;
        m_Head.store( { nullptr }, atomics::memory_order_relaxed );
        while ( head ) {
            node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
            disp( head );
            head = next;
        }
    }

private:
    //@cond
    atomics::atomic< tagged_ptr > m_Head;
    //@endcond
};

}} // namespace cds::intrusive

#endif // CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H
libcds-2.3.3/cds/intrusive/impl/000077500000000000000000000000001341244201700165175ustar00rootroot00000000000000libcds-2.3.3/cds/intrusive/impl/ellen_bintree.h000066400000000000000000001753371341244201700215130ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H
#define CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H

#include #include #include #include #include

namespace cds { namespace intrusive {

    /// Ellen's et al binary search tree
    /** @ingroup cds_intrusive_map
        @ingroup cds_intrusive_tree
        @anchor cds_intrusive_EllenBinTree

        Source:
            - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Trees"

        %EllenBinTree is an unbalanced leaf-oriented binary search tree that implements the set
        abstract data type. Nodes maintain child pointers but not parent pointers.
        Every internal node has exactly two children, and all data of type \p T currently in
        the tree are stored in the leaves. Internal nodes of the tree are used to direct the \p find()
        operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes
        may or may not be in the set. \p Key type is a subset of \p T type.
        A key-extracting functor for converting an object of type \p T into an object of type \p Key
        must be provided.

        Due to the \p extract_min() and \p extract_max() member functions the \p %EllenBinTree can act
        as a priority queue. In this case you should provide a unique compound key, for example,
        the priority value plus some uniformly distributed random value.

        @note In the current implementation we do not use the helping technique described in the original
        paper. In the Hazard Pointer schema helping is too complicated and does not give any observable
        benefits. Instead of helping, when a thread encounters a concurrent operation it just spins,
        waiting for the operation to complete. Such a solution greatly simplifies the implementation
        of the tree.

        @attention Recall the tree is unbalanced.
The complexity of operations is O(log N) for uniformly distributed random keys, but in the worst
        case the complexity is O(N).

        @note Do not include this header file explicitly.
        There is a header file for each GC type:
        - <cds/intrusive/ellen_bintree_hp.h> - for Hazard Pointer GC \p cds::gc::HP
        - <cds/intrusive/ellen_bintree_dhp.h> - for Dynamic Hazard Pointer GC \p cds::gc::DHP
        - <cds/intrusive/ellen_bintree_rcu.h> - for RCU (see \ref cds_intrusive_EllenBinTree_rcu "RCU-based EllenBinTree")

        Template arguments:
        - \p GC - garbage collector, possible types are cds::gc::HP, cds::gc::DHP.
        - \p Key - key type, a subset of \p T
        - \p T - type to be stored in tree's leaf nodes. The type must be based on \p ellen_bintree::node
            (for \p ellen_bintree::base_hook) or it must have a member of type \p ellen_bintree::node
            (for \p ellen_bintree::member_hook).
        - \p Traits - tree traits, default is \p ellen_bintree::traits.
            It is possible to declare an option-based tree with the \p ellen_bintree::make_traits
            metafunction instead of the \p Traits template argument.

        @anchor cds_intrusive_EllenBinTree_less
        Predicate requirements

        \p Traits::less, \p Traits::compare and other predicates used with member functions should accept
        at least parameters of type \p T and \p Key in any combination.
        For example, for a \p Foo struct with a \p std::string key field the appropriate \p less functor is:
        \code
        struct Foo: public cds::intrusive::ellen_bintree::node< ... >
        {
            std::string m_strKey;
            ...
        };

        struct less {
            bool operator()( Foo const& v1, Foo const& v2 ) const
            { return v1.m_strKey < v2.m_strKey ; }

            bool operator()( Foo const& v, std::string const& s ) const
            { return v.m_strKey < s ; }

            bool operator()( std::string const& s, Foo const& v ) const
            { return s < v.m_strKey ; }

            // Support comparing std::string and char const *
            bool operator()( std::string const& s, char const * p ) const
            { return s.compare(p) < 0 ; }

            bool operator()( Foo const& v, char const * p ) const
            { return v.m_strKey.compare(p) < 0 ; }

            bool operator()( char const * p, std::string const& s ) const
            { return s.compare(p) > 0; }

            bool operator()( char const * p, Foo const& v ) const
            { return v.m_strKey.compare(p) > 0; }
        };
        \endcode

        For usage examples see \ref cds_intrusive_EllenBinTree_usage "here"
    */
    template < class GC,
        typename Key,
        typename T,
#ifdef CDS_DOXYGEN_INVOKED
        class Traits = ellen_bintree::traits
#else
        class Traits
#endif
    >
    class EllenBinTree
    {
    public:
        typedef GC      gc;         ///< Garbage collector
        typedef Key     key_type;   ///< type of a key to be stored in internal nodes; key is a part of \p value_type
        typedef T       value_type; ///< type of value stored in the binary tree
        typedef Traits  traits;     ///< Traits template parameter

        typedef typename traits::hook    hook;      ///< hook type
        typedef typename hook::node_type node_type; ///< node type

        typedef typename traits::disposer disposer; ///< leaf node disposer
        typedef typename traits::back_off back_off; ///< back-off strategy

        typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer

    protected:
        //@cond
        typedef ellen_bintree::base_node< gc > tree_node; ///< Base type of tree node
        typedef node_type leaf_node;                      ///< Leaf node type
        typedef ellen_bintree::node_types< gc, key_type, typename leaf_node::tag > node_factory;
        typedef typename node_factory::internal_node_type internal_node; ///< Internal node type
        typedef typename node_factory::update_desc_type   update_desc;   ///< Update descriptor
        typedef typename update_desc::update_ptr          update_ptr;    ///< Marked pointer to update descriptor
        //@endcond

    public:
# ifdef CDS_DOXYGEN_INVOKED
        typedef implementation_defined key_comparator;    ///< key compare functor based on \p Traits::compare and
\p Traits::less typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< Node traits # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; struct node_traits: public get_node_traits< value_type, node_type, hook>::type { static internal_node const& to_internal_node( tree_node const& n ) { assert( n.is_internal()); return static_cast( n ); } static leaf_node const& to_leaf_node( tree_node const& n ) { assert( n.is_leaf()); return static_cast( n ); } }; # endif typedef typename traits::item_counter item_counter; ///< Item counting policy typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model typedef typename traits::stat stat; ///< internal statistics type typedef typename traits::key_extractor key_extractor; ///< key extracting functor typedef typename traits::node_allocator node_allocator; ///< Allocator for internal node typedef typename traits::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator static constexpr const size_t c_nHazardPtrCount = 9; ///< Count of hazard pointer required for the algorithm protected: //@cond typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare; typedef cds::details::Allocator< internal_node, node_allocator > cxx_node_allocator; typedef cds::details::Allocator< update_desc, update_desc_allocator > cxx_update_desc_allocator; struct search_result { enum guard_index { Guard_GrandParent, Guard_Parent, Guard_Leaf, Guard_updGrandParent, Guard_updParent, Guard_temporary, // end of guard indices guard_count }; typedef typename gc::template GuardArray< guard_count > guard_array; guard_array guards; internal_node * pGrandParent; internal_node * pParent; leaf_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf; // true if pLeaf is right child of pParent, false otherwise bool bRightParent; // true if pParent is right child of pGrandParent, false otherwise search_result() :pGrandParent( nullptr ) ,pParent( nullptr ) ,pLeaf( nullptr ) ,bRightLeaf( false ) ,bRightParent( false ) {} }; //@endcond protected: //@cond internal_node m_Root; ///< Tree root node (key= Infinite2) leaf_node m_LeafInf1; ///< Infinite leaf 1 (key= Infinite1) leaf_node m_LeafInf2; ///< Infinite leaf 2 (key= Infinite2) //@endcond item_counter m_ItemCounter; ///< item counter mutable stat m_Stat; ///< internal statistics protected: //@cond static void free_leaf_node( void* p ) { disposer()( reinterpret_cast( p )); } internal_node * alloc_internal_node() const { m_Stat.onInternalNodeCreated(); internal_node * pNode = cxx_node_allocator().New(); return pNode; } static void free_internal_node( void* pNode ) { cxx_node_allocator().Delete( reinterpret_cast( pNode )); } struct internal_node_deleter { void operator()( internal_node* p) const { cxx_node_allocator().Delete( p ); } }; typedef std::unique_ptr< internal_node, internal_node_deleter> unique_internal_node_ptr; update_desc * alloc_update_desc() const { m_Stat.onUpdateDescCreated(); return cxx_update_desc_allocator().New(); } static void free_update_desc( void* pDesc ) { cxx_update_desc_allocator().Delete( reinterpret_cast( pDesc )); } void retire_node( tree_node * pNode ) const { if ( pNode->is_leaf()) { assert( static_cast( pNode ) != &m_LeafInf1 ); assert( static_cast( pNode ) != &m_LeafInf2 ); gc::template retire( node_traits::to_value_ptr( static_cast( pNode )), free_leaf_node ); } else { assert( static_cast( pNode ) != 
&m_Root ); m_Stat.onInternalNodeDeleted(); gc::template retire( static_cast( pNode ), free_internal_node ); } } void retire_update_desc( update_desc * p ) const { m_Stat.onUpdateDescDeleted(); gc::template retire( p, free_update_desc ); } void make_empty_tree() { m_Root.infinite_key( 2 ); m_LeafInf1.infinite_key( 1 ); m_LeafInf2.infinite_key( 2 ); m_Root.m_pLeft.store( &m_LeafInf1, memory_model::memory_order_relaxed ); m_Root.m_pRight.store( &m_LeafInf2, memory_model::memory_order_release ); } //@endcond public: /// Default constructor EllenBinTree() { static_assert( !std::is_same< key_extractor, opt::none >::value, "The key extractor option must be specified" ); make_empty_tree(); } /// Clears the tree ~EllenBinTree() { unsafe_clear(); } /// Inserts new node /** The function inserts \p val in the tree if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the tree, \p false otherwise. */ bool insert( value_type& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the tree - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this tree's item by concurrent threads. The user-defined functor is called only if the inserting is success. */ template bool insert( value_type& val, Func f ) { typename gc::Guard guardInsert; guardInsert.assign( &val ); unique_internal_node_ptr pNewInternal; search_result res; back_off bkoff; for ( ;; ) { if ( search( res, val, node_compare())) { if ( pNewInternal.get()) m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node m_Stat.onInsertFailed(); return false; } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !pNewInternal.get()) pNewInternal.reset( alloc_internal_node()); if ( try_insert( val, pNewInternal.get(), res )) { f( val ); pNewInternal.release(); // internal node is linked into the tree and should not be deleted break; } } bkoff(); m_Stat.onInsertRetry(); } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor \p func signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor can change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. 
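            A typical call looks like this (a sketch; \p my_item is a hypothetical value type based on
            the tree's node, and \p nCounter is a hypothetical non-key field):
            \code
            my_item item;
            std::pair<bool, bool> result = tree.update( item,
                []( bool bNew, my_item& itemInTree, my_item& val ) {
                    if ( !bNew )
                        itemInTree.nCounter += val.nCounter;  // change non-key fields only
                });
            // result.first  - the item has been inserted or updated
            // result.second - a new item has been inserted
            \endcode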
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { typename gc::Guard guardInsert; guardInsert.assign( &val ); unique_internal_node_ptr pNewInternal; search_result res; back_off bkoff; for ( ;; ) { if ( search( res, val, node_compare())) { func( false, *node_traits::to_value_ptr( res.pLeaf ), val ); if ( pNewInternal.get()) m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node m_Stat.onUpdateExist(); return std::make_pair( true, false ); } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !bAllowInsert ) return std::make_pair( false, false ); if ( !pNewInternal.get()) pNewInternal.reset( alloc_internal_node()); if ( try_insert( val, pNewInternal.get(), res )) { func( true, val, val ); pNewInternal.release() ; // internal node has been linked into the tree and should not be deleted break; } } bkoff(); m_Stat.onUpdateRetry(); } ++m_ItemCounter; m_Stat.onUpdateNew(); return std::make_pair( true, true ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the tree /** The function searches the item \p val in the tree and unlink it from the tree if it is found and is equal to \p val. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. \p unlink finds an item by key and deletes it only if \p val is a node, i.e. the pointer to item found is equal to &val . The \p disposer specified in \p Traits class template parameter is called by garbage collector \p GC asynchronously. The function returns \p true if success and \p false otherwise. */ bool unlink( value_type& val ) { return erase_( val, node_compare(), []( value_type const& v, leaf_node const& n ) -> bool { return &v == node_traits::to_value_ptr( n ); }, [](value_type const&) {} ); } /// Deletes the item from the tree /** \anchor cds_intrusive_EllenBinTree_erase The function searches an item with key equal to \p key in the tree, unlinks it from the tree, and returns \p true. If the item with key equal to \p key is not found the function return \p false. Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q that can be not the same as \p value_type. */ template bool erase( const Q& key ) { return erase_( key, node_compare(), []( Q const&, leaf_node const& ) -> bool { return true; }, [](value_type const&) {} ); } /// Delete the item from the tree with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_EllenBinTree_erase "erase(Q const&)" but \p pred predicate is used for key comparing. \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. 
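            For example (a sketch reusing the hypothetical \p Foo type and the \p less predicate from
            \ref cds_intrusive_EllenBinTree_less "Predicate requirements"):
            \code
            // Erase by a std::string key; less must give the same element order
            // as the comparator the tree was built with
            bool bErased = tree.erase_with( std::string("foo"), less());
            \endcode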
*/
    template <typename Q, typename Less>
    bool erase_with( const Q& key, Less pred )
    {
        CDS_UNUSED( pred );
        typedef ellen_bintree::details::compare<
            key_type,
            value_type,
            opt::details::make_comparator_from_less<Less>,
            node_traits
        > compare_functor;

        return erase_( key, compare_functor(),
            []( Q const&, leaf_node const& ) -> bool { return true; },
            [](value_type const&) {} );
    }

    /// Deletes the item from the tree
    /** \anchor cds_intrusive_EllenBinTree_erase_func
        The function searches an item with key equal to \p key in the tree,
        calls the \p f functor with the item found, unlinks the item from the tree, and returns \p true.
        The \ref disposer specified in \p Traits class template parameter is called
        by garbage collector \p GC asynchronously.

        The \p Func interface is
        \code
        struct functor {
            void operator()( value_type const& item );
        };
        \endcode

        If the item with key equal to \p key is not found the function returns \p false.

        Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q
        that can be not the same as \p value_type.
    */
    template <typename Q, typename Func>
    bool erase( Q const& key, Func f )
    {
        return erase_( key, node_compare(),
            []( Q const&, leaf_node const& ) -> bool { return true; },
            f );
    }

    /// Delete the item from the tree with comparing functor \p pred
    /**
        The function is an analog of \ref cds_intrusive_EllenBinTree_erase_func "erase(Q const&, Func)"
        but \p pred predicate is used for key comparing.
        \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less
        "Predicate requirements".
        \p pred must imply the same element order as the comparator used for building the tree.
    */
    template <typename Q, typename Less, typename Func>
    bool erase_with( Q const& key, Less pred, Func f )
    {
        CDS_UNUSED( pred );
        typedef ellen_bintree::details::compare<
            key_type,
            value_type,
            opt::details::make_comparator_from_less<Less>,
            node_traits
        > compare_functor;

        return erase_( key, compare_functor(),
            []( Q const&, leaf_node const& ) -> bool { return true; },
            f );
    }

    /// Extracts an item with minimal key from the tree
    /**
        The function searches an item with minimal key, unlinks it, and returns a guarded pointer
        to the item found. If the tree is empty the function returns an empty guarded pointer.

        @note Due to the concurrent nature of the tree, the function extracts a nearly minimum key.
        It means that the function gets the leftmost leaf of the tree and tries to unlink it.
        During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
        So, the function returns the item with minimum key at the moment of tree traversal.

        The returned \p guarded_ptr prevents disposer invocation for the returned item,
        see \p cds::gc::guarded_ptr for explanation.
        @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource.
    */
    guarded_ptr extract_min()
    {
        return extract_min_();
    }

    /// Extracts an item with maximal key from the tree
    /**
        The function searches an item with maximal key, unlinks it, and returns a guarded pointer
        to the item found. If the tree is empty the function returns an empty \p guarded_ptr.

        @note Due to the concurrent nature of the tree, the function extracts a nearly maximal key.
        It means that the function gets the rightmost leaf of the tree and tries to unlink it.
        During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
        So, the function returns the item with maximal key at the moment of tree traversal.

        The returned \p guarded_ptr prevents disposer invocation for the returned item,
        see cds::gc::guarded_ptr for explanation.
        @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource.
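        For example, the tree can be drained in nearly max-first order, i.e. used as a priority queue
        (a sketch; \p set_type, \p tree and \p process() are hypothetical):
        \code
        typename set_type::guarded_ptr gp;
        while (( gp = tree.extract_max())) {
            // *gp is safe to use here: the guard prevents the disposer
            // from being invoked while gp holds the item
            process( *gp );
        }
        \endcode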
*/ guarded_ptr extract_max() { return extract_max_(); } /// Extracts an item from the tree /** \anchor cds_intrusive_EllenBinTree_extract The function searches an item with key equal to \p key in the tree, unlinks it, and returns a guarded pointer to an item found. If the item is not found the function returns an empty \p guarded_ptr. \p guarded_ptr prevents disposer invocation for returned item, see cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. */ template guarded_ptr extract( Q const& key ) { return extract_( key ); } /// Extracts an item from the tree using \p pred for searching /** The function is an analog of \ref cds_intrusive_EllenBinTree_extract "extract(Q const&)" but \p pred is used for key compare. \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. */ template guarded_ptr extract_with( Q const& key, Less pred ) { return extract_with_( key, pred ); } /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) const { search_result res; if ( search( res, key, node_compare())) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; search_result res; if ( search( res, key, compare_functor())) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Finds the key \p key /** @anchor cds_intrusive_EllenBinTree_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the tree \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) const { return find_( key, f ); } //@cond template bool find( Q const& key, Func f ) const { return find_( key, f ); } //@endcond /// Finds the key \p key with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_EllenBinTree_find_func "find(Q&, Func)" but \p pred is used for key comparison. 
\p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. */ template bool find_with( Q& key, Less pred, Func f ) const { return find_with_( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) const { return find_with_( key, pred, f ); } //@endcond /// Finds \p key and returns the item found /** @anchor cds_intrusive_EllenBinTree_get The function searches the item with key equal to \p key and returns the item found as \p guarded_ptr object. The function returns an empty guarded pointer is \p key is not found. \p guarded_ptr prevents disposer invocation for returned item, see \p cds::gc::guarded_ptr for explanation. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. */ template guarded_ptr get( Q const& key ) const { return get_( key ); } /// Finds \p key with predicate \p pred and returns the item found /** The function is an analog of \ref cds_intrusive_EllenBinTree_get "get(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less "Predicate requirements". \p pred must imply the same element order as the comparator used for building the tree. */ template guarded_ptr get_with( Q const& key, Less pred ) const { return get_with_( key, pred ); } /// Checks if the tree is empty bool empty() const { return m_Root.m_pLeft.load( memory_model::memory_order_relaxed )->is_leaf(); } /// Clears the tree (thread safe, not atomic) /** The function unlink all items from the tree. The function is thread safe but not atomic: in multi-threaded environment with parallel insertions this sequence \code tree.clear(); assert( tree.empty()); \endcode the assertion could be raised. For each leaf the \p disposer will be called after unlinking. */ void clear() { guarded_ptr gp; do { gp = extract_min(); } while ( gp ); } /// Clears the tree (not thread safe) /** This function is not thread safe and may be called only when no other thread deals with the tree. The function is used in the tree destructor. */ void unsafe_clear() { while ( true ) { internal_node * pParent = nullptr; internal_node * pGrandParent = nullptr; tree_node * pLeaf = const_cast( &m_Root ); // Get leftmost leaf while ( pLeaf->is_internal()) { pGrandParent = pParent; pParent = static_cast( pLeaf ); pLeaf = pParent->m_pLeft.load( memory_model::memory_order_relaxed ); } if ( pLeaf->infinite_key()) { // The tree is empty return; } // Remove leftmost leaf and its parent node assert( pGrandParent ); assert( pParent ); assert( pLeaf->is_leaf()); pGrandParent->m_pLeft.store( pParent->m_pRight.load( memory_model::memory_order_relaxed ), memory_model::memory_order_relaxed ); free_leaf_node( node_traits::to_value_ptr( static_cast( pLeaf ))); free_internal_node( pParent ); } } /// Returns item count in the tree /** Only leaf nodes containing user data are counted. The value returned depends on item counter type provided by \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. The function is not suitable for checking the tree emptiness, use \p empty() member function for this purpose. 
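            For example, to make \p %size() return a real item count, select a real counter type
            in the traits (a sketch):
            \code
            struct my_traits: public cds::intrusive::ellen_bintree::traits {
                typedef cds::atomicity::item_counter item_counter; // enable item counting
                // ... other options (hook, key_extractor etc.) as required
            };
            \endcode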
*/ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Checks internal consistency (not atomic, not thread-safe) /** The debugging function to check internal consistency of the tree. */ bool check_consistency() const { return check_consistency( &m_Root ); } protected: //@cond bool check_consistency( internal_node const * pRoot ) const { tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed ); tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed ); assert( pLeft ); assert( pRight ); if ( node_compare()( *pLeft, *pRoot ) < 0 && node_compare()( *pRoot, *pRight ) <= 0 && node_compare()( *pLeft, *pRight ) < 0 ) { bool bRet = true; if ( pLeft->is_internal()) bRet = check_consistency( static_cast( pLeft )); assert( bRet ); if ( bRet && pRight->is_internal()) bRet = bRet && check_consistency( static_cast( pRight )); assert( bRet ); return bRet; } return false; } tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent ) const { retry: tree_node * p = bRight ? res.guards.protect( search_result::Guard_Leaf, pParent->m_pRight, []( tree_node * pn ) -> internal_node* { return static_cast(pn);}) : res.guards.protect( search_result::Guard_Leaf, pParent->m_pLeft, []( tree_node * pn ) -> internal_node* { return static_cast(pn);}); // If we use member hook, data node pointer != internal node pointer // So, we need protect the child twice: as internal node and as data node // and then analyze what kind of node we have tree_node * pVal = bRight ? res.guards.protect( search_result::Guard_temporary, pParent->m_pRight, []( tree_node * pn ) -> value_type* { return node_traits::to_value_ptr( static_cast(pn));} ) : res.guards.protect( search_result::Guard_temporary, pParent->m_pLeft, []( tree_node * pn ) -> value_type* { return node_traits::to_value_ptr( static_cast(pn));} ); // child node is guarded // See whether pParent->m_pUpdate has not been changed if ( pParent->m_pUpdate.load( memory_model::memory_order_acquire ) != updParent ) { // update has been changed - returns nullptr as a flag to search retry return nullptr; } if ( p != pVal ) goto retry; if ( p && p->is_leaf()) res.guards.assign( search_result::Guard_Leaf, node_traits::to_value_ptr( static_cast( p ))); res.guards.clear( search_result::Guard_temporary ); return p; } static update_ptr search_protect_update( search_result& res, atomics::atomic const& src ) { return res.guards.protect( search_result::Guard_updParent, src, [](update_ptr p) -> update_desc* { return p.ptr(); }); } template bool search( search_result& res, KeyValue const& key, Compare cmp ) const { internal_node * pParent; internal_node * pGrandParent = nullptr; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf; bool bRightParent = false; int nCmp = 0; retry: pParent = nullptr; //pGrandParent = nullptr; updParent = nullptr; bRightLeaf = false; tree_node * pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal()) { res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); pGrandParent = pParent; res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); pParent = static_cast( pLeaf ); bRightParent = bRightLeaf; res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); updGrandParent = updParent; updParent = search_protect_update( res, pParent->m_pUpdate ); switch ( updParent.bits()) { case update_desc::DFlag: case 
update_desc::Mark: m_Stat.onSearchRetry(); goto retry; } nCmp = cmp( key, *pParent ); bRightLeaf = nCmp >= 0; pLeaf = protect_child_node( res, pParent, bRightLeaf, updParent ); if ( !pLeaf ) { m_Stat.onSearchRetry(); goto retry; } } assert( pLeaf->is_leaf()); nCmp = cmp( key, *static_cast(pLeaf)); res.pGrandParent = pGrandParent; res.pParent = pParent; res.pLeaf = static_cast( pLeaf ); res.updParent = updParent; res.updGrandParent = updGrandParent; res.bRightParent = bRightParent; res.bRightLeaf = bRightLeaf; return nCmp == 0; } bool search_min( search_result& res ) const { internal_node * pParent; internal_node * pGrandParent; update_ptr updParent; update_ptr updGrandParent; retry: pParent = nullptr; pGrandParent = nullptr; updParent = nullptr; tree_node * pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal()) { res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); pGrandParent = pParent; res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); pParent = static_cast( pLeaf ); res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); updGrandParent = updParent; updParent = search_protect_update( res, pParent->m_pUpdate ); switch ( updParent.bits()) { case update_desc::DFlag: case update_desc::Mark: m_Stat.onSearchRetry(); goto retry; } pLeaf = protect_child_node( res, pParent, false, updParent ); if ( !pLeaf ) { m_Stat.onSearchRetry(); goto retry; } } if ( pLeaf->infinite_key()) return false; res.pGrandParent = pGrandParent; res.pParent = pParent; assert( pLeaf->is_leaf()); res.pLeaf = static_cast( pLeaf ); res.updParent = updParent; res.updGrandParent = updGrandParent; res.bRightParent = false; res.bRightLeaf = false; return true; } bool search_max( search_result& res ) const { internal_node * pParent; internal_node * pGrandParent; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf; bool bRightParent = false; retry: pParent = nullptr; pGrandParent = nullptr; updParent = nullptr; bRightLeaf = false; tree_node * pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal()) { res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); pGrandParent = pParent; res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); pParent = static_cast( pLeaf ); bRightParent = bRightLeaf; res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); updGrandParent = updParent; updParent = search_protect_update( res, pParent->m_pUpdate ); switch ( updParent.bits()) { case update_desc::DFlag: case update_desc::Mark: m_Stat.onSearchRetry(); goto retry; } bRightLeaf = !pParent->infinite_key(); pLeaf = protect_child_node( res, pParent, bRightLeaf, updParent ); if ( !pLeaf ) { m_Stat.onSearchRetry(); goto retry; } } if ( pLeaf->infinite_key()) return false; res.pGrandParent = pGrandParent; res.pParent = pParent; assert( pLeaf->is_leaf()); res.pLeaf = static_cast( pLeaf ); res.updParent = updParent; res.updGrandParent = updGrandParent; res.bRightParent = bRightParent; res.bRightLeaf = bRightLeaf; return true; } /* void help( update_ptr pUpdate ) { // pUpdate must be guarded! 
switch ( pUpdate.bits()) { case update_desc::IFlag: help_insert( pUpdate.ptr()); m_Stat.onHelpInsert(); break; case update_desc::DFlag: help_delete( pUpdate.ptr()); m_Stat.onHelpDelete(); break; case update_desc::Mark: //m_Stat.onHelpMark(); //help_marked( pUpdate.ptr()); break; } } */ void help_insert( update_desc * pOp ) { // pOp must be guarded tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); if ( pOp->iInfo.bRightLeaf ) { CDS_VERIFY( pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )); } else { CDS_VERIFY( pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )); } // Unflag parent update_ptr cur( pOp, update_desc::IFlag ); CDS_VERIFY( pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), memory_model::memory_order_release, atomics::memory_order_relaxed )); } bool check_delete_precondition( search_result& res ) const { // precondition: all member of res must be guarded assert( res.pGrandParent != nullptr ); return static_cast(res.pGrandParent->get_child( res.bRightParent, memory_model::memory_order_relaxed )) == res.pParent && static_cast( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_relaxed )) == res.pLeaf; } bool help_delete( update_desc * pOp ) { // precondition: pOp must be guarded update_ptr pUpdate( pOp->dInfo.pUpdateParent ); update_ptr pMark( pOp, update_desc::Mark ); if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, // * memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { help_marked( pOp ); retire_node( pOp->dInfo.pParent ); retire_node( pOp->dInfo.pLeaf ); retire_update_desc( pOp ); return true; } else if ( pUpdate == pMark ) { // some other thread is processing help_marked() help_marked( pOp ); m_Stat.onHelpMark(); return true; } else { // Undo grandparent dInfo update_ptr pDel( pOp, update_desc::DFlag ); if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), memory_model::memory_order_release, atomics::memory_order_relaxed )) { retire_update_desc( pOp ); } return false; } } static tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic& sibling ) { tree_node * pSibling = guard.protect( sibling, [](tree_node * p) -> internal_node* { return static_cast(p); } ); if ( pSibling->is_leaf()) guard.assign( node_traits::to_value_ptr( static_cast( pSibling ))); return pSibling; } void help_marked( update_desc * pOp ) { // precondition: pOp must be guarded tree_node * pParent = pOp->dInfo.pParent; typename gc::Guard guard; tree_node * pOpposite = protect_sibling( guard, pOp->dInfo.bRightLeaf ? 
pOp->dInfo.pParent->m_pLeft : pOp->dInfo.pParent->m_pRight ); if ( pOp->dInfo.bRightParent ) { CDS_VERIFY( pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( pParent, pOpposite, memory_model::memory_order_release, atomics::memory_order_relaxed )); } else { CDS_VERIFY( pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( pParent, pOpposite, memory_model::memory_order_release, atomics::memory_order_relaxed )); } update_ptr upd( pOp, update_desc::DFlag ); CDS_VERIFY( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), memory_model::memory_order_release, atomics::memory_order_relaxed )); } bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res ) { assert( res.updParent.bits() == update_desc::Clean ); assert( res.pLeaf->is_leaf()); // check search result if ( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_acquire ) == res.pLeaf ) { leaf_node * pNewLeaf = node_traits::to_node_ptr( val ); int nCmp = node_compare()(val, *res.pLeaf); if ( nCmp < 0 ) { if ( res.pGrandParent ) { assert( !res.pLeaf->infinite_key()); pNewInternal->infinite_key( 0 ); key_extractor()(pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf )); } else { assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 ); pNewInternal->infinite_key( 1 ); } pNewInternal->m_pLeft.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); pNewInternal->m_pRight.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); } else { assert( !res.pLeaf->is_internal()); pNewInternal->infinite_key( 0 ); key_extractor()(pNewInternal->m_Key, val); pNewInternal->m_pLeft.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); pNewInternal->m_pRight.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); assert( !res.pLeaf->infinite_key()); } typename gc::Guard guard; update_desc * pOp = alloc_update_desc(); guard.assign( pOp ); pOp->iInfo.pParent = res.pParent; pOp->iInfo.pNew = pNewInternal; pOp->iInfo.pLeaf = res.pLeaf; pOp->iInfo.bRightLeaf = res.bRightLeaf; update_ptr updCur( res.updParent.ptr()); if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) { // do insert help_insert( pOp ); retire_update_desc( pOp ); return true; } else { m_Stat.onUpdateDescDeleted(); free_update_desc( pOp ); } } return false; } template bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) { update_desc * pOp = nullptr; search_result res; back_off bkoff; for ( ;; ) { if ( !search( res, val, cmp ) || !eq( val, *res.pLeaf )) { if ( pOp ) retire_update_desc( pOp ); m_Stat.onEraseFailed(); return false; } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { typename gc::Guard guard; guard.assign( pOp ); pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) { // res.pLeaf is not deleted yet since it is guarded f( 
*node_traits::to_value_ptr( res.pLeaf )); break; } pOp = nullptr; } } } bkoff(); m_Stat.onEraseRetry(); } --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } template guarded_ptr extract_item( Q const& key, Compare cmp ) { update_desc * pOp = nullptr; search_result res; back_off bkoff; for ( ;; ) { if ( !search( res, key, cmp )) { if ( pOp ) retire_update_desc( pOp ); m_Stat.onEraseFailed(); return guarded_ptr(); } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { typename gc::Guard guard; guard.assign( pOp ); pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) break; pOp = nullptr; } } } bkoff(); m_Stat.onEraseRetry(); } --m_ItemCounter; m_Stat.onEraseSuccess(); return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); } template guarded_ptr extract_( Q const& key ) { return extract_item( key, node_compare()); } template guarded_ptr extract_with_( Q const& key, Less /*pred*/ ) { typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; return extract_item( key, compare_functor()); } guarded_ptr extract_max_() { update_desc * pOp = nullptr; search_result res; back_off bkoff; for ( ;; ) { if ( !search_max( res )) { // Tree is empty if ( pOp ) retire_update_desc( pOp ); m_Stat.onExtractMaxFailed(); return guarded_ptr(); } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { typename gc::Guard guard; guard.assign( pOp ); pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) break; pOp = nullptr; } } } bkoff(); m_Stat.onExtractMaxRetry(); } --m_ItemCounter; m_Stat.onExtractMaxSuccess(); return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); } guarded_ptr extract_min_() { update_desc * pOp = nullptr; search_result res; back_off bkoff; for ( ;; ) { if ( !search_min( res )) { // Tree is empty if ( pOp ) retire_update_desc( pOp ); m_Stat.onExtractMinFailed(); return guarded_ptr(); } if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { if ( !pOp ) pOp = alloc_update_desc(); if ( check_delete_precondition( res )) { typename gc::Guard guard; guard.assign( pOp ); pOp->dInfo.pGrandParent = res.pGrandParent; pOp->dInfo.pParent = res.pParent; pOp->dInfo.pLeaf = res.pLeaf; pOp->dInfo.pUpdateParent = res.updParent.ptr(); pOp->dInfo.bRightParent = res.bRightParent; pOp->dInfo.bRightLeaf = res.bRightLeaf; update_ptr updGP( 
res.updGrandParent.ptr()); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) break; pOp = nullptr; } } } bkoff(); m_Stat.onExtractMinRetry(); } --m_ItemCounter; m_Stat.onExtractMinSuccess(); return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); } template bool find_( Q& val, Func f ) const { search_result res; if ( search( res, val, node_compare())) { assert( res.pLeaf ); f( *node_traits::to_value_ptr( res.pLeaf ), val ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template bool find_with_( Q& val, Less /*pred*/, Func f ) const { typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; search_result res; if ( search( res, val, compare_functor())) { assert( res.pLeaf ); f( *node_traits::to_value_ptr( res.pLeaf ), val ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template guarded_ptr get_( Q const& val ) const { search_result res; if ( search( res, val, node_compare())) { assert( res.pLeaf ); m_Stat.onFindSuccess(); return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); } m_Stat.onFindFailed(); return guarded_ptr(); } template guarded_ptr get_with_( Q const& val, Less pred ) const { CDS_UNUSED( pred ); typedef ellen_bintree::details::compare< key_type, value_type, opt::details::make_comparator_from_less, node_traits > compare_functor; search_result res; if ( search( res, val, compare_functor())) { assert( res.pLeaf ); m_Stat.onFindSuccess(); return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); } m_Stat.onFindFailed(); return guarded_ptr(); } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H libcds-2.3.3/cds/intrusive/impl/feldman_hashset.h000066400000000000000000001450311341244201700220210ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H #define CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H #include // std::ref #include // std::iterator_traits #include #include #include namespace cds { namespace intrusive { /// Intrusive hash set based on multi-level array /** @ingroup cds_intrusive_map @anchor cds_intrusive_FeldmanHashSet_hp Source: - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: Wait-free Extensible Hash Maps" [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform a global resize, the process of redistributing the elements in a hash map that occurs when adding new buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all threads will be forced to wait on the thread that is performing the involved process of resizing the hash map and redistributing the elements. \p %FeldmanHashSet implementation avoids global resizes through new array allocation. By allowing concurrent expansion this structure is free from the overhead of an explicit resize, which facilitates concurrent operations. 
The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure; which, in combination with perfect hashing, means that each element has a unique final, as well as current, position. It is important to note that the perfect hash function required by our hash map is trivial to realize as any hash function that permutes the bits of the key is suitable. This is possible because of our approach to the hash function; we require that it produces hash values that are equal in size to that of the key. We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys are not provided for in the standard semantics of a hash map. \p %FeldmanHashSet is a multi-level array which has a structure similar to a tree: @image html feldman_hashset.png The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node. A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked. A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB) of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node; any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark on the second-least significant bit. \p %FeldmanHashSet multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length; a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength. The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node. We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which we need to operate; this is initially one, because of \p head. That approach to the structure of the hash set uses an extensible hashing scheme; the hash value is treated as a bit string and rehash incrementally. @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. Instead, for the strings you should use well-known hashing algorithms like SHA1, SHA2, MurmurHash, CityHash or its successor FarmHash and so on, which converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key, it maintains its fixed-size hash value. The set supports @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional thread-safe iterators". Template parameters: - \p GC - safe memory reclamation schema. 
Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type" - \p T - a value type to be stored in the set - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" to hash value of \p T. The set algorithm does not calculate that hash value. There are several specializations of \p %FeldmanHashSet for each \p GC. You should include: - for \p gc::HP garbage collector - for \p gc::DHP garbage collector - for \ref cds_intrusive_FeldmanHashSet_rcu "RCU type". RCU specialization has a slightly different interface. */ template < class GC ,typename T #ifdef CDS_DOXYGEN_INVOKED ,typename Traits = feldman_hashset::traits #else ,typename Traits #endif > class FeldmanHashSet: protected feldman_hashset::multilevel_array { //@cond typedef feldman_hashset::multilevel_array base_class; //@endcond public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the set typedef Traits traits; ///< Traits template parameter, see \p feldman_hashset::traits typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type typedef typename traits::disposer disposer; ///< data node disposer typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::node_allocator node_allocator; ///< Array node allocator typedef typename traits::memory_model memory_model; ///< Memory model typedef typename traits::back_off back_off; ///< Backoff strategy typedef typename traits::stat stat; ///< Internal statistics type typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer /// Count of hazard pointers required static constexpr size_t const c_nHazardPtrCount = 2; /// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation static constexpr size_t const c_hash_size = base_class::c_hash_size; /// Level statistics typedef feldman_hashset::level_statistics level_statistics; protected: //@cond typedef typename base_class::node_ptr node_ptr; typedef typename base_class::atomic_node_ptr atomic_node_ptr; typedef typename base_class::array_node array_node; typedef typename base_class::traverse_data traverse_data; using base_class::to_array; using base_class::to_node; using base_class::stats; using base_class::head; using base_class::metrics; //@endcond protected: //@cond class iterator_base { friend class FeldmanHashSet; protected: array_node * m_pNode; ///< current array node size_t m_idx; ///< current position in m_pNode typename gc::Guard m_guard; ///< HP guard FeldmanHashSet const* m_set; ///< Hash set public: iterator_base() noexcept : m_pNode( nullptr ) , m_idx( 0 ) , m_set( nullptr ) {} iterator_base( iterator_base const& rhs ) noexcept : m_pNode( rhs.m_pNode ) , m_idx( rhs.m_idx ) , m_set( rhs.m_set ) { m_guard.copy( rhs.m_guard ); } iterator_base& operator=( iterator_base const& rhs ) noexcept { m_pNode = rhs.m_pNode; m_idx = rhs.m_idx; m_set = rhs.m_set; m_guard.copy( rhs.m_guard ); return *this; } iterator_base& operator++() { forward(); return *this; } iterator_base& operator--() { backward(); return *this; } void 
release() { m_guard.clear(); } bool operator ==( iterator_base const& rhs ) const noexcept { return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set; } bool operator !=( iterator_base const& rhs ) const noexcept { return !( *this == rhs ); } protected: iterator_base( FeldmanHashSet const& set, array_node * pNode, size_t idx, bool ) : m_pNode( pNode ) , m_idx( idx ) , m_set( &set ) {} iterator_base( FeldmanHashSet const& set, array_node * pNode, size_t idx ) : m_pNode( pNode ) , m_idx( idx ) , m_set( &set ) { forward(); } value_type * pointer() const noexcept { return m_guard.template get(); } void forward() { assert( m_set != nullptr ); assert( m_pNode != nullptr ); size_t const arrayNodeSize = m_set->array_node_size(); size_t const headSize = m_set->head_size(); array_node * pNode = m_pNode; size_t idx = m_idx + 1; size_t nodeSize = m_pNode->pParent? arrayNodeSize : headSize; for ( ;; ) { if ( idx < nodeSize ) { node_ptr slot = pNode->nodes[idx].load( memory_model::memory_order_acquire ); if ( slot.bits() == base_class::flag_array_node ) { // array node, go down the tree assert( slot.ptr() != nullptr ); pNode = to_array( slot.ptr()); idx = 0; nodeSize = arrayNodeSize; } else if ( slot.bits() == base_class::flag_array_converting ) { // the slot is converting to array node right now - skip the node ++idx; } else { if ( slot.ptr()) { // data node if ( m_guard.protect( pNode->nodes[idx], []( node_ptr p ) -> value_type* { return p.ptr(); }) == slot ) { m_pNode = pNode; m_idx = idx; return; } } ++idx; } } else { // up to parent node if ( pNode->pParent ) { idx = pNode->idxParent + 1; pNode = pNode->pParent; nodeSize = pNode->pParent ? arrayNodeSize : headSize; } else { // end() assert( pNode == m_set->head()); assert( idx == headSize ); m_pNode = pNode; m_idx = idx; return; } } } } void backward() { assert( m_set != nullptr ); assert( m_pNode != nullptr ); size_t const arrayNodeSize = m_set->array_node_size(); size_t const headSize = m_set->head_size(); size_t const endIdx = size_t(0) - 1; array_node * pNode = m_pNode; size_t idx = m_idx - 1; size_t nodeSize = m_pNode->pParent? arrayNodeSize : headSize; for ( ;; ) { if ( idx != endIdx ) { node_ptr slot = pNode->nodes[idx].load( memory_model::memory_order_acquire ); if ( slot.bits() == base_class::flag_array_node ) { // array node, go down the tree assert( slot.ptr() != nullptr ); pNode = to_array( slot.ptr()); nodeSize = arrayNodeSize; idx = nodeSize - 1; } else if ( slot.bits() == base_class::flag_array_converting ) { // the slot is converting to array node right now - skip the node --idx; } else { if ( slot.ptr()) { // data node if ( m_guard.protect( pNode->nodes[idx], []( node_ptr p ) -> value_type* { return p.ptr(); }) == slot ) { m_pNode = pNode; m_idx = idx; return; } } --idx; } } else { // up to parent node if ( pNode->pParent ) { idx = pNode->idxParent - 1; pNode = pNode->pParent; nodeSize = pNode->pParent ? 
arrayNodeSize : headSize; } else { // rend() assert( pNode == m_set->head()); assert( idx == endIdx ); m_pNode = pNode; m_idx = idx; return; } } } } }; template Iterator init_begin() const { return Iterator( *this, head(), size_t(0) - 1 ); } template Iterator init_end() const { return Iterator( *this, head(), head_size(), false ); } template Iterator init_rbegin() const { return Iterator( *this, head(), head_size()); } template Iterator init_rend() const { return Iterator( *this, head(), size_t(0) - 1, false ); } /// Bidirectional iterator class template class bidirectional_iterator: protected iterator_base { friend class FeldmanHashSet; protected: static constexpr bool const c_bConstantIterator = IsConst; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: bidirectional_iterator() noexcept {} bidirectional_iterator( bidirectional_iterator const& rhs ) noexcept : iterator_base( rhs ) {} bidirectional_iterator& operator=( bidirectional_iterator const& rhs ) noexcept { iterator_base::operator=( rhs ); return *this; } bidirectional_iterator& operator++() { iterator_base::operator++(); return *this; } bidirectional_iterator& operator--() { iterator_base::operator--(); return *this; } value_ptr operator ->() const noexcept { return iterator_base::pointer(); } value_ref operator *() const noexcept { value_ptr p = iterator_base::pointer(); assert( p ); return *p; } void release() { iterator_base::release(); } template bool operator ==( bidirectional_iterator const& rhs ) const noexcept { return iterator_base::operator==( rhs ); } template bool operator !=( bidirectional_iterator const& rhs ) const noexcept { return !( *this == rhs ); } protected: bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx, bool ) : iterator_base( set, pNode, idx, false ) {} bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx ) : iterator_base( set, pNode, idx ) {} }; /// Reverse bidirectional iterator template class reverse_bidirectional_iterator : public iterator_base { friend class FeldmanHashSet; public: typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference public: reverse_bidirectional_iterator() noexcept : iterator_base() {} reverse_bidirectional_iterator( reverse_bidirectional_iterator const& rhs ) noexcept : iterator_base( rhs ) {} reverse_bidirectional_iterator& operator=( reverse_bidirectional_iterator const& rhs) noexcept { iterator_base::operator=( rhs ); return *this; } reverse_bidirectional_iterator& operator++() { iterator_base::operator--(); return *this; } reverse_bidirectional_iterator& operator--() { iterator_base::operator++(); return *this; } value_ptr operator ->() const noexcept { return iterator_base::pointer(); } value_ref operator *() const noexcept { value_ptr p = iterator_base::pointer(); assert( p ); return *p; } void release() { iterator_base::release(); } template bool operator ==( reverse_bidirectional_iterator const& rhs ) const { return iterator_base::operator==( rhs ); } template bool operator !=( reverse_bidirectional_iterator const& rhs ) { return !( *this == rhs ); } private: reverse_bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx, bool ) : iterator_base( 
set, pNode, idx, false ) {} reverse_bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx ) : iterator_base( set, pNode, idx, false ) { iterator_base::backward(); } }; //@endcond public: #ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional iterator" type typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional const iterator" type typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional reverse iterator" type typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional reverse const iterator" type #else typedef bidirectional_iterator iterator; typedef bidirectional_iterator const_iterator; typedef reverse_bidirectional_iterator reverse_iterator; typedef reverse_bidirectional_iterator const_reverse_iterator; #endif private: //@cond item_counter m_ItemCounter; ///< Item counter //@endcond public: /// Creates empty set /** @param head_bits - 2head_bits specifies the size of head array, minimum is 4. @param array_bits - 2array_bits specifies the size of array node, minimum is 2. Equation for \p head_bits and \p array_bits: \code sizeof( hash_type ) * 8 == head_bits + N * array_bits \endcode where \p N is multi-level array depth. */ FeldmanHashSet( size_t head_bits = 8, size_t array_bits = 4 ) : base_class( head_bits, array_bits ) {} /// Destructs the set and frees all data ~FeldmanHashSet() { clear(); } /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with that hash. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the inserting is success. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting". 
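        As a hedged illustration (the names \p my_set, \p item_type and \p make_hash are assumptions
        for this sketch, not part of the library), splitting creation and initialization might look like:
        \code
        struct item_type {
            unsigned int hash;   // the value returned by traits::hash_accessor (assumed fixed-size)
            int          payload;
        };

        my_set theSet;
        item_type* p = new item_type{ make_hash( 5 ), 0 };
        // payload is initialized by the functor only if the insertion succeeds
        if ( !theSet.insert( *p, []( item_type& i ) { i.payload = 42; } ))
            delete p;   // not linked into the set - safe to free immediately
        \endcode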
*/ template bool insert( value_type& val, Func f ) { hash_type const& hash = hash_accessor()( val ); traverse_data pos( hash, *this ); hash_comparator cmp; typename gc::template GuardArray<2> guards; guards.assign( 1, &val ); while ( true ) { node_ptr slot = base_class::traverse( pos ); assert( slot.bits() == 0 ); // protect data node by hazard pointer if ( guards.protect( 0, pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot ) { // slot value has been changed - retry stats().onSlotChanged(); } else if ( slot.ptr()) { if ( cmp( hash, hash_accessor()( *slot.ptr())) == 0 ) { // the item with that hash value already exists stats().onInsertFailed(); return false; } if ( !pos.splitter.eos()) { // the slot must be expanded base_class::expand_slot( pos, slot ); } else return false; } else { // the slot is empty, try to insert data node node_ptr pNull; if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // the new data node has been inserted f( val ); ++m_ItemCounter; stats().onInsertSuccess(); stats().height( pos.nHeight ); return true; } // insert failed - slot has been changed by another thread // retry inserting stats().onInsertRetry(); } } } /// Updates the node /** Performs inserting or updating the item with hash value equal to \p val. - If hash value is found then existing item is replaced with \p val, old item is disposed with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously. The function returns std::pair - If hash value is not found and \p bInsert is \p true then \p val is inserted, the function returns std::pair - If hash value is not found and \p bInsert is \p false then the set is unchanged, the function returns std::pair Returns std::pair where \p first is \p true if operation is successful (i.e. the item has been inserted or updated), \p second is \p true if new item has been added or \p false if the set contains that hash. */ std::pair update( value_type& val, bool bInsert = true ) { return do_update( val, []( value_type&, value_type* ) {}, bInsert ); } /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlink it if it is found and its address is equal to &val. The function returns \p true if success and \p false otherwise. */ bool unlink( value_type const& val ) { typename gc::Guard guard; auto pred = [&val]( value_type const& item ) -> bool { return &item == &val; }; value_type * p = do_erase( hash_accessor()( val ), guard, std::ref( pred )); return p != nullptr; } /// Deletes the item from the set /** The function searches \p hash in the set, unlinks the item found, and returns \p true. If that item is not found the function returns \p false. The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously. */ bool erase( hash_type const& hash ) { return erase( hash, []( value_type const& ) {} ); } /// Deletes the item from the set /** The function searches \p hash in the set, call \p f functor with item found, and unlinks it from the set. The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type& item ); }; \endcode If \p hash is not found the function returns \p false. 
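        For illustration only (reusing the hypothetical \p theSet and \p item_type from the sketch above,
        with \p h a previously computed hash value):
        \code
        int erased_payload = -1;
        bool removed = theSet.erase( h, [&erased_payload]( item_type const& i ) {
            erased_payload = i.payload;  // the item is still guarded while the functor runs
        });
        \endcode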
    */
    template <typename Func>
    bool erase( hash_type const& hash, Func f )
    {
        typename gc::Guard guard;
        value_type * p = do_erase( hash, guard, []( value_type const&) -> bool { return true; } );

        // p is guarded by HP
        if ( p ) {
            f( *p );
            return true;
        }
        return false;
    }

    /// Deletes the item pointed to by iterator \p iter
    /**
        Returns \p true if the operation is successful, \p false otherwise.
        The function does not invalidate the iterator, it remains valid and can be used for further traversing.
    */
    bool erase_at( iterator const& iter )
    {
        return do_erase_at( iter );
    }
    //@cond
    bool erase_at( reverse_iterator const& iter )
    {
        return do_erase_at( iter );
    }
    //@endcond

    /// Extracts the item with specified \p hash
    /**
        The function searches \p hash in the set, unlinks the item from the set,
        and returns a guarded pointer to the item extracted.
        If \p hash is not found the function returns an empty guarded pointer.

        The \p disposer specified in \p Traits class' template parameter is called automatically
        by garbage collector \p GC when the returned \ref guarded_ptr object is destroyed or released.
        @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource.

        Usage:
        \code
        typedef cds::intrusive::FeldmanHashSet< your_template_args > my_set;
        my_set theSet;
        // ...
        {
            my_set::guarded_ptr gp( theSet.extract( 5 ));
            if ( gp ) {
                // Deal with gp
                // ...
            }
            // Destructor of gp releases internal HP guard
        }
        \endcode
    */
    guarded_ptr extract( hash_type const& hash )
    {
        typename gc::Guard guard;
        if ( do_erase( hash, guard, []( value_type const&) -> bool { return true; } ))
            return guarded_ptr( std::move( guard ));
        return guarded_ptr();
    }

    /// Finds an item by its \p hash
    /**
        The function searches the item by \p hash and calls the functor \p f for the item found.
        The interface of \p Func functor is:
        \code
        struct functor {
            void operator()( value_type& item );
        };
        \endcode
        where \p item is the item found. The functor may change non-key fields of \p item.

        Note that the only guarantee is that \p item cannot be disposed while the functor is executing.
        The functor does not serialize simultaneous access to the set's \p item. If such access is
        possible you must provide your own synchronization schema on item level to prevent unsafe item modifications.

        The function returns \p true if \p hash is found, \p false otherwise.
    */
    template <typename Func>
    bool find( hash_type const& hash, Func f )
    {
        typename gc::Guard guard;
        value_type * p = search( hash, guard );

        // p is guarded by HP
        if ( p ) {
            f( *p );
            return true;
        }
        return false;
    }

    /// Checks whether the set contains \p hash
    /**
        The function searches the item by its \p hash
        and returns \p true if it is found, or \p false otherwise.
    */
    bool contains( hash_type const& hash )
    {
        return find( hash, []( value_type& ) {} );
    }

    /// Finds an item by its \p hash and returns the item found
    /**
        The function searches the item by its \p hash
        and returns the guarded pointer to the item found.
        If \p hash is not found the function returns an empty \p guarded_ptr.

        @note Each \p guarded_ptr object uses one of the GC's guards, which can be a limited resource.

        Usage:
        \code
        typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set;
        my_set theSet;
        // ...
        {
            my_set::guarded_ptr gp( theSet.get( 5 ));
            if ( gp ) {
                // Deal with gp
                //...
            }
            // Destructor of guarded_ptr releases internal HP guard
        }
        \endcode
    */
    guarded_ptr get( hash_type const& hash )
    {
        typename gc::Guard guard;
        if ( search( hash, guard ))
            return guarded_ptr( std::move( guard ));
        return guarded_ptr();
    }

    /// Clears the set (non-atomic)
    /**
        The function unlinks all data nodes from the set.
        The function is not atomic but is thread-safe.
        After \p %clear() the set may not be empty because other threads may insert items.

        For each item the \p disposer is called after unlinking.
    */
    void clear()
    {
        clear_array( head(), head_size());
    }

    /// Checks if the set is empty
    /**
        Emptiness is checked by item counting: if item count is zero then the set is empty.
        Thus, the correct item counting feature is an important part of the set implementation.
    */
    bool empty() const
    {
        return size() == 0;
    }

    /// Returns item count in the set
    size_t size() const
    {
        return m_ItemCounter;
    }

    /// Returns const reference to internal statistics
    stat const& statistics() const
    {
        return stats();
    }

    /// Returns the size of head node
    using base_class::head_size;

    /// Returns the size of the array node
    using base_class::array_node_size;

    /// Collects tree level statistics into \p stat
    /**
        The function traverses the set and collects statistics for each level of the tree
        into \p feldman_hashset::level_statistics struct. The element of \p stat[i]
        represents statistics for level \p i, level 0 is the head array.
        The function is thread-safe and may be called in a multi-threaded environment.

        The result can be useful for estimating the efficiency of the hash functor you use.
    */
    void get_level_statistics( std::vector< feldman_hashset::level_statistics>& stat ) const
    {
        base_class::get_level_statistics( stat );
    }

    public:
    ///@name Thread-safe iterators
        /** @anchor cds_intrusive_FeldmanHashSet_iterators
            The set supports thread-safe iterators: you may iterate over the set in a multi-threaded environment.
            It is guaranteed that the iterators will remain valid even if another thread deletes the node the iterator points to:
            the Hazard Pointer embedded into the iterator object protects the node from physical reclamation.

            @note Since the iterator object contains a hazard pointer that is a thread-local resource,
            the iterator should not be passed to another thread.

            Each iterator object supports the common interface:
            - dereference operators:
                @code
                value_type [const] * operator ->() noexcept
                value_type [const] & operator *() noexcept
                @endcode
            - pre-increment and pre-decrement; post-increment and post-decrement are not supported
            - equality operators == and !=. Iterators are equal iff they point to the same cell of the same array node.
              Note that for two iterators \p it1 and \p it2 the condition it1 == it2
              does not entail &(*it1) == &(*it2) : welcome to concurrent containers
            - helper member function \p release() that clears the internal hazard pointer.
              After \p release() the iterator points to \p nullptr but it still remains valid: further iterating is possible.

            During iteration you may safely erase any item from the set;
            the @ref erase_at() function call doesn't invalidate any iterator.
            If some iterator points to the item to be erased, that item is not deleted immediately
            but only after the iterator is advanced forward or backward.

            @note It is possible for an item to be iterated more than once, for example,
            if an iterator points to an item in an array node that is being split.
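            A hedged traversal sketch (assuming the \p theSet instance from the examples above and a
            hypothetical \p process() function supplied by the caller):
            \code
            for ( auto it = theSet.begin(); it != theSet.end(); ++it ) {
                process( *it );  // the element is protected by the iterator's internal hazard pointer
            }
            \endcode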
*/ ///@{ /// Returns an iterator to the beginning of the set iterator begin() { return iterator( *this, head(), size_t(0) - 1 ); } /// Returns an const iterator to the beginning of the set const_iterator begin() const { return const_iterator( *this, head(), size_t(0) - 1 ); } /// Returns an const iterator to the beginning of the set const_iterator cbegin() { return const_iterator( *this, head(), size_t(0) - 1 ); } /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. iterator end() { return iterator( *this, head(), head_size(), false ); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator end() const { return const_iterator( *this, head(), head_size(), false ); } /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. const_iterator cend() { return const_iterator( *this, head(), head_size(), false ); } /// Returns a reverse iterator to the first element of the reversed set reverse_iterator rbegin() { return reverse_iterator( *this, head(), head_size()); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator rbegin() const { return const_reverse_iterator( *this, head(), head_size()); } /// Returns a const reverse iterator to the first element of the reversed set const_reverse_iterator crbegin() { return const_reverse_iterator( *this, head(), head_size()); } /// Returns a reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ reverse_iterator rend() { return reverse_iterator( *this, head(), size_t(0) - 1, false ); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. */ const_reverse_iterator rend() const { return const_reverse_iterator( *this, head(), size_t(0) - 1, false ); } /// Returns a const reverse iterator to the element following the last element of the reversed set /** It corresponds to the element preceding the first element of the non-reversed container. This element acts as a placeholder, attempting to access it results in undefined behavior. 
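            For illustration, reverse traversal mirrors the forward form (same assumptions as the sketch above):
            \code
            for ( auto it = theSet.rbegin(); it != theSet.rend(); ++it )
                process( *it );  // protected by the iterator's hazard pointer
            \endcode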
*/ const_reverse_iterator crend() { return const_reverse_iterator( *this, head(), size_t(0) - 1, false ); } ///@} private: //@cond void clear_array( array_node * pArrNode, size_t nSize ) { back_off bkoff; for ( atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr ) { while ( true ) { node_ptr slot = pArr->load( memory_model::memory_order_acquire ); if ( slot.bits() == base_class::flag_array_node ) { // array node, go down the tree assert( slot.ptr() != nullptr ); clear_array( to_array( slot.ptr()), array_node_size()); break; } else if ( slot.bits() == base_class::flag_array_converting ) { // the slot is converting to array node right now while (( slot = pArr->load( memory_model::memory_order_acquire )).bits() == base_class::flag_array_converting ) { bkoff(); stats().onSlotConverting(); } bkoff.reset(); assert( slot.ptr() != nullptr ); assert( slot.bits() == base_class::flag_array_node ); clear_array( to_array( slot.ptr()), array_node_size()); break; } else { // data node if ( pArr->compare_exchange_strong( slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( slot.ptr()) { gc::template retire( slot.ptr()); --m_ItemCounter; stats().onEraseSuccess(); } break; } } } } } //@endcond protected: //@cond value_type * search( hash_type const& hash, typename gc::Guard& guard ) { traverse_data pos( hash, *this ); hash_comparator cmp; while ( true ) { node_ptr slot = base_class::traverse( pos ); assert( slot.bits() == 0 ); // protect data node by hazard pointer if ( guard.protect( pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot) { // slot value has been changed - retry stats().onSlotChanged(); continue; } else if ( slot.ptr() && cmp( hash, hash_accessor()( *slot.ptr())) == 0 ) { // item found stats().onFindSuccess(); return slot.ptr(); } stats().onFindFailed(); return nullptr; } } template value_type * do_erase( hash_type const& hash, typename gc::Guard& guard, Predicate pred ) { traverse_data pos( hash, *this ); hash_comparator cmp; while ( true ) { node_ptr slot = base_class::traverse( pos ); assert( slot.bits() == 0 ); // protect data node by hazard pointer if ( guard.protect( pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot ) { // slot value has been changed - retry stats().onSlotChanged(); } else if ( slot.ptr()) { if ( cmp( hash, hash_accessor()( *slot.ptr())) == 0 && pred( *slot.ptr())) { // item found - replace it with nullptr if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { // slot is guarded by HP gc::template retire( slot.ptr()); --m_ItemCounter; stats().onEraseSuccess(); return slot.ptr(); } stats().onEraseRetry(); continue; } stats().onEraseFailed(); return nullptr; } else { // the slot is empty stats().onEraseFailed(); return nullptr; } } } bool do_erase_at( iterator_base const& iter ) { if ( iter.m_set != this ) return false; if ( iter.m_pNode == head()) { if ( iter.m_idx >= head_size()) return false; } else if ( iter.m_idx >= array_node_size()) return false; for (;;) { node_ptr slot = iter.m_pNode->nodes[iter.m_idx].load( memory_model::memory_order_acquire ); if ( slot.bits() == 0 && slot.ptr() == iter.pointer()) { if ( iter.m_pNode->nodes[iter.m_idx].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // the item is guarded by iterator, so we may retire it 
safely gc::template retire( slot.ptr()); --m_ItemCounter; stats().onEraseSuccess(); return true; } } else return false; } } template std::pair do_update( value_type& val, Func f, bool bInsert = true ) { hash_type const& hash = hash_accessor()( val ); traverse_data pos( hash, *this ); hash_comparator cmp; typename gc::template GuardArray<2> guards; guards.assign( 1, &val ); while ( true ) { node_ptr slot = base_class::traverse( pos ); assert( slot.bits() == 0 ); // protect data node by hazard pointer if ( guards.protect( 0, pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot ) { // slot value has been changed - retry stats().onSlotChanged(); } else if ( slot.ptr()) { if ( cmp( hash, hash_accessor()( *slot.ptr())) == 0 ) { // the item with that hash value already exists // Replace it with val if ( slot.ptr() == &val ) { stats().onUpdateExisting(); return std::make_pair( true, false ); } if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // slot can be disposed f( val, slot.ptr()); gc::template retire( slot.ptr()); stats().onUpdateExisting(); return std::make_pair( true, false ); } stats().onUpdateRetry(); continue; } if ( bInsert ) { if ( !pos.splitter.eos()) { // the slot must be expanded base_class::expand_slot( pos, slot ); } else return std::make_pair( false, false ); } else { stats().onUpdateFailed(); return std::make_pair( false, false ); } } else { // the slot is empty, try to insert data node if ( bInsert ) { node_ptr pNull; if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // the new data node has been inserted f( val, nullptr ); ++m_ItemCounter; stats().onUpdateNew(); stats().height( pos.nHeight ); return std::make_pair( true, true ); } } else { stats().onUpdateFailed(); return std::make_pair( false, false ); } // insert failed - slot has been changed by another thread // retry updating stats().onUpdateRetry(); } } // while } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H libcds-2.3.3/cds/intrusive/impl/iterable_list.h000066400000000000000000001535071341244201700215250ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H #define CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H #include #include namespace cds { namespace intrusive { /// Iterable lock-free ordered single-linked list /** @ingroup cds_intrusive_list \anchor cds_intrusive_IterableList_hp This non-blocking list implementation supports thread-safe iterators; searching and removing are lock-free, inserting is non-blocking because it uses a light-weight synchronization based on marked pointers. Unlike \p cds::intrusive::MichaelList the iterable list does not require any hook in \p T to be stored in the list. Usually, ordered single-linked list is used as a building block for the hash table implementation. Iterable list is suitable for almost append-only hash table because the list doesn't delete its internal node when erasing a key but it is marked them as empty to be reused in the future. However, plenty of empty nodes degrades performance. 
        Separation of internal nodes and user data implies the need for an allocator for internal nodes,
        so the iterable list is not fully intrusive. Nevertheless, if you need a thread-safe iterator,
        the iterable list is a good choice.

        The complexity of searching is O(N).

        Template arguments:
        - \p GC - Garbage collector used.
        - \p T - type to be stored in the list.
        - \p Traits - type traits, default is \p iterable_list::traits. It is possible to declare an option-based
            list with the \p cds::intrusive::iterable_list::make_traits metafunction.

        For example, the following traits-based declaration of a \p gc::HP iterable list
        \code
        #include <cds/intrusive/iterable_list_hp.h>
        // Declare item stored in your list
        struct foo {
            int nKey;
            // .... other data
        };

        // Declare comparator for the item
        struct my_compare {
            int operator()( foo const& i1, foo const& i2 ) const
            {
                return i1.nKey - i2.nKey;
            }
        };

        // Declare traits
        struct my_traits: public cds::intrusive::iterable_list::traits
        {
            typedef my_compare compare;
        };

        // Declare list
        typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > list_type;
        \endcode
        is equivalent to the following option-based list
        \code
        #include <cds/intrusive/iterable_list_hp.h>

        // foo struct and my_compare are the same

        // Declare option-based list
        typedef cds::intrusive::IterableList< cds::gc::HP, foo,
            typename cds::intrusive::iterable_list::make_traits<
                cds::intrusive::opt::compare< my_compare >  // item comparator option
            >::type
        > option_list_type;
        \endcode

        \par Usage
        There are different specializations of this template for each garbage collecting schema.
        You should select the GC you want and include the appropriate .h-file:
        - for \p gc::HP: <cds/intrusive/iterable_list_hp.h>
        - for \p gc::DHP: <cds/intrusive/iterable_list_dhp.h>
    */
    template <
        class GC
        ,typename T
#ifdef CDS_DOXYGEN_INVOKED
        ,class Traits = iterable_list::traits
#else
        ,class Traits
#endif
    >
    class IterableList
#ifndef CDS_DOXYGEN_INVOKED
        : public iterable_list_tag
#endif
    {
    public:
        typedef T       value_type; ///< type of value stored in the list
        typedef Traits  traits;     ///< Traits template parameter
        typedef iterable_list::node< value_type > node_type; ///< node type

#   ifdef CDS_DOXYGEN_INVOKED
        typedef implementation_defined key_comparator; ///< key comparison functor based on opt::compare and opt::less option setter.
#   else
        typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
#   endif

        typedef typename traits::disposer disposer;   ///< disposer for \p value_type
        typedef GC gc;   ///< Garbage collector
        typedef typename traits::back_off back_off;   ///< back-off strategy
        typedef typename traits::item_counter item_counter; ///< Item counting policy used
        typedef typename traits::memory_model memory_model; ///< Memory ordering.
See \p cds::opt::memory_model option typedef typename traits::node_allocator node_allocator; ///< Node allocator typedef typename traits::stat stat; ///< Internal statistics typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer static constexpr const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef IterableList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = iterable_list::select_stat_wrapper< Stat >; //@endcond protected: //@cond typedef atomics::atomic< node_type* > atomic_node_ptr; ///< Atomic node pointer typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support) typedef typename node_type::marked_data_ptr marked_data_ptr; node_type m_Head; node_type m_Tail; item_counter m_ItemCounter; ///< Item counter mutable stat m_Stat; ///< Internal statistics typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; /// Position pointer for item search struct position { node_type const* pHead; node_type * pPrev; ///< Previous node node_type * pCur; ///< Current node value_type * pFound; ///< Value of \p pCur->data, valid only if data found typename gc::Guard guard; ///< guard for \p pFound }; struct insert_position: public position { value_type * pPrevVal; ///< Value of \p pPrev->data, can be \p nullptr typename gc::Guard prevGuard; ///< guard for \p pPrevVal }; //@endcond protected: //@cond template class iterator_type { friend class IterableList; protected: node_type* m_pNode; typename gc::Guard m_Guard; // data guard void next() { for ( node_type* p = m_pNode->next.load( memory_model::memory_order_relaxed ); p != m_pNode; p = p->next.load( memory_model::memory_order_relaxed )) { m_pNode = p; if ( m_Guard.protect( p->data, []( marked_data_ptr ptr ) { return ptr.ptr(); }).ptr()) return; } m_Guard.clear(); } explicit iterator_type( node_type* pNode ) : m_pNode( pNode ) { if ( !m_Guard.protect( pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr()) next(); } iterator_type( node_type* pNode, value_type* pVal ) : m_pNode( pNode ) { if ( m_pNode ) { assert( pVal != nullptr ); m_Guard.assign( pVal ); } } value_type* data() const { return m_Guard.template get(); } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) : m_pNode( src.m_pNode ) { m_Guard.copy( src.m_Guard ); } value_ptr operator ->() const { return data(); //return m_Guard.template get(); } value_ref operator *() const { assert( m_Guard.get_native() != nullptr ); return *data(); //return *m_Guard.template get(); } /// Pre-increment iterator_type& operator ++() { next(); return *this; } iterator_type& operator = (iterator_type const& src) { m_pNode = src.m_pNode; m_Guard.copy( src.m_Guard ); return *this; } template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return !( *this == i ); } }; //@endcond public: ///@name Thread-safe forward iterators //@{ /// Forward iterator /** The forward iterator for iterable list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard. 
For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. - Iterator is thread-safe: even if the element the iterator points to is removed, the iterator stays valid because it contains the guard keeping the value from to be recycled. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode @note For two iterators pointed to the same element the value can be different; this code \code if ( it1 == it2 ) assert( &(*it1) == &(*it2)); \endcode can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread. The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after changing. Other iterator may observe modified value of the element. */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( &m_Head ); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list begin() == end() */ iterator end() { return iterator( &m_Tail ); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( const_cast( &m_Head )); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( const_cast( &m_Head )); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator( const_cast( &m_Tail )); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( const_cast( &m_Tail )); } //@} public: /// Default constructor initializes empty list IterableList() { init_list(); } //@cond template >::value >> explicit IterableList( Stat& st ) : m_Stat( st ) { init_list(); } //@endcond /// Destroys the list object ~IterableList() { destroy(); } /// Inserts new node /** The function inserts \p val into the list if the list does not contain an item with key equal to \p val. Returns \p true if \p val has been linked to the list, \p false otherwise. */ bool insert( value_type& val ) { return insert_at( &m_Head, val ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. 
The function allows to split new item creating into two part: - create item with key only - insert new item into the list - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if the inserting is success. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert( value_type& val, Func f ) { return insert_at( &m_Head, val, f ); } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the list, then \p val is inserted iff \p bInsert is \p true. Otherwise, the current element is changed to \p val, the element will be retired later by call \p Traits::disposer. The functor \p func is called after inserting or replacing, it signature is: \code void func( value_type& val, value_type * old ); \endcode where - \p val - argument \p val passed into the \p %update() function - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key already in the list. */ template std::pair update( value_type& val, Func func, bool bInsert = true ) { return update_at( &m_Head, val, func, bInsert ); } /// Insert or update /** The operation performs inserting or updating data with lock-free manner. If the item \p val is not found in the list, then \p val is inserted iff \p bInsert is \p true. Otherwise, the current element is changed to \p val, the old element will be retired later by call \p Traits::disposer. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key already in the list. */ std::pair upsert( value_type& val, bool bInsert = true ) { return upsert_at( &m_Head, val, bInsert ); } /// Unlinks the item \p val from the list /** The function searches the item \p val in the list and unlinks it from the list if it is found and it is equal to \p val. Difference between \p erase() and \p %unlink(): \p %erase() finds a key and deletes the item found. \p %unlink() finds an item by key and deletes it only if \p val is an item of the list, i.e. the pointer to item found is equal to &val . \p disposer specified in \p Traits is called for deleted item. The function returns \p true if success and \p false otherwise. */ bool unlink( value_type& val ) { return unlink_at( &m_Head, val ); } /// Deletes the item from the list /** \anchor cds_intrusive_IterableList_hp_erase_val The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \p true. If \p key is not found the function return \p false. \p disposer specified in \p Traits is called for deleted item. */ template bool erase( Q const& key ) { return erase_at( &m_Head, key, key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_IterableList_hp_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. 
\p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Deletes the item from the list /** \anchor cds_intrusive_IterableList_hp_erase_func The function searches an item with key equal to \p key in the list, call \p func functor with item found, unlinks it from the list, and returns \p true. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If \p key is not found the function return \p false, \p func is not called. \p disposer specified in \p Traits is called for deleted item. */ template bool erase( Q const& key, Func func ) { return erase_at( &m_Head, key, key_comparator(), func ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_IterableList_hp_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); } /// Deletes the item pointed by iterator \p iter /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. */ bool erase_at( iterator const& iter ) { assert( iter != end()); marked_data_ptr val( iter.data()); if ( iter.m_pNode->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { --m_ItemCounter; retire_data( val.ptr()); m_Stat.onEraseSuccess(); return true; } return false; } /// Extracts the item from the list with specified \p key /** \anchor cds_intrusive_IterableList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. The \ref disposer specified in \p Traits class template parameter is called automatically by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp( theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_at( &m_Head, key, key_comparator()); } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_intrusive_IterableList_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. 
\p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Finds \p key in the list /** \anchor cds_intrusive_IterableList_hp_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the \p %find() function argument. The functor may change non-key fields of \p item. Note that the function is only guarantee that \p item cannot be disposed during functor is executing. The function does not serialize simultaneous access to the \p item. If such access is possible you must provide your own synchronization schema to keep out unsafe item modifications. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& key, Func f ) const { return find_at( &m_Head, key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) const { return find_at( &m_Head, key, key_comparator(), f ); } //@endcond /// Finds \p key in the list and returns iterator pointed to the item found /** If \p key is not found the function returns \p end(). */ template iterator find( Q const& key ) const { return find_iterator_at( &m_Head, key, key_comparator()); } /// Finds the \p key using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_IterableList_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); } //@endcond /// Finds \p key in the list using \p pred predicate for searching and returns iterator pointed to the item found /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. If \p key is not found the function returns \p end(). */ template iterator find_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_iterator_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) const { return find_at( &m_Head, key, key_comparator()); } /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. 
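For example, a lookup by an integer key might look like this (a sketch: \p my_item, \p my_key_less and \p my_traits are illustrative names, not part of the library; \p my_traits is assumed to order items by \p nKey):
\code
struct my_item {
    int nKey;
    // ... other fields
};
// Less-like predicate comparing my_item with a plain int key, in both argument orders
struct my_key_less {
    bool operator()( my_item const& i, int k ) const { return i.nKey < k; }
    bool operator()( int k, my_item const& i ) const { return k < i.nKey; }
};
typedef cds::intrusive::IterableList< cds::gc::HP, my_item, my_traits > list_type;
list_type theList;
// ...
if ( theList.contains( 42, my_key_less())) {
    // an item with key 42 was present at the moment of the call
}
\endcode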
*/ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Finds the \p key and return the item found /** \anchor cds_intrusive_IterableList_hp_get The function searches the item with key equal to \p key and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC automatically when returned \ref guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) const { return get_at( &m_Head, key, key_comparator()); } /// Finds the \p key and return the item found /** The function is an analog of \ref cds_intrusive_IterableList_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return get_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Clears the list (thread safe, not atomic) void clear() { position pos; pos.pPrev = nullptr; for ( pos.pCur = m_Head.next.load( memory_model::memory_order_relaxed ); pos.pCur != pos.pPrev; pos.pCur = pos.pCur->next.load( memory_model::memory_order_relaxed )) { while ( true ) { pos.pFound = pos.guard.protect( pos.pCur->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr(); if ( !pos.pFound ) break; if ( cds_likely( unlink_data( pos ))) { --m_ItemCounter; break; } } pos.pPrev = pos.pCur; } } /// Checks if the list is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter feature. */ bool empty() const { return size() == 0; } /// Returns list's item count /** The value returned depends on item counter provided by \p iterable_list::traits::item_counter. For \p atomicity::empty_item_counter, this function always returns 0. 
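For example, to make \p %size() and \p %empty() meaningful, a real item counter can be selected in the traits (a sketch; \p my_traits is an illustrative name):
\code
struct my_traits: public cds::intrusive::iterable_list::traits {
    // count items with an atomic counter instead of the dummy one
    typedef cds::atomicity::item_counter item_counter;
};
\endcode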
*/ size_t size() const { return m_ItemCounter.value(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond // split-list support bool insert_aux_node( node_type * pNode ) { return insert_aux_node( &m_Head, pNode ); } // split-list support bool insert_aux_node( node_type* pHead, node_type * pNode ) { assert( pNode != nullptr ); assert( pNode->data.load( memory_model::memory_order_relaxed ) != nullptr ); insert_position pos; while ( true ) { if ( inserting_search( pHead, *pNode->data.load(memory_model::memory_order_relaxed).ptr(), pos, key_comparator())) { m_Stat.onInsertFailed(); return false; } if ( link_aux_node( pNode, pos, pHead )) { ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertRetry(); } } bool insert_at( node_type* pHead, value_type& val ) { insert_position pos; while ( true ) { if ( inserting_search( pHead, val, pos, key_comparator())) { m_Stat.onInsertFailed(); return false; } if ( link_data( &val, pos, pHead )) { ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertRetry(); } } template bool insert_at( node_type* pHead, value_type& val, Func f ) { insert_position pos; typename gc::Guard guard; guard.assign( &val ); while ( true ) { if ( inserting_search( pHead, val, pos, key_comparator())) { m_Stat.onInsertFailed(); return false; } if ( link_data( &val, pos, pHead )) { f( val ); ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertRetry(); } } template std::pair update_at( node_type* pHead, value_type& val, Func func, bool bInsert ) { insert_position pos; typename gc::Guard guard; guard.assign( &val ); while ( true ) { if ( inserting_search( pHead, val, pos, key_comparator())) { // try to replace pCur->data with val assert( pos.pFound != nullptr ); assert( key_comparator()(*pos.pFound, val) == 0 ); marked_data_ptr pFound( pos.pFound ); if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed ))) { if ( pos.pFound != &val ) { retire_data( pos.pFound ); func( val, pos.pFound ); } m_Stat.onUpdateExisting(); return std::make_pair( true, false ); } } else { if ( !bInsert ) { m_Stat.onUpdateFailed(); return std::make_pair( false, false ); } if ( link_data( &val, pos, pHead )) { func( val, static_cast( nullptr )); ++m_ItemCounter; m_Stat.onUpdateNew(); return std::make_pair( true, true ); } } m_Stat.onUpdateRetry(); } } std::pair upsert_at( node_type* pHead, value_type& val, bool bInsert ) { return update_at( pHead, val, []( value_type&, value_type* ) {}, bInsert ); } bool unlink_at( node_type* pHead, value_type& val ) { position pos; back_off bkoff; while ( search( pHead, val, pos, key_comparator())) { if ( pos.pFound == &val ) { if ( unlink_data( pos )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } else bkoff(); } else break; m_Stat.onEraseRetry(); } m_Stat.onEraseFailed(); return false; } template bool erase_at( node_type* pHead, Q const& val, Compare cmp, Func f, position& pos ) { back_off bkoff; while ( search( pHead, val, pos, cmp )) { if ( unlink_data( pos )) { f( *pos.pFound ); --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } else bkoff(); m_Stat.onEraseRetry(); } m_Stat.onEraseFailed(); return false; } template bool erase_at( node_type* pHead, Q const& val, Compare cmp, Func f ) { position pos; return erase_at( pHead, val, cmp, f, pos ); } template bool erase_at( node_type* pHead, Q const& val, Compare cmp ) { position pos; 
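// delegate to the functor-based erase_at() overload: a no-op functor and a local position are sufficient here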
return erase_at( pHead, val, cmp, [](value_type const&){}, pos ); } template guarded_ptr extract_at( node_type* pHead, Q const& val, Compare cmp ) { position pos; back_off bkoff; while ( search( pHead, val, pos, cmp )) { if ( unlink_data( pos )) { --m_ItemCounter; m_Stat.onEraseSuccess(); assert( pos.pFound != nullptr ); return guarded_ptr( std::move( pos.guard )); } else bkoff(); m_Stat.onEraseRetry(); } m_Stat.onEraseFailed(); return guarded_ptr(); } template bool find_at( node_type const* pHead, Q const& val, Compare cmp ) const { position pos; if ( search( pHead, val, pos, cmp )) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template bool find_at( node_type const* pHead, Q& val, Compare cmp, Func f ) const { position pos; if ( search( pHead, val, pos, cmp )) { assert( pos.pFound != nullptr ); f( *pos.pFound, val ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template iterator find_iterator_at( node_type const* pHead, Q const& val, Compare cmp ) const { position pos; if ( search( pHead, val, pos, cmp )) { assert( pos.pCur != nullptr ); assert( pos.pFound != nullptr ); m_Stat.onFindSuccess(); return iterator( pos.pCur, pos.pFound ); } m_Stat.onFindFailed(); return iterator( const_cast( &m_Tail )); } template guarded_ptr get_at( node_type const* pHead, Q const& val, Compare cmp ) const { position pos; if ( search( pHead, val, pos, cmp )) { m_Stat.onFindSuccess(); return guarded_ptr( std::move( pos.guard )); } m_Stat.onFindFailed(); return guarded_ptr(); } node_type* head() { return &m_Head; } node_type const* head() const { return &m_Head; } //@endcond protected: //@cond template bool search( node_type const* pHead, Q const& val, position& pos, Compare cmp ) const { pos.pHead = pHead; node_type* pPrev = const_cast( pHead ); while ( true ) { node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed ); if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) { // end-of-list pos.pPrev = pPrev; pos.pCur = pCur; pos.pFound = nullptr; return false; } value_type * pVal = pos.guard.protect( pCur->data, []( marked_data_ptr p ) -> value_type* { return p.ptr(); }).ptr(); if ( pVal ) { int const nCmp = cmp( *pVal, val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; pos.pCur = pCur; pos.pFound = pVal; return nCmp == 0; } } pPrev = pCur; } } template bool inserting_search( node_type const* pHead, Q const& val, insert_position& pos, Compare cmp ) const { pos.pHead = pHead; node_type* pPrev = const_cast(pHead); value_type* pPrevVal = pPrev->data.load( memory_model::memory_order_relaxed ).ptr(); while ( true ) { node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed ); if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) { // end-of-list pos.pPrev = pPrev; pos.pCur = pCur; pos.pFound = nullptr; pos.pPrevVal = pPrevVal; return false; } value_type * pVal = pos.guard.protect( pCur->data, []( marked_data_ptr p ) -> value_type* { return p.ptr(); } ).ptr(); if ( pVal ) { int const nCmp = cmp( *pVal, val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; pos.pCur = pCur; pos.pFound = pVal; pos.pPrevVal = pPrevVal; return nCmp == 0; } } pPrev = pCur; pPrevVal = pVal; pos.prevGuard.copy( pos.guard ); } } // split-list support template void destroy( Predicate pred ) { node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed ); while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) { value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr(); node_type * 
pNext = pNode->next.load( memory_model::memory_order_relaxed ); bool const is_regular_node = !pVal || pred( pVal ); if ( is_regular_node ) { if ( pVal ) retire_data( pVal ); delete_node( pNode ); } pNode = pNext; } m_Head.next.store( &m_Tail, memory_model::memory_order_relaxed ); } //@endcond private: //@cond void init_list() { m_Head.next.store( &m_Tail, memory_model::memory_order_relaxed ); // end-of-list mark: node.next == node m_Tail.next.store( &m_Tail, memory_model::memory_order_release ); } node_type * alloc_node( value_type * pVal ) { m_Stat.onNodeCreated(); return cxx_node_allocator().New( pVal ); } void delete_node( node_type * pNode ) { m_Stat.onNodeRemoved(); cxx_node_allocator().Delete( pNode ); } static void retire_data( value_type * pVal ) { assert( pVal != nullptr ); gc::template retire( pVal ); } void destroy() { node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed ); while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) { value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr(); if ( pVal ) retire_data( pVal ); node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed ); delete_node( pNode ); pNode = pNext; } } bool link_data( value_type* pVal, insert_position& pos, node_type* pHead ) { assert( pos.pPrev != nullptr ); assert( pos.pCur != nullptr ); // We need pos.pCur data should be unchanged, otherwise ordering violation can be possible // if current thread will be preempted and another thread will delete pos.pCur data // and then set it to another. // To prevent this we mark pos.pCur data as undeletable by setting LSB marked_data_ptr valCur( pos.pFound ); if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data m_Stat.onNodeMarkFailed(); return false; } marked_data_ptr valPrev( pos.pPrevVal ); if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); m_Stat.onNodeMarkFailed(); return false; } // checks if link pPrev -> pCur is broken if ( pos.pPrev->next.load( memory_model::memory_order_acquire ) != pos.pCur ) { // sequence pPrev - pCur is broken pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); m_Stat.onNodeSeqBreak(); return false; } if ( pos.pPrevVal == nullptr ) { // Check ABA-problem for prev // There is a possibility that the current thread was preempted // on entry of this function. Other threads can link data to prev // and then remove it. 
As a result, the order of items may be changed if ( find_prev( pHead, *pVal ) != pos.pPrev ) { pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); m_Stat.onNullPrevABA(); return false; } } if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr ) { // reuse pPrev // Set pos.pPrev data if it is null valPrev |= 1; bool result = pos.pPrev->data.compare_exchange_strong( valPrev, marked_data_ptr( pVal ), memory_model::memory_order_release, atomics::memory_order_relaxed ); // Clears data marks pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); if ( result ) { m_Stat.onReuseNode(); return result; } } else { // insert new node between pos.pPrev and pos.pCur node_type * pNode = alloc_node( pVal ); pNode->next.store( pos.pCur, memory_model::memory_order_relaxed ); bool result = pos.pPrev->next.compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ); // Clears data marks pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); if ( result ) { m_Stat.onNewNodeCreated(); return result; } delete_node( pNode ); } return false; } // split-list support bool link_aux_node( node_type * pNode, insert_position& pos, node_type* pHead ) { assert( pos.pPrev != nullptr ); assert( pos.pCur != nullptr ); // We need pos.pCur data should be unchanged, otherwise ordering violation can be possible // if current thread will be preempted and another thread will delete pos.pCur data // and then set it to another. // To prevent this we mark pos.pCur data as undeletable by setting LSB marked_data_ptr valCur( pos.pFound ); if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data m_Stat.onNodeMarkFailed(); return false; } marked_data_ptr valPrev( pos.pPrevVal ); if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); m_Stat.onNodeMarkFailed(); return false; } // checks if link pPrev -> pCur is broken if ( pos.pPrev->next.load( memory_model::memory_order_acquire ) != pos.pCur ) { // sequence pPrev - pCur is broken pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); m_Stat.onNodeSeqBreak(); return false; } if ( pos.pPrevVal == nullptr ) { // Check ABA-problem for prev // There is a possibility that the current thread was preempted // on entry of this function. Other threads can insert (link) an item to prev // and then remove it. 
As a result, the order of items may be changed if ( find_prev( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr()) != pos.pPrev ) { pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); m_Stat.onNullPrevABA(); return false; } } // insert new node between pos.pPrev and pos.pCur pNode->next.store( pos.pCur, memory_model::memory_order_relaxed ); bool result = pos.pPrev->next.compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ); // Clears data marks pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); return result; } static bool unlink_data( position& pos ) { assert( pos.pCur != nullptr ); assert( pos.pFound != nullptr ); marked_data_ptr val( pos.pFound ); if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { retire_data( pos.pFound ); return true; } return false; } template node_type* find_prev( node_type const* pHead, Q const& val ) const { node_type* pPrev = const_cast(pHead); typename gc::Guard guard; key_comparator cmp; while ( true ) { node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed ); if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) { // end-of-list return pPrev; } value_type * pVal = guard.protect( pCur->data, []( marked_data_ptr p ) -> value_type* { return p.ptr(); } ).ptr(); if ( pVal && cmp( *pVal, val ) >= 0 ) return pPrev; pPrev = pCur; } } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H libcds-2.3.3/cds/intrusive/impl/lazy_list.h000066400000000000000000001337311341244201700207120ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H #define CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H #include // unique_lock #include namespace cds { namespace intrusive { /// Lazy ordered single-linked list /** @ingroup cds_intrusive_list \anchor cds_intrusive_LazyList_hp Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Source: - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit "A Lazy Concurrent List-Based Set Algorithm" The lazy list is based on an optimistic locking scheme for inserts and removes, eliminating the need to use the equivalent of an atomically markable reference. It also has a novel wait-free membership \p find operation that does not need to perform cleanup operations and is more efficient. Template arguments: - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see lazy_list::node). - \p T - type to be stored in the list. The type must be based on lazy_list::node (for lazy_list::base_hook) or it must have a member of type lazy_list::node (for lazy_list::member_hook). - \p Traits - type traits. See lazy_list::traits for explanation. It is possible to declare option-based list with cds::intrusive::lazy_list::make_traits metafunction instead of \p Traits template argument. 
For example, the following traits-based declaration of \p gc::HP lazy list \code #include // Declare item stored in your list struct item: public cds::intrusive::lazy_list::node< cds::gc::HP > { ... }; // Declare comparator for the item struct my_compare { ... } // Declare traits struct my_traits: public cds::intrusive::lazy_list::traits { typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; typedef my_compare compare; }; // Declare traits-based list typedef cds::intrusive::LazyList< cds::gc::HP, item, my_traits > traits_based_list; \endcode is equivalent for the following option-based list \code #include // item struct and my_compare are the same // Declare option-based list typedef cds::intrusive::LazyList< cds::gc::HP, item, typename cds::intrusive::lazy_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > > // hook option ,cds::intrusive::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema used. You should select GC needed and include appropriate .h-file: - for gc::HP: \code #include \endcode - for gc::DHP: \code #include \endcode - for gc::nogc: \code #include \endcode - for \ref cds_urcu_type "RCU" - see \ref cds_intrusive_LazyList_rcu "LazyList RCU specialization" Then, you should incorporate lazy_list::node into your struct \p T and provide appropriate \p lazy_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits a struct based on \p lazy_list::traits should be defined. Example for gc::DHP and base hook: \code // Include GC-related lazy list specialization #include // Data stored in lazy list struct my_data: public cds::intrusive::lazy_list::node< cds::gc::DHP > { // key field std::string strKey; // other data // ... 
}; // my_data comparing functor struct compare { int operator()( const my_data& d1, const my_data& d2 ) { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) { return s.compare( d.strKey ); } }; // Declare traits struct my_traits: public cds::intrusive::lazy_list::traits { typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::DHP > > hook; typedef my_data_cmp compare; }; // Declare list type typedef cds::intrusive::LazyList< cds::gc::DHP, my_data, my_traits > traits_based_list; \endcode Equivalent option-based code: \code // GC-related specialization #include struct my_data { // see above }; struct compare { // see above }; // Declare option-based list typedef cds::intrusive::LazyList< cds::gc::DHP ,my_data , typename cds::intrusive::lazy_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::DHP > > > ,cds::intrusive::opt::compare< my_data_cmp > >::type > option_based_list; \endcode */ template < class GC ,typename T #ifdef CDS_DOXYGEN_INVOKED ,class Traits = lazy_list::traits #else ,class Traits #endif > class LazyList { public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator; ///< key comparison functor based on opt::compare and opt::less option setter. # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< disposer typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model) typedef typename traits::stat stat; ///< Internal statistics static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer static constexpr const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >; //@endcond protected: typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support) protected: //@cond node_type m_Head; node_type m_Tail; item_counter m_ItemCounter; stat m_Stat; ///< Internal statistics struct clean_disposer { void operator()( value_type * p ) { lazy_list::node_cleaner()( node_traits::to_node_ptr( p )); disposer()( p ); } }; /// Position pointer for item search struct position { node_type * pPred; ///< Previous node node_type * pCur; ///< Current node typename 
gc::template GuardArray<2> guards; ///< Guards array enum { guard_prev_item, guard_current_item }; /// Locks nodes \p pPred and \p pCur void lock() { pPred->m_Lock.lock(); pCur->m_Lock.lock(); } /// Unlocks nodes \p pPred and \p pCur void unlock() { pCur->m_Lock.unlock(); pPred->m_Lock.unlock(); } }; typedef std::unique_lock< position > scoped_position_lock; //@endcond protected: //@cond void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) { assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); link_checker::is_empty( pNode ); pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_release ); pPred->m_pNext.store( marked_node_ptr(pNode), memory_model::memory_order_release ); } void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead ) { assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); node_type * pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_release ); // logical removal + back-link for search pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release); // physically deleting } void retire_node( node_type * pNode ) { assert( pNode != nullptr ); gc::template retire( node_traits::to_value_ptr( *pNode )); } //@endcond protected: //@cond template class iterator_type { friend class LazyList; protected: value_type * m_pNode; typename gc::Guard m_Guard; void next() { assert( m_pNode != nullptr ); if ( m_pNode ) { typename gc::Guard g; node_type * pCur = node_traits::to_node_ptr( m_pNode ); if ( pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr() != nullptr ) { // if pCur is not tail node node_type * pNext; do { pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); g.assign( node_traits::to_value_ptr( pNext )); } while ( pNext != pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr()); m_pNode = m_Guard.assign( g.template get()); } } } void skip_deleted() { if ( m_pNode != nullptr ) { typename gc::Guard g; node_type * pNode = node_traits::to_node_ptr( m_pNode ); // Dummy tail node could not be marked while ( pNode->is_marked()) { node_type * p = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr(); g.assign( node_traits::to_value_ptr( p )); if ( p == pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr()) pNode = p; } if ( pNode != node_traits::to_node_ptr( m_pNode )) m_pNode = m_Guard.assign( g.template get()); } } iterator_type( node_type * pNode ) { m_pNode = m_Guard.assign( node_traits::to_value_ptr( pNode )); skip_deleted(); } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) { if ( src.m_pNode ) { m_pNode = m_Guard.assign( src.m_pNode ); } else m_pNode = nullptr; } value_ptr operator ->() const { return m_pNode; } value_ref operator *() const { assert( m_pNode != nullptr ); return *m_pNode; } /// Pre-increment iterator_type& operator ++() { next(); skip_deleted(); return *this; } iterator_type& operator = (iterator_type const& src) { m_pNode = src.m_pNode; m_Guard.assign( m_pNode ); return *this; } template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return m_pNode != i.m_pNode; } }; //@endcond public: ///@name Forward iterators (only for 
debugging purposes) //@{ /// Forward iterator /** The forward iterator for the lazy list has the following features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard; another guard is required locally for the increment operator. For some GCs (\p gc::HP), a guard is a limited per-thread resource, so the "no free guard" exception (or assertion) may be raised if the per-thread guard limit is exceeded. - The iterator cannot be moved across a thread boundary since it contains a GC guard that is thread-private data. - The iterator remains valid even if you delete the item it points to. However, in the presence of concurrent deletions there is no guarantee that you iterate over all items in the list; moreover, a crash is possible when you try to advance to the next element if it has been deleted by a concurrent thread. @warning Use this iterator on a concurrent container for debugging purposes only. */ typedef iterator_type<false> iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type<true> const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( &m_Head ); ++it ; // skip dummy head return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by the \p end function to access any item. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( &m_Tail ); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return get_const_begin(); } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return get_const_end(); } /// Returns a const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return get_const_end(); } //@} private: //@cond const_iterator get_const_begin() const { const_iterator it( const_cast<node_type*>( &m_Head )); ++it ; // skip dummy head return it; } const_iterator get_const_end() const { return const_iterator( const_cast<node_type*>(&m_Tail)); } //@endcond public: /// Default constructor initializes empty list LazyList() { m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); } //@cond template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >> explicit LazyList( Stat& st ) : m_Stat( st ) { m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); } //@endcond /// Destroys the list object ~LazyList() { clear(); assert( m_Head.m_pNext.load( memory_model::memory_order_relaxed ).ptr() == &m_Tail ); m_Head.m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); } /// Inserts new node /** The function inserts \p val into the list if the list does not contain an item with key equal to \p val. Returns \p true if \p val is linked into the list, \p false otherwise. */ bool insert( value_type& val ) { return insert_at( &m_Head, val ); } /// Inserts new node /** This function is intended for derived non-intrusive containers.
The function allows splitting the creation of a new item into two parts: - create an item with the key only - insert the new item into the list - if the insertion succeeds, call the functor \p f to initialize the value field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. While the functor \p f is being called, the item \p val is locked, so the functor has exclusive access to the item. The user-defined functor is called only if the insertion succeeds. */ template <typename Func> bool insert( value_type& val, Func f ) { return insert_at( &m_Head, val, f ); } /// Updates the item /** The operation performs inserting or changing data. If the item \p val is not found in the list, then \p val is inserted into the list iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called for the item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - an item of the list - \p val - argument \p val passed into the \p update() function If a new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor may change non-key fields of the \p item. While the functor \p func is working, the item \p item is locked, so \p func has exclusive access to the item. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if an item with \p key is already in the list. */ template <typename Func> std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true ) { return update_at( &m_Head, val, func, bAllowInsert ); } //@cond template <typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the list /** The function searches for the item \p val in the list and unlinks it from the list if it is found and it is equal to \p val. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. \p unlink finds an item by key and deletes it only if \p val is an item of that list, i.e. the pointer to the item found is equal to &val . The function returns \p true on success and \p false otherwise. \p disposer specified in \p Traits is called for the unlinked item. */ bool unlink( value_type& val ) { return unlink_at( &m_Head, val ); } /// Deletes the item from the list /** \anchor cds_intrusive_LazyList_hp_erase_val The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \p true. If the item with the key equal to \p key is not found the function returns \p false. \p disposer specified in \p Traits is called for the deleted item. */ template <typename Q> bool erase( Q const& key ) { return erase_at( &m_Head, key, key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_LazyList_hp_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for the deleted item.
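A usage sketch, reusing \p my_data from the example above (\p my_data_less is an illustrative name, not part of the library):
\code
// Less-like predicate comparing my_data with std::string, in both argument orders
struct my_data_less {
    bool operator()( my_data const& d, std::string const& s ) const { return d.strKey < s; }
    bool operator()( std::string const& s, my_data const& d ) const { return s < d.strKey; }
};
// theList is a LazyList of my_data as declared above
if ( theList.erase_with( std::string( "foo" ), my_data_less())) {
    // the item has been unlinked; disposer will be called for it by the GC
}
\endcode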
*/ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Deletes the item from the list /** \anchor cds_intrusive_LazyList_hp_erase_func The function searches an item with key equal to \p key in the list, call \p func functor with item found, unlinks it from the list, and returns \p true. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If \p key is not found the function return \p false. \p disposer specified in \p Traits is called for deleted item. */ template bool erase( const Q& key, Func func ) { return erase_at( &m_Head, key, key_comparator(), func ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_LazyList_hp_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( const Q& key, Less pred, Func func ) { CDS_UNUSED( pred ); return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), func ); } /// Extracts the item from the list with specified \p key /** \anchor cds_intrusive_LazyList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. The \ref disposer specified in \p Traits class template parameter is called automatically by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::intrusive::LazyList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp( theList.extract( 5 )); // Deal with gp // ... // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_at( &m_Head, key, key_comparator()); } /// Extracts the item from the list with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_LazyList_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Finds the key \p key /** \anchor cds_intrusive_LazyList_hp_find The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. While the functor \p f is calling the item \p item is locked. The function returns \p true if \p key is found, \p false otherwise. 
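A usage sketch (\p nAccessCount is assumed to be a non-key field added to \p my_data for illustration):
\code
std::string key( "foo" );
theList.find( key, []( my_data& item, std::string const& ) {
    // the item is locked while the functor runs, so this modification is safe
    ++item.nAccessCount;
});
\endcode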
*/ template bool find( Q& key, Func f ) { return find_at( &m_Head, key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( &m_Head, key, key_comparator(), f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_LazyList_hp_find "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); } //@endcond /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find_at( &m_Head, key, key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@cond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_intrusive_LazyList_hp_get The function searches the item with key equal to \p key and returns an guarded pointer to it. If \p key is not found the function returns an empty guarded pointer. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC automatically when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::intrusive::LazyList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) { return get_at( &m_Head, key, key_comparator()); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_intrusive_LazyList_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. 
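A usage sketch with the \p my_data_less predicate from the \p erase_with() example (illustrative, not part of the library):
\code
// theList stores my_data items ordered by strKey
auto gp = theList.get_with( std::string( "foo" ), my_data_less());
if ( gp ) {
    // the internal HP guard keeps *gp from being reclaimed while gp is alive
    // ... use gp->strKey etc.
}
\endcode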
*/ template guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Clears the list void clear() { typename gc::Guard guard; marked_node_ptr h; while ( !empty()) { h = m_Head.m_pNext.load( memory_model::memory_order_relaxed ); guard.assign( node_traits::to_value_ptr( h.ptr())); if ( m_Head.m_pNext.load(memory_model::memory_order_acquire) == h ) { m_Head.m_Lock.lock(); h->m_Lock.lock(); unlink_node( &m_Head, h.ptr(), &m_Head ); --m_ItemCounter; h->m_Lock.unlock(); m_Head.m_Lock.unlock(); retire_node( h.ptr()) ; // free node } } } /// Checks if the list is empty bool empty() const { return m_Head.m_pNext.load( memory_model::memory_order_relaxed ).ptr() == &m_Tail; } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact does not mean that the list is empty. To check list emptiness use \p empty() method. */ size_t size() const { return m_ItemCounter.value(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond // split-list support bool insert_aux_node( node_type * pNode ) { return insert_aux_node( &m_Head, pNode ); } // split-list support bool insert_aux_node( node_type * pHead, node_type * pNode ) { assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node cannot be reducible to value_type // We assume that internal comparator can correctly distinguish aux and regular node. return insert_at( pHead, *node_traits::to_value_ptr( pNode )); } bool insert_at( node_type * pHead, value_type& val ) { position pos; key_comparator cmp; while ( true ) { search( pHead, val, pos, key_comparator()); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // failed: key already in list m_Stat.onInsertFailed(); return false; } else { link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); break; } } } m_Stat.onInsertRetry(); } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } template bool insert_at( node_type * pHead, value_type& val, Func f ) { position pos; key_comparator cmp; while ( true ) { search( pHead, val, pos, key_comparator()); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // failed: key already in list m_Stat.onInsertFailed(); return false; } else { link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); f( val ); break; } } } m_Stat.onInsertRetry(); } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } template std::pair update_at( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { position pos; key_comparator cmp; while ( true ) { search( pHead, val, pos, key_comparator()); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // key already in the list func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); m_Stat.onUpdateExisting(); return std::make_pair( true, false ); } else { // new key if ( !bAllowInsert ) { m_Stat.onUpdateFailed(); return std::make_pair( false, false ); } link_node( node_traits::to_node_ptr( 
val ), pos.pPred, pos.pCur ); func( true, val, val ); break; } } } m_Stat.onUpdateRetry(); } ++m_ItemCounter; m_Stat.onUpdateNew(); return std::make_pair( true, true ); } bool unlink_at( node_type * pHead, value_type& val ) { position pos; key_comparator cmp; while ( true ) { search( pHead, val, pos, key_comparator()); { int nResult = 0; { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 && node_traits::to_value_ptr( pos.pCur ) == &val ) { // item found unlink_node( pos.pPred, pos.pCur, pHead ); nResult = 1; } else nResult = -1; } } if ( nResult ) { if ( nResult > 0 ) { --m_ItemCounter; retire_node( pos.pCur ); m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } } m_Stat.onEraseRetry(); } } template bool erase_at( node_type * pHead, const Q& val, Compare cmp, Func f, position& pos ) { while ( true ) { search( pHead, val, pos, cmp ); { int nResult = 0; { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // key found unlink_node( pos.pPred, pos.pCur, pHead ); f( *node_traits::to_value_ptr( *pos.pCur )); nResult = 1; } else { nResult = -1; } } } if ( nResult ) { if ( nResult > 0 ) { --m_ItemCounter; retire_node( pos.pCur ); m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } } m_Stat.onEraseRetry(); } } template bool erase_at( node_type * pHead, const Q& val, Compare cmp, Func f ) { position pos; return erase_at( pHead, val, cmp, f, pos ); } template bool erase_at( node_type * pHead, const Q& val, Compare cmp ) { position pos; return erase_at( pHead, val, cmp, [](value_type const &){}, pos ); } template guarded_ptr extract_at( node_type * pHead, const Q& val, Compare cmp ) { position pos; if ( erase_at( pHead, val, cmp, [](value_type const &){}, pos )) return guarded_ptr( pos.guards.release( position::guard_current_item )); return guarded_ptr(); } template bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) { position pos; search( pHead, val, pos, cmp ); if ( pos.pCur != &m_Tail ) { std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); if ( !pos.pCur->is_marked() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { f( *node_traits::to_value_ptr( *pos.pCur ), val ); m_Stat.onFindSuccess(); return true; } } m_Stat.onFindFailed(); return false; } template bool find_at( node_type * pHead, Q const& val, Compare cmp ) { position pos; search( pHead, val, pos, cmp ); if ( pos.pCur != &m_Tail && !pos.pCur->is_marked() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template guarded_ptr get_at( node_type * pHead, Q const& val, Compare cmp ) { position pos; search( pHead, val, pos, cmp ); if ( pos.pCur != &m_Tail && !pos.pCur->is_marked() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { m_Stat.onFindSuccess(); return guarded_ptr( pos.guards.release( position::guard_current_item )); } m_Stat.onFindFailed(); return guarded_ptr(); } // split-list support template void destroy( Predicate /*pred*/ ) { clear(); } //@endcond protected: //@cond template void search( node_type * pHead, const Q& key, position& pos, Compare cmp ) { node_type const* pTail = &m_Tail; marked_node_ptr pCur( pHead ); marked_node_ptr pPrev( pHead ); while ( pCur.ptr() != pTail ) { if ( pCur.ptr() != pHead ) { if ( 
cmp( *node_traits::to_value_ptr( *pCur.ptr()), key ) >= 0 ) break; } pos.guards.copy( position::guard_prev_item, position::guard_current_item ); pPrev = pCur; pCur = pos.guards.protect( position::guard_current_item, pPrev->m_pNext, []( marked_node_ptr p ) { return node_traits::to_value_ptr( p.ptr()); } ); assert( pCur.ptr() != nullptr ); if ( pCur.bits()) pPrev = pCur = pHead; } pos.pCur = pCur.ptr(); pos.pPred = pPrev.ptr(); } bool validate( node_type * pPred, node_type * pCur ) noexcept { if ( validate_link( pPred, pCur )) { m_Stat.onValidationSuccess(); return true; } m_Stat.onValidationFailed(); return false; } static bool validate_link( node_type * pPred, node_type * pCur ) noexcept { return !pPred->is_marked() && !pCur->is_marked() && pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur; } //@endcond }; }} // namespace cds::intrusive #endif // CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H libcds-2.3.3/cds/intrusive/impl/michael_list.h000066400000000000000000001326511341244201700213350ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H #define CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H #include #include namespace cds { namespace intrusive { /// Michael's lock-free ordered single-linked list /** @ingroup cds_intrusive_list \anchor cds_intrusive_MichaelList_hp Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" Template arguments: - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see \p michael_list::node). - \p T - type to be stored in the list. The type must be based on \p michael_list::node (for \p michael_list::base_hook) or it must have a member of type \p michael_list::node (for \p michael_list::member_hook). - \p Traits - type traits, default is \p michael_list::traits. It is possible to declare option-based list with \p cds::intrusive::michael_list::make_traits metafunction: For example, the following traits-based declaration of \p gc::HP Michael's list \code #include // Declare item stored in your list struct item: public cds::intrusive::michael_list::node< cds::gc::HP > { int nKey; // .... other data }; // Declare comparator for the item struct my_compare { int operator()( item const& i1, item const& i2 ) const { return i1.nKey - i2.nKey; } }; // Declare traits struct my_traits: public cds::intrusive::michael_list::traits { typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; typedef my_compare compare; }; // Declare traits-based list typedef cds::intrusive::MichaelList< cds::gc::HP, item, my_traits > traits_based_list; \endcode is equivalent for the following option-based list \code #include // item struct and my_compare are the same // Declare option-based list typedef cds::intrusive::MichaelList< cds::gc::HP, item, typename cds::intrusive::michael_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > // hook option ,cds::intrusive::opt::compare< my_compare > // item comparator option >::type > option_based_list; \endcode \par Usage There are different specializations of this template for each garbage collecting schema. 
You should select GC needed and include appropriate .h-file: - for \p gc::HP: - for \p gc::DHP: - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" - for \p gc::nogc: See \ref cds_intrusive_MichaelList_nogc "non-GC MichaelList" Then, you should incorporate \p michael_list::node into your struct \p T and provide appropriate \p michael_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you define a struct based on \p michael_list::traits. Example for \p gc::DHP and base hook: \code // Include GC-related Michael's list specialization #include // Data stored in Michael's list struct my_data: public cds::intrusive::michael_list::node< cds::gc::DHP > { // key field std::string strKey; // other data // ... }; // my_data comparing functor struct my_data_cmp { int operator()( const my_data& d1, const my_data& d2 ) { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) { return s.compare( d.strKey ); } }; // Declare traits struct my_traits: public cds::intrusive::michael_list::traits { typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::DHP > > hook; typedef my_data_cmp compare; }; // Declare list type typedef cds::intrusive::MichaelList< cds::gc::DHP, my_data, my_traits > traits_based_list; \endcode Equivalent option-based code: \code // GC-related specialization #include struct my_data { // see above }; struct compare { // see above }; // Declare option-based list typedef cds::intrusive::MichaelList< cds::gc::DHP ,my_data , typename cds::intrusive::michael_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::DHP > > > ,cds::intrusive::opt::compare< my_data_cmp > >::type > option_based_list; \endcode */ template < class GC ,typename T #ifdef CDS_DOXYGEN_INVOKED ,class Traits = michael_list::traits #else ,class Traits #endif > class MichaelList { public: typedef T value_type; ///< type of value stored in the list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< disposer used typedef typename traits::stat stat; ///< Internal statistics typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef GC gc ; ///< Garbage collector typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer static constexpr const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >; //@endcond protected: typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support) atomic_node_ptr m_pHead; ///< Head pointer item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@cond /// Position pointer for item search struct position { atomic_node_ptr * pPrev ; ///< Previous node node_type * pCur ; ///< Current node node_type * pNext ; ///< Next node typename gc::template GuardArray<3> guards ; ///< Guards array enum { guard_prev_item, guard_current_item, guard_next_item }; }; struct clean_disposer { void operator()( value_type * p ) { michael_list::node_cleaner()( node_traits::to_node_ptr( p )); disposer()( p ); } }; //@endcond protected: //@cond static void retire_node( node_type * pNode ) { assert( pNode != nullptr ); gc::template retire( node_traits::to_value_ptr( *pNode )); } static bool link_node( node_type * pNode, position& pos ) { assert( pNode != nullptr ); link_checker::is_empty( pNode ); marked_node_ptr cur(pos.pCur); pNode->m_pNext.store( cur, memory_model::memory_order_release ); if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ))) return true; pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); return false; } static bool unlink_node( position& pos ) { assert( pos.pPrev != nullptr ); assert( pos.pCur != nullptr ); // Mark the node (logical deleting) marked_node_ptr next(pos.pNext, 0); if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, atomics::memory_order_relaxed ))) { // physical deletion may be performed by search function if it detects that a node is logically deleted (marked) // CAS may be successful here or in other thread that searching something marked_node_ptr cur(pos.pCur); if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) retire_node( pos.pCur ); return true; } return false; } //@endcond protected: //@cond template class iterator_type { friend class MichaelList; protected: value_type * m_pNode; typename gc::Guard m_Guard; void next() { if ( m_pNode ) { typename gc::Guard g; node_type * pCur = node_traits::to_node_ptr( *m_pNode ); marked_node_ptr pNext; do { pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); g.assign( node_traits::to_value_ptr( pNext.ptr())); } while ( cds_unlikely( pNext != pCur->m_pNext.load(memory_model::memory_order_acquire))); if ( pNext.ptr()) m_pNode = m_Guard.assign( g.template get()); else { m_pNode = nullptr; m_Guard.clear(); } } } iterator_type( atomic_node_ptr const& pNode ) { for (;;) { marked_node_ptr p = pNode.load(memory_model::memory_order_relaxed); if ( p.ptr()) { 
m_pNode = m_Guard.assign( node_traits::to_value_ptr( p.ptr())); } else { m_pNode = nullptr; m_Guard.clear(); } if ( cds_likely( p == pNode.load(memory_model::memory_order_acquire))) break; } } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) { if ( src.m_pNode ) { m_pNode = m_Guard.assign( src.m_pNode ); } else m_pNode = nullptr; } value_ptr operator ->() const { return m_pNode; } value_ref operator *() const { assert( m_pNode != nullptr ); return *m_pNode; } /// Pre-increment iterator_type& operator ++() { next(); return *this; } iterator_type& operator = (iterator_type const& src) { m_pNode = src.m_pNode; m_Guard.assign( m_pNode ); return *this; } /* /// Post-increment void operator ++(int) { next(); } */ template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return m_pNode != i.m_pNode; } }; //@endcond public: ///@name Forward iterators (only for debugging purpose) //@{ /// Forward iterator /** The forward iterator for Michael's list has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate all item in the list. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. @warning Use this iterator on the concurrent container for debugging purpose only. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( m_pHead ); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. 
For empty list begin() == end() */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator( m_pHead ); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator( m_pHead ); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor initializes empty list MichaelList() : m_pHead( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); } //@cond template >::value >> explicit MichaelList( Stat& st ) : m_pHead( nullptr ) , m_Stat( st ) {} //@endcond /// Destroys the list object ~MichaelList() { clear(); } /// Inserts new node /** The function inserts \p val into the list if the list does not contain an item with key equal to \p val. Returns \p true if \p val has been linked to the list, \p false otherwise. */ bool insert( value_type& val ) { return insert_at( m_pHead, val ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split new item creating into two part: - create item with key only - insert new item into the list - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if the inserting is success. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert( value_type& val, Func f ) { return insert_at( m_pHead, val, f ); } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the list, then \p val is inserted iff \p bInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p val - argument \p val passed into the \p update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refers to the same thing. The functor may change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with that key already in the list. 
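For illustration, a minimal sketch of an \p update() call (the item type \p my_item with a non-key field \p nData and the list object \p theList are hypothetical):
\code
my_item item( 5 );  // key 5
std::pair<bool, bool> res = theList.update( item,
    []( bool bNew, my_item& stored, my_item& val ) {
        // bNew == true:  val has just been inserted; stored and val are the same object
        // bNew == false: stored is the pre-existing item with the same key
        if ( !bNew )
            stored.nData = val.nData;   // change a non-key field only
    });
// res.first  - the operation succeeded
// res.second - a new item has been inserted
\endcode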
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( value_type& val, Func func, bool bInsert = true ) { return update_at( m_pHead, val, func, bInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the list /** The function searches the item \p val in the list and unlinks it from the list if it is found and it is equal to \p val. Difference between \p erase() and \p %unlink(): \p %erase() finds a key and deletes the item found. \p %unlink() finds an item by key and deletes it only if \p val is an item of the list, i.e. the pointer to item found is equal to &val . \p disposer specified in \p Traits is called for deleted item. The function returns \p true if success and \p false otherwise. */ bool unlink( value_type& val ) { return unlink_at( m_pHead, val ); } /// Deletes the item from the list /** \anchor cds_intrusive_MichaelList_hp_erase_val The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \p true. If \p key is not found the function return \p false. \p disposer specified in \p Traits is called for deleted item. */ template bool erase( Q const& key ) { return erase_at( m_pHead, key, key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelList_hp_erase_val "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); } /// Deletes the item from the list /** \anchor cds_intrusive_MichaelList_hp_erase_func The function searches an item with key equal to \p key in the list, call \p func functor with item found, unlinks it from the list, and returns \p true. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If \p key is not found the function return \p false, \p func is not called. \p disposer specified in \p Traits is called for deleted item. */ template bool erase( Q const& key, Func func ) { return erase_at( m_pHead, key, key_comparator(), func ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelList_hp_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); } /// Extracts the item from the list with specified \p key /** \anchor cds_intrusive_MichaelList_hp_extract The function searches an item with key equal to \p key, unlinks it from the list, and returns it as \p guarded_ptr. If \p key is not found returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
The \ref disposer specified in \p Traits class template parameter is called automatically by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::intrusive::MichaelList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_at( m_pHead, key, key_comparator()); } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_intrusive_MichaelList_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); } /// Finds \p key in the list /** \anchor cds_intrusive_MichaelList_hp_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. Note that the function is only guarantee that \p item cannot be disposed during functor is executing. The function does not serialize simultaneous access to the \p item. If such access is possible you must provide your own synchronization schema to keep out unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor may modify both arguments. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_at( m_pHead, key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( m_pHead, key, key_comparator(), f ); } //@endcond /// Finds the \p key using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelList_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); } //@endcond /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. 
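For example, with the \p my_data / \p my_data_cmp types sketched earlier, the key may be passed as a plain string (\p theList is hypothetical):
\code
bool found = theList.contains( std::string( "john" ));
\endcode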
*/ template bool contains( Q const& key ) { return find_at( m_pHead, key, key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the list contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the \p key and return the item found /** \anchor cds_intrusive_MichaelList_hp_get The function searches the item with key equal to \p key and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC automatically when returned \ref guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::intrusive::MichaelList< cds::gc::HP, foo, my_traits > ord_list; ord_list theList; // ... { ord_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) { return get_at( m_pHead, key, key_comparator()); } /// Finds the \p key and return the item found /** The function is an analog of \ref cds_intrusive_MichaelList_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); } /// Clears the list /** The function unlink all items from the list. */ void clear() { typename gc::Guard guard; marked_node_ptr head; while ( true ) { head = m_pHead.load(memory_model::memory_order_relaxed); if ( head.ptr()) guard.assign( node_traits::to_value_ptr( *head.ptr())); if ( cds_likely( m_pHead.load(memory_model::memory_order_acquire) == head )) { if ( head.ptr() == nullptr ) break; value_type& val = *node_traits::to_value_ptr( *head.ptr()); unlink( val ); } } } /// Checks whether the list is empty bool empty() const { return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr; } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact does not mean that the list is empty. To check list emptiness use \p empty() method. 
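For example, a traits sketch that selects a real item counter, so that \p size() returns meaningful values:
\code
struct counting_traits: public cds::intrusive::michael_list::traits {
    // use a real counter instead of the default atomicity::empty_item_counter
    typedef cds::atomicity::item_counter item_counter;
};
\endcode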
*/ size_t size() const { return m_ItemCounter.value(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond // split-list support bool insert_aux_node( node_type * pNode ) { return insert_aux_node( m_pHead, pNode ); } // split-list support bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) { assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type // We assume that comparator can correctly distinguish aux and regular node. return insert_at( refHead, *node_traits::to_value_ptr( pNode )); } bool insert_at( atomic_node_ptr& refHead, value_type& val ) { node_type * pNode = node_traits::to_node_ptr( val ); position pos; while ( true ) { if ( search( refHead, val, pos, key_comparator())) { m_Stat.onInsertFailed(); return false; } if ( link_node( pNode, pos )) { ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertRetry(); } } template bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f ) { node_type * pNode = node_traits::to_node_ptr( val ); position pos; while ( true ) { if ( search( refHead, val, pos, key_comparator())) { m_Stat.onInsertFailed(); return false; } typename gc::Guard guard; guard.assign( &val ); if ( link_node( pNode, pos )) { f( val ); ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertRetry(); } } template std::pair update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert ) { position pos; node_type * pNode = node_traits::to_node_ptr( val ); while ( true ) { if ( search( refHead, val, pos, key_comparator())) { if ( cds_unlikely( pos.pCur->m_pNext.load(memory_model::memory_order_acquire).bits())) { back_off()(); m_Stat.onUpdateMarked(); continue; // the node found is marked as deleted } assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 ); func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); m_Stat.onUpdateExisting(); return std::make_pair( true, false ); } else { if ( !bInsert ) { m_Stat.onUpdateFailed(); return std::make_pair( false, false ); } typename gc::Guard guard; guard.assign( &val ); if ( link_node( pNode, pos )) { ++m_ItemCounter; func( true, val, val ); m_Stat.onUpdateNew(); return std::make_pair( true, true ); } } m_Stat.onUpdateRetry(); } } bool unlink_at( atomic_node_ptr& refHead, value_type& val ) { position pos; back_off bkoff; while ( search( refHead, val, pos, key_comparator())) { if ( node_traits::to_value_ptr( *pos.pCur ) == &val ) { if ( unlink_node( pos )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } else bkoff(); } else { m_Stat.onUpdateFailed(); break; } m_Stat.onEraseRetry(); } m_Stat.onEraseFailed(); return false; } template bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f, position& pos ) { back_off bkoff; while ( search( refHead, val, pos, cmp )) { if ( unlink_node( pos )) { f( *node_traits::to_value_ptr( *pos.pCur )); --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } else bkoff(); m_Stat.onEraseRetry(); } m_Stat.onEraseFailed(); return false; } template bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f ) { position pos; return erase_at( refHead, val, cmp, f, pos ); } template bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) { position pos; return erase_at( refHead, val, cmp, [](value_type const&){}, pos ); } template guarded_ptr extract_at( atomic_node_ptr& refHead, Q const& val, 
Compare cmp ) { position pos; back_off bkoff; while ( search( refHead, val, pos, cmp )) { if ( unlink_node( pos )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return guarded_ptr( pos.guards.release( position::guard_current_item )); } else bkoff(); m_Stat.onEraseRetry(); } m_Stat.onEraseFailed(); return guarded_ptr(); } template bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) { position pos; if ( search( refHead, val, pos, cmp )) { m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) { position pos; if ( search( refHead, val, pos, cmp )) { f( *node_traits::to_value_ptr( *pos.pCur ), val ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template guarded_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) { position pos; if ( search( refHead, val, pos, cmp )) { m_Stat.onFindSuccess(); return guarded_ptr( pos.guards.release( position::guard_current_item )); } m_Stat.onFindFailed(); return guarded_ptr(); } // split-list support template void destroy( Predicate /*pred*/ ) { clear(); } //@endcond protected: //@cond template bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp ) { atomic_node_ptr * pPrev; marked_node_ptr pNext; marked_node_ptr pCur; back_off bkoff; try_again: pPrev = &refHead; pNext = nullptr; pCur = pos.guards.protect( position::guard_current_item, *pPrev, [](marked_node_ptr p) -> value_type * { return node_traits::to_value_ptr( p.ptr()); }); while ( true ) { if ( pCur.ptr() == nullptr ) { pos.pPrev = pPrev; pos.pCur = nullptr; pos.pNext = nullptr; return false; } pNext = pos.guards.protect( position::guard_next_item, pCur->m_pNext, [](marked_node_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr()); }); if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire).all() != pCur.ptr())) { bkoff(); goto try_again; } // pNext contains deletion mark for pCur if ( pNext.bits() == 1 ) { // pCur marked i.e. logically deleted. Help the erase/unlink function to unlink pCur node marked_node_ptr cur( pCur.ptr()); if ( cds_unlikely( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { retire_node( pCur.ptr()); m_Stat.onHelpingSuccess(); } else { bkoff(); m_Stat.onHelpingFailed(); goto try_again; } } else { assert( pCur.ptr() != nullptr ); int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; pos.pCur = pCur.ptr(); pos.pNext = pNext.ptr(); return nCmp == 0; } pPrev = &( pCur->m_pNext ); pos.guards.copy( position::guard_prev_item, position::guard_current_item ); } pCur = pNext; pos.guards.copy( position::guard_current_item, position::guard_next_item ); } } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H libcds-2.3.3/cds/intrusive/impl/skip_list.h000066400000000000000000002065321341244201700207010ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H #define CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H #include #include #include // ref #include #include #include namespace cds { namespace intrusive { //@cond namespace skip_list { namespace details { template class iterator { public: typedef GC gc; typedef NodeTraits node_traits; typedef BackOff back_off; typedef typename node_traits::node_type node_type; typedef typename node_traits::value_type value_type; static constexpr bool const c_isConst = IsConst; typedef typename std::conditional< c_isConst, value_type const&, value_type&>::type value_ref; protected: typedef typename node_type::marked_ptr marked_ptr; typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; typename gc::Guard m_guard; node_type * m_pNode; protected: static value_type * gc_protect( marked_ptr p ) { return node_traits::to_value_ptr( p.ptr()); } void next() { typename gc::Guard g; g.copy( m_guard ); back_off bkoff; for (;;) { if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { // Current node is marked as deleted. So, its next pointer can point to anything // In this case we interrupt our iteration and returns end() iterator. *this = iterator(); return; } marked_ptr p = m_guard.protect( (*m_pNode)[0], gc_protect ); node_type * pp = p.ptr(); if ( p.bits()) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; } else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; } m_pNode = pp; break; } } public: // for internal use only!!! iterator( node_type& refHead ) : m_pNode( nullptr ) { back_off bkoff; for (;;) { marked_ptr p = m_guard.protect( refHead[0], gc_protect ); if ( !p.ptr()) { // empty skip-list m_guard.clear(); break; } node_type * pp = p.ptr(); // Logically deleted node is marked from highest level if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { m_pNode = pp; break; } bkoff(); } } public: iterator() : m_pNode( nullptr ) {} iterator( iterator const& s) : m_pNode( s.m_pNode ) { m_guard.assign( node_traits::to_value_ptr(m_pNode)); } value_type * operator ->() const { assert( m_pNode != nullptr ); assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return node_traits::to_value_ptr( m_pNode ); } value_ref operator *() const { assert( m_pNode != nullptr ); assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return *node_traits::to_value_ptr( m_pNode ); } /// Pre-increment iterator& operator ++() { next(); return *this; } iterator& operator =(const iterator& src) { m_pNode = src.m_pNode; m_guard.copy( src.m_guard ); return *this; } template bool operator ==(iterator const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator const& i ) const { return !( *this == i ); } }; }} // namespace skip_list::details //@endcond /// Lock-free skip-list set /** @ingroup cds_intrusive_map @anchor cds_intrusive_SkipListSet_hp The implementation of well-known probabilistic data structure called skip-list invented by W.Pugh in his papers: - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees - [1990] W.Pugh A Skip List Cookbook A skip-list is a probabilistic data structure that provides expected logarithmic time search without the need of rebalance. The skip-list is a collection of sorted linked list. Nodes are ordered by key. 
Each node is linked into a subset of the lists. Each list has a level, ranging from 0 to 32. The bottom-level list contains all the nodes, and each higher-level list is a sublist of the lower-level lists. Each node is created with a random top level (with a random height), and belongs to all lists up to that level. The probability that a node has the height 1 is 1/2. The probability that a node has the height N is 1/2 ** N (more precisely, the distribution depends on the random generator provided, but our generators have this property).
The lock-free variant of the skip-list is implemented according to the book - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", chapter 14.4 "A Lock-Free Concurrent Skiplist".
Template arguments:
- \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T, see \p skip_list::node.
- \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook) or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
- \p Traits - skip-list traits, default is \p skip_list::traits. It is possible to declare an option-based list with the \p cds::intrusive::skip_list::make_traits metafunction instead of the \p Traits template argument.
@warning The skip-list requires up to 67 hazard pointers, which may be critical for some GCs for which the guard count is limited (such as \p gc::HP). Those GCs should be explicitly initialized with enough hazard pointers: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise a run-time exception may be raised when you try to create a skip-list object.
There are several specializations of \p %SkipListSet for each \p GC. You should include:
- cds/intrusive/skip_list_hp.h for \p gc::HP garbage collector
- cds/intrusive/skip_list_dhp.h for \p gc::DHP garbage collector
- cds/intrusive/skip_list_nogc.h for \ref cds_intrusive_SkipListSet_nogc "append-only set"
- cds/intrusive/skip_list_rcu.h for \ref cds_intrusive_SkipListSet_rcu "RCU type"
Iterators
The class supports a forward iterator (\ref iterator and \ref const_iterator). The iteration is ordered. The iterator object is thread-safe: the element pointed to by the iterator object is guarded, so the element cannot be reclaimed while the iterator object is alive. However, passing an iterator object between threads is dangerous.
@warning Due to the concurrent nature of the skip-list set it is not guaranteed that you can iterate over all elements in the set: any concurrent deletion can exclude the element pointed to by the iterator from the set, and your iteration can be terminated before the end of the set. Therefore, such iteration is more suitable for debugging purposes only. Remember, each iterator object requires two additional hazard pointers, which may be a limited resource for a \p GC such as \p gc::HP (for \p gc::DHP the count of guards is unlimited).
The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode
Note, the iterator object returned by the \p end() and \p cend() member functions points to \p nullptr and should not be dereferenced.
How to use
You should incorporate \p skip_list::node into your struct \p T and provide an appropriate \p skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you define a struct based on \p skip_list::traits.
Example for \p gc::HP and base hook: \code // Include GC-related skip-list specialization #include // Data stored in skip list struct my_data: public cds::intrusive::skip_list::node< cds::gc::HP > { // key field std::string strKey; // other data // ... }; // my_data compare functor struct my_data_cmp { int operator()( const my_data& d1, const my_data& d2 ) { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) { return s.compare( d.strKey ); } }; // Declare your traits struct my_traits: public cds::intrusive::skip_list::traits { typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; typedef my_data_cmp compare; }; // Declare skip-list set type typedef cds::intrusive::SkipListSet< cds::gc::HP, my_data, my_traits > traits_based_set; \endcode Equivalent option-based code: \code // GC-related specialization #include struct my_data { // see above }; struct compare { // see above }; // Declare option-based skip-list set typedef cds::intrusive::SkipListSet< cds::gc::HP ,my_data , typename cds::intrusive::skip_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > > ,cds::intrusive::opt::compare< my_data_cmp > >::type > option_based_set; \endcode */ template < class GC ,typename T #ifdef CDS_DOXYGEN_INVOKED ,typename Traits = skip_list::traits #else ,typename Traits #endif > class SkipListSet { public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the skip-list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< item disposer typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename traits::item_counter item_counter; ///< Item counting policy typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option typedef typename traits::random_level_generator random_level_generator; ///< random level generator typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::stat stat; ///< internal statistics type public: typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) /** The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound but it should be no more than 32 (\p skip_list::c_nHeightLimit). 
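As an illustration only (this is not the library's actual generator), the geometric height distribution described in the class description can be produced like this:
\code
#include <random>
// Each additional level is taken with probability 1/2, up to max_height
unsigned int random_height( std::mt19937& rnd, unsigned int max_height )
{
    unsigned int h = 1;
    while ( ( rnd() & 1 ) != 0 && h < max_height )
        ++h;
    return h;
}
\endcode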
*/ static unsigned int const c_nMaxHeight = std::conditional< (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, std::integral_constant< unsigned int, skip_list::c_nHeightLimit > >::type::value; //@cond static unsigned int const c_nMinHeight = 5; //@endcond // c_nMaxHeight * 2 - pPred/pSucc guards // + 1 - for erase, unlink // + 1 - for clear // + 1 - for help_remove() static size_t const c_nHazardPtrCount = c_nMaxHeight * 2 + 3; ///< Count of hazard pointer required for the skip-list protected: typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic marked node pointer typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer protected: //@cond typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; typedef typename std::conditional< std::is_same< typename traits::internal_node_builder, cds::opt::none >::value ,intrusive_node_builder ,typename traits::internal_node_builder >::type node_builder; typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; struct position { node_type * pPrev[ c_nMaxHeight ]; node_type * pSucc[ c_nMaxHeight ]; typename gc::template GuardArray< c_nMaxHeight * 2 > guards; ///< Guards array for pPrev/pSucc node_type * pCur; // guarded by one of guards }; //@endcond public: /// Default constructor /** The constructor checks whether the count of guards is enough for skip-list and may raise an exception if not. */ SkipListSet() : m_Head( c_nMaxHeight ) , m_nHeight( c_nMinHeight ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); gc::check_available_guards( c_nHazardPtrCount ); // Barrier for head node atomics::atomic_thread_fence( memory_model::memory_order_release ); } /// Clears and destructs the skip-list ~SkipListSet() { destroy(); } public: ///@name Forward iterators (only for debugging purpose) //@{ /// Iterator type /** The forward iterator has some features: - it has no post-increment operator - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" may be thrown if the limit of guard count per thread is exceeded. - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent deleting operations there is no guarantee that you iterate all item in the list. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. @warning Use this iterator on the concurrent container for debugging purpose only. 
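A typical debugging traversal looks like this (a sketch: \p theSet and the \p strKey field follow the \p my_data example above):
\code
for ( auto it = theSet.begin(); it != theSet.end(); ++it )
    std::cout << it->strKey << '\n';
\endcode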
The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; /// Const iterator type typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; /// Returns a forward iterator addressing the first element in a set iterator begin() { return iterator( *m_Head.head()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return const_iterator( *m_Head.head()); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( *m_Head.head()); } /// Returns a forward iterator that addresses the location succeeding the last element in a set. iterator end() { return iterator(); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator end() const { return const_iterator(); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator cend() const { return const_iterator(); } //@} public: /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this set's item by concurrent threads. The user-defined functor is called only if the inserting is success. */ template bool insert( value_type& val, Func f ) { typename gc::Guard gNew; gNew.assign( &val ); node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); bool bTowerOk = pNode->has_tower(); // nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; position pos; while ( true ) { if ( find_position( val, pos, key_comparator(), true )) { // scoped_node_ptr deletes the node tower if we create it if ( !bTowerMade ) scp.release(); m_Stat.onInsertFailed(); return false; } if ( !bTowerOk ) { build_node( pNode ); nHeight = pNode->height(); bTowerMade = pNode->has_tower(); bTowerOk = true; } if ( !insert_at_position( val, pNode, pos, f )) { m_Stat.onInsertRetry(); continue; } increase_height( nHeight ); ++m_ItemCounter; m_Stat.onAddNode( nHeight ); m_Stat.onInsertSuccess(); scp.release(); return true; } } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. 
If the item \p val is not found in the set, then \p val is inserted into the set iff \p bInsert is \p true. Otherwise, the functor \p func is called with item found. The functor \p func signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( value_type& val, Func func, bool bInsert = true ) { typename gc::Guard gNew; gNew.assign( &val ); node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); bool bTowerOk = pNode->has_tower(); bool bTowerMade = false; position pos; while ( true ) { bool bFound = find_position( val, pos, key_comparator(), true ); if ( bFound ) { // scoped_node_ptr deletes the node tower if we create it before if ( !bTowerMade ) scp.release(); func( false, *node_traits::to_value_ptr(pos.pCur), val ); m_Stat.onUpdateExist(); return std::make_pair( true, false ); } if ( !bInsert ) { scp.release(); return std::make_pair( false, false ); } if ( !bTowerOk ) { build_node( pNode ); nHeight = pNode->height(); bTowerMade = pNode->has_tower(); bTowerOk = true; } if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { m_Stat.onInsertRetry(); continue; } increase_height( nHeight ); ++m_ItemCounter; scp.release(); m_Stat.onAddNode( nHeight ); m_Stat.onUpdateNew(); return std::make_pair( true, true ); } } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlink it from the set if it is found and is equal to \p val. Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key and deletes the item found. \p %unlink() finds an item by key and deletes it only if \p val is an item of that set, i.e. the pointer to item found is equal to &val . The \p disposer specified in \p Traits class template parameter is called by garbage collector \p GC asynchronously. The function returns \p true if success and \p false otherwise. 
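For example (a sketch: \p my_data and \p theSet are hypothetical, \p my_data keyed by \p strKey):
\code
my_data a, b;
a.strKey = b.strKey = "same key";
theSet.insert( a );
theSet.unlink( b );  // returns false: the linked item is &a, not &b
theSet.unlink( a );  // returns true and unlinks a
\endcode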
*/ bool unlink( value_type& val ) { position pos; if ( !find_position( val, pos, key_comparator(), false )) { m_Stat.onUnlinkFailed(); return false; } node_type * pDel = pos.pCur; assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); typename gc::Guard gDel; gDel.assign( node_traits::to_value_ptr(pDel)); if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {} )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onUnlinkSuccess(); return true; } m_Stat.onUnlinkFailed(); return false; }
/// Extracts the item from the set with specified \p key
/** \anchor cds_intrusive_SkipListSet_hp_extract The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns it as a \p guarded_ptr object. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. The \p disposer specified in the \p Traits class template parameter is called automatically by the garbage collector \p GC specified in the class template parameters when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp(theList.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template <typename Q> guarded_ptr extract( Q const& key ) { return extract_( key, key_comparator()); }
/// Extracts the item from the set with comparing functor \p pred
/** The function is an analog of \ref cds_intrusive_SkipListSet_hp_extract "extract(Q const&)" but the \p pred predicate is used for key comparing. The \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> guarded_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return extract_( key, cds::opt::details::make_comparator_from_less<Less>()); }
/// Extracts an item with minimal key from the list
/** The function searches an item with minimal key, unlinks it, and returns it as a \p guarded_ptr object. If the skip-list is empty the function returns an empty guarded pointer. @note Due to the concurrent nature of the list, the function extracts an item with a nearly minimal key. It means that the function gets the leftmost item and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key. So, the function returns the item with the minimal key at the moment of the list traversal. The \p disposer specified in the \p Traits class template parameter is called by the garbage collector \p GC automatically when the returned \p guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp(theList.extract_min()); if ( gp ) { // Deal with gp //...
} // Destructor of gp releases internal HP guard } \endcode */ guarded_ptr extract_min() { return extract_min_(); }
/// Extracts an item with maximal key from the list
/** The function searches an item with maximal key, unlinks it, and returns the pointer to the item as a \p guarded_ptr object. If the skip-list is empty the function returns an empty \p guarded_ptr. @note Due to the concurrent nature of the list, the function extracts an item with a nearly maximal key. It means that the function gets the rightmost item and tries to unlink it. During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key. So, the function returns the item with the maximal key at the moment of the list traversal. The \p disposer specified in the \p Traits class template parameter is called by the garbage collector \p GC asynchronously when the returned \ref guarded_ptr object is destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be a limited resource. Usage: \code typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp( theList.extract_max()); if ( gp ) { // Deal with gp //... } // Destructor of gp releases internal HP guard } \endcode */ guarded_ptr extract_max() { return extract_max_(); }
/// Deletes the item from the set
/** \anchor cds_intrusive_SkipListSet_hp_erase The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \p true. If the item with key equal to \p key is not found the function returns \p false. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template <typename Q> bool erase( Q const& key ) { return erase_( key, key_comparator(), [](value_type const&) {} ); }
/// Deletes the item from the set with comparing functor \p pred
/** The function is an analog of \ref cds_intrusive_SkipListSet_hp_erase "erase(Q const&)" but the \p pred predicate is used for key comparing. The \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} ); }
/// Deletes the item from the set
/** \anchor cds_intrusive_SkipListSet_hp_erase_func The function searches an item with key equal to \p key in the set, calls the functor \p f with the item found, unlinks it from the set, and returns \p true. The \ref disposer specified in the \p Traits class template parameter is called by the garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p key is not found the function returns \p false. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template <typename Q, typename Func> bool erase( Q const& key, Func f ) { return erase_( key, key_comparator(), f ); }
/// Deletes the item from the set with comparing functor \p pred
/** The function is an analog of \ref cds_intrusive_SkipListSet_hp_erase_func "erase(Q const&, Func)" but the \p pred predicate is used for key comparing. The \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set.
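For example, a \p Less functor sketch for the \p my_data type shown above, allowing erasure by a string key (\p theSet is hypothetical):
\code
struct my_data_less {
    bool operator()( my_data const& d, std::string const& s ) const { return d.strKey < s; }
    bool operator()( std::string const& s, my_data const& d ) const { return s < d.strKey; }
};
// the functor is called with the item being unlinked
theSet.erase_with( std::string( "john" ), my_data_less(),
    []( my_data const& item ) { std::cout << "erased " << item.strKey << '\n'; });
\endcode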
*/ template <typename Q, typename Less, typename Func> bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_( key, cds::opt::details::make_comparator_from_less<Less>(), f ); }
/// Finds \p key
/** \anchor cds_intrusive_SkipListSet_hp_find_func The function searches the item with key equal to \p key and calls the functor \p f for the item found. The interface of the \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note that the function only guarantees that \p item cannot be disposed while the functor is executing. The function does not serialize simultaneous access to the set's \p item. If such access is possible you must provide your own synchronization on the item level to exclude unsafe item modifications. Note the compare functor specified for the \p Traits class template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q& key, Func f ) { return find_with_( key, key_comparator(), f ); } //@cond template <typename Q, typename Func> bool find( Q const& key, Func f ) { return find_with_( key, key_comparator(), f ); } //@endcond
/// Finds the key \p key with \p pred predicate for comparing
/** The function is an analog of \ref cds_intrusive_SkipListSet_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. The \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_with_( key, cds::opt::details::make_comparator_from_less<Less>(), f ); } //@cond template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_with_( key, cds::opt::details::make_comparator_from_less<Less>(), f ); } //@endcond
/// Checks whether the set contains \p key
/** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template <typename Q> bool contains( Q const& key ) { return find_with_( key, key_comparator(), [](value_type& , Q const& ) {} ); } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond
/// Checks whether the set contains \p key using \p pred predicate for searching
/** The function is similar to contains( key ) but \p pred is used for key comparing. The \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_with_( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} ); } //@cond template <typename Q, typename Less> CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond
/// Finds \p key and returns the item found
/** \anchor cds_intrusive_SkipListSet_hp_get The function searches the item with key equal to \p key and returns the pointer to the item found as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. The \p disposer specified in the \p Traits class template parameter is called by the garbage collector \p GC asynchronously when the returned \ref guarded_ptr object is destroyed or released.
@note Each \p guarded_ptr object uses one GC guard, which can be a limited resource. Usage: \code typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; skip_list theList; // ... { skip_list::guarded_ptr gp(theList.get( 5 )); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for class \p Traits template parameter should accept a parameter of type \p Q that need not be the same as \p value_type. */ template <typename Q> guarded_ptr get( Q const& key ) { return get_with_( key, key_comparator()); } /// Finds \p key and returns the item found /** The function is an analog of \ref cds_intrusive_SkipListSet_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> guarded_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_with_( key, cds::opt::details::make_comparator_from_less<Less>()); } /// Returns item count in the set /** The value returned depends on the item counter type provided by the \p Traits template parameter. If it is \p atomicity::empty_item_counter this function always returns 0. Therefore, the function is not suitable for checking the set emptiness; use \p empty() for this purpose. */ size_t size() const { return m_ItemCounter; } /// Checks if the set is empty bool empty() const { return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; } /// Clears the set (not atomic) /** The function unlinks all items from the set. It is not atomic: in a multi-threaded environment with parallel insertions, the assertion in the sequence \code set.clear(); assert( set.empty()); \endcode could be raised. For each item the \ref disposer will be called after unlinking. */ void clear() { while ( extract_min_()); } /// Returns maximum height of the skip-list. The max height is a constant for each object and does not exceed 32.
static constexpr unsigned int max_height() noexcept { return c_nMaxHeight; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond unsigned int random_level() { // Random generator produces a number from range [0..31] // We need a number from range [1..32] return m_RandomLevelGen() + 1; } template node_type * build_node( Q v ) { return node_builder::make_tower( v, m_RandomLevelGen ); } static value_type * gc_protect( marked_node_ptr p ) { return node_traits::to_value_ptr( p.ptr()); } static void dispose_node( void* p ) { assert( p != nullptr ); value_type* pVal = reinterpret_cast( p ); typename node_builder::node_disposer()( node_traits::to_node_ptr( pVal )); disposer()( pVal ); } void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur ) { if ( pCur->is_upper_level( nLevel )) { marked_node_ptr p( pCur.ptr()); typename gc::Guard hp; marked_node_ptr pSucc = hp.protect( pCur->next( nLevel ), gc_protect ); if ( pSucc.bits() && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( pCur->level_unlinked()) { gc::retire( node_traits::to_value_ptr( pCur.ptr()), dispose_node ); m_Stat.onEraseWhileFind(); } } } } template bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) { node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; // Hazard pointer array: // pPred: [nLevel * 2] // pSucc: [nLevel * 2 + 1] retry: pPred = m_Head.head(); int nCmp = 1; for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); while ( true ) { pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); if ( pCur.bits()) { // pCur.bits() means that pPred is logically deleted goto retry; } if ( pCur.ptr() == nullptr ) { // end of list at level nLevel - goto next level break; } // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. 
logically deleted // try to help deleting pCur help_remove( nLevel, pPred, pCur ); goto retry; } else { nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) { pPred = pCur.ptr(); pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); // pPrev guard := cur guard } else if ( nCmp == 0 && bStopIfFound ) goto found; else break; } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } if ( nCmp != 0 ) return false; found: pos.pCur = pCur.ptr(); return pCur.ptr() && nCmp == 0; } bool find_min_position( position& pos ) { node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; // Hazard pointer array: // pPred: [nLevel * 2] // pSucc: [nLevel * 2 + 1] retry: pPred = m_Head.head(); for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); // pCur.bits() means that pPred is logically deleted // head cannot be deleted assert( pCur.bits() == 0 ); if ( pCur.ptr()) { // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted. // try to help deleting pCur help_remove( nLevel, pPred, pCur ); goto retry; } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } return ( pos.pCur = pCur.ptr()) != nullptr; } bool find_max_position( position& pos ) { node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; // Hazard pointer array: // pPred: [nLevel * 2] // pSucc: [nLevel * 2 + 1] retry: pPred = m_Head.head(); for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); while ( true ) { pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); if ( pCur.bits()) { // pCur.bits() means that pPred is logically deleted goto retry; } if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted. 
// try to help deleting pCur help_remove( nLevel, pPred, pCur ); goto retry; } else { if ( !pSucc.ptr()) break; pPred = pCur.ptr(); pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } return ( pos.pCur = pCur.ptr()) != nullptr; } bool renew_insert_position( value_type& val, node_type * pNode, position& pos ) { node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; key_comparator cmp; // Hazard pointer array: // pPred: [nLevel * 2] // pSucc: [nLevel * 2 + 1] retry: pPred = m_Head.head(); int nCmp = 1; for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); while ( true ) { pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); if ( pCur.bits()) { // pCur.bits() means that pPred is logically deleted goto retry; } if ( pCur.ptr() == nullptr ) { // end of list at level nLevel - goto next level break; } // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted if ( pCur.ptr() == pNode ) { // Node is removing while we are inserting it return false; } // try to help deleting pCur help_remove( nLevel, pPred, pCur ); goto retry; } else { nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) { pPred = pCur.ptr(); pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); // pPrev guard := cur guard } else break; } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } return nCmp == 0; } template bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) { unsigned int const nHeight = pNode->height(); for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) pNode->next( nLevel ).store( marked_node_ptr(), memory_model::memory_order_relaxed ); // Insert at level 0 { marked_node_ptr p( pos.pSucc[0] ); pNode->next( 0 ).store( p, memory_model::memory_order_release ); if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) return false; f( val ); } // Insert at level 1..max for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { marked_node_ptr p; while ( true ) { marked_node_ptr pSucc( pos.pSucc[nLevel] ); // Set pNode->next // pNode->next can have "logical deleted" flag if another thread is removing pNode right now if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, memory_model::memory_order_release, atomics::memory_order_acquire )) { // pNode has been marked as removed while we are inserting it // Stop inserting assert( p.bits() != 0 ); // Here pNode is linked at least level 0 so level_unlinked() cannot returns true CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); // pNode is linked up to nLevel - 1 // Remove it via find_position() find_position( val, pos, key_comparator(), false ); m_Stat.onLogicDeleteWhileInsert(); return true; } p = pSucc; // Link pNode into the list at nLevel if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // go to next level break; } // Renew insert position m_Stat.onRenewInsertPosition(); if ( !renew_insert_position( val, pNode, pos )) { // The node has been 
deleted while we are inserting it // Update current height for concurrent removing CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); m_Stat.onRemoveWhileInsert(); // help removing val find_position( val, pos, key_comparator(), false ); return true; } } } return true; } template <typename Func> bool try_remove_at( node_type * pDel, position& pos, Func f ) { assert( pDel != nullptr ); marked_node_ptr pSucc; back_off bkoff; // logical deletion (marking) for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); if ( pSucc.bits() == 0 ) { bkoff.reset(); while ( !( pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_release, atomics::memory_order_acquire ) || pSucc.bits() != 0 )) { bkoff(); m_Stat.onMarkFailed(); } } } marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr()); while ( true ) { if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_acquire )) { f( *node_traits::to_value_ptr( pDel )); // Physical deletion // try fast erase p = pDel; for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) { pDel->level_unlinked(); } else { // Make slow erase # ifdef CDS_DEBUG if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false )) assert( pDel != pos.pCur ); # else find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); # endif m_Stat.onSlowErase(); return true; } } // Fast erasing success gc::retire( node_traits::to_value_ptr( pDel ), dispose_node ); m_Stat.onFastErase(); return true; } else if ( p.bits()) { // Another thread is deleting pDel right now m_Stat.onEraseContention(); return false; } m_Stat.onEraseRetry(); bkoff(); } } enum find_fastpath_result { find_fastpath_found, find_fastpath_not_found, find_fastpath_abort }; template <typename Q, typename Compare, typename Func> find_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) { node_type * pPred; marked_node_ptr pCur; marked_node_ptr pNull; // guard array: // 0 - pPred on level N // 1 - pCur on level N typename gc::template GuardArray<2> guards; back_off bkoff; unsigned attempt = 0; try_again: pPred = m_Head.head(); for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect ); while ( pCur != pNull ) { if ( pCur.bits()) { // pPred is being removed if ( ++attempt < 4 ) { bkoff(); goto try_again; } return find_fastpath_abort; } if ( pCur.ptr()) { int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) { guards.copy( 0, 1 ); pPred = pCur.ptr(); pCur = guards.protect( 1, pCur->next( nLevel ), gc_protect ); } else if ( nCmp == 0 ) { // found f( *node_traits::to_value_ptr( pCur.ptr()), val ); return find_fastpath_found; } else { // pCur > val - go down break; } } } } return find_fastpath_not_found; } template <typename Q, typename Compare, typename Func> bool find_slowpath( Q& val, Compare cmp, Func f ) { position pos; if ( find_position( val, pos, cmp, true )) { assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); f( *node_traits::to_value_ptr( pos.pCur ), val ); return true; } else return false; } template <typename Q, typename Compare, typename Func> bool find_with_( Q& val, Compare cmp,
Func f ) { switch ( find_fastpath( val, cmp, f )) { case find_fastpath_found: m_Stat.onFindFastSuccess(); return true; case find_fastpath_not_found: m_Stat.onFindFastFailed(); return false; default: break; } if ( find_slowpath( val, cmp, f )) { m_Stat.onFindSlowSuccess(); return true; } m_Stat.onFindSlowFailed(); return false; } template <typename Q, typename Compare> guarded_ptr get_with_( Q const& val, Compare cmp ) { guarded_ptr gp; if ( find_with_( val, cmp, [&gp]( value_type& found, Q const& ) { gp.reset( &found ); } )) return gp; return guarded_ptr(); } template <typename Q, typename Compare, typename Func> bool erase_( Q const& val, Compare cmp, Func f ) { position pos; if ( !find_position( val, pos, cmp, false )) { m_Stat.onEraseFailed(); return false; } node_type * pDel = pos.pCur; typename gc::Guard gDel; gDel.assign( node_traits::to_value_ptr( pDel )); assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); if ( try_remove_at( pDel, pos, f )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } template <typename Q, typename Compare> guarded_ptr extract_( Q const& val, Compare cmp ) { position pos; guarded_ptr gp; for (;;) { if ( !find_position( val, pos, cmp, false )) { m_Stat.onExtractFailed(); return guarded_ptr(); } node_type * pDel = pos.pCur; gp.reset( node_traits::to_value_ptr( pDel )); assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onExtractSuccess(); return gp; } m_Stat.onExtractRetry(); } } guarded_ptr extract_min_() { position pos; guarded_ptr gp; for ( ;;) { if ( !find_min_position( pos )) { // The list is empty m_Stat.onExtractMinFailed(); return guarded_ptr(); } node_type * pDel = pos.pCur; unsigned int nHeight = pDel->height(); gp.reset( node_traits::to_value_ptr( pDel )); if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onExtractMinSuccess(); return gp; } m_Stat.onExtractMinRetry(); } } guarded_ptr extract_max_() { position pos; guarded_ptr gp; for ( ;;) { if ( !find_max_position( pos )) { // The list is empty m_Stat.onExtractMaxFailed(); return guarded_ptr(); } node_type * pDel = pos.pCur; unsigned int nHeight = pDel->height(); gp.reset( node_traits::to_value_ptr( pDel )); if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onExtractMaxSuccess(); return gp; } m_Stat.onExtractMaxRetry(); } } void increase_height( unsigned int nHeight ) { unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); if ( nCur < nHeight ) m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); } void destroy() { node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); while ( p ) { node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); dispose_node( node_traits::to_value_ptr( p )); p = pNext; } } //@endcond private: //@cond skip_list::details::head_node< node_type > m_Head; ///< head tower (max height) random_level_generator m_RandomLevelGen; ///< random level generator instance atomics::atomic<unsigned int> m_nHeight; ///< estimated height of the skip-list item_counter m_ItemCounter; ///< item counter mutable stat m_Stat; ///< internal statistics //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H
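// A minimal end-to-end sketch of using the intrusive skip-list above with Hazard Pointer GC.
// Everything below (the item type foo, its traits, and main()) is illustrative only and
// is not part of the library sources.
#include <cds/init.h>
#include <cds/gc/hp.h>
#include <cds/intrusive/skip_list_hp.h>

struct foo: public cds::intrusive::skip_list::node< cds::gc::HP >
{
    int nKey;
    explicit foo( int key ): nKey( key ) {}
};

struct foo_traits: public cds::intrusive::skip_list::traits
{
    typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP >> hook;
    // Ordering for foo items
    struct less {
        bool operator()( foo const& lhs, foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
    };
    // Called asynchronously by the GC for retired items
    struct disposer {
        void operator()( foo * p ) const { delete p; }
    };
};

typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, foo_traits > foo_set;

int main()
{
    cds::Initialize();
    {
        cds::gc::HP hpGC;                          // Hazard Pointer GC singleton
        cds::threading::Manager::attachThread();   // attach the current thread to libcds

        foo_set theSet;
        theSet.insert( *new foo( 42 ));            // items are allocated by the caller
        theSet.insert( *new foo( 17 ));

        // extract_min() returns a guarded_ptr that keeps the item alive;
        // the disposer is invoked by the GC after the guard is released
        foo_set::guarded_ptr gp = theSet.extract_min();
        if ( gp ) { /* use gp->nKey ... */ }
        // ~foo_set() disposes the remaining items
    }
    cds::Terminate();
    return 0;
}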
libcds-2.3.3/cds/intrusive/iterable_list_dhp.h000066400000000000000000000006321341244201700214050ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H #define CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H #include <cds/gc/dhp.h> #include <cds/intrusive/impl/iterable_list.h> #endif // #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H libcds-2.3.3/cds/intrusive/iterable_list_hp.h000066400000000000000000000006261341244201700212440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H #define CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H #include <cds/gc/hp.h> #include <cds/intrusive/impl/iterable_list.h> #endif // #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H libcds-2.3.3/cds/intrusive/lazy_list_dhp.h000066400000000000000000000006121341244201700205730ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_DHP_H #define CDSLIB_INTRUSIVE_LAZY_LIST_DHP_H #include <cds/gc/dhp.h> #include <cds/intrusive/impl/lazy_list.h> #endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_DHP_H libcds-2.3.3/cds/intrusive/lazy_list_hp.h000066400000000000000000000006061341244201700204320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_HP_H #define CDSLIB_INTRUSIVE_LAZY_LIST_HP_H #include <cds/gc/hp.h> #include <cds/intrusive/impl/lazy_list.h> #endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_HP_H libcds-2.3.3/cds/intrusive/lazy_list_nogc.h000066400000000000000000000715401341244201700207560ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H #define CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H #include <mutex> // unique_lock #include <cds/intrusive/details/lazy_list_base.h> #include <cds/gc/nogc.h> namespace cds { namespace intrusive { namespace lazy_list { /// Lazy list node for \p gc::nogc /** Template parameters: - Lock - lock type. Default is \p cds::sync::spin - Tag - a \ref cds_intrusive_hook_tag "tag" */ template < #ifdef CDS_DOXYGEN_INVOKED typename Lock = cds::sync::spin, typename Tag = opt::none #else typename Lock, typename Tag #endif > struct node<gc::nogc, Lock, Tag> { typedef gc::nogc gc; ///< Garbage collector typedef Lock lock_type; ///< Lock type typedef Tag tag; ///< tag atomics::atomic<node *> m_pNext; ///< pointer to the next node in the list mutable lock_type m_Lock; ///< Node lock node() : m_pNext( nullptr ) {} }; } // namespace lazy_list /// Lazy single-linked list (template specialization for \p gc::nogc) /** @ingroup cds_intrusive_list \anchor cds_intrusive_LazyList_nogc This specialization is an append-only list for use when no item reclamation is needed; the class does not support deleting list items. The list is ordered if \p Traits::sort is \p true (the default), or unordered otherwise. An unordered list can be maintained by an \p equal_to relationship (\p Traits::equal_to), but for the ordered list the \p less or \p compare relation should be specified in \p Traits.
See \ref cds_intrusive_LazyList_hp "LazyList" for description of template parameters. */ template < typename T #ifdef CDS_DOXYGEN_INVOKED ,class Traits = lazy_list::traits #else ,class Traits #endif > class LazyList { public: typedef gc::nogc gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type static constexpr bool const c_bSort = traits::sort; ///< List type: ordered (\p true) or unordered (\p false) # ifdef CDS_DOXYGEN_INVOKED /// Key comparing functor /** - for ordered list, the functor is based on \p traits::compare or \p traits::less - for unordered list, the functor is based on \p traits::equal_to, \p traits::compare or \p traits::less */ typedef implementation_defined key_comparator; # else typedef typename std::conditional< c_bSort, typename opt::details::make_comparator< value_type, traits >::type, typename opt::details::make_equal_to< value_type, traits >::type >::type key_comparator; # endif typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::disposer disposer; ///< disposer typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model) typedef typename traits::stat stat; ///< Internal statistics //@cond static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); // Rebind traits (split-list support) template struct rebind_traits { typedef LazyList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >; //@endcond protected: typedef node_type * auxiliary_head ; ///< Auxiliary head type (for split-list support) protected: node_type m_Head; ///< List head (dummy node) node_type m_Tail; ///< List tail (dummy node) item_counter m_ItemCounter; ///< Item counter mutable stat m_Stat; ///< Internal statistics //@cond /// Position pointer for item search struct position { node_type * pPred ; ///< Previous node node_type * pCur ; ///< Current node /// Locks nodes \p pPred and \p pCur void lock() { pPred->m_Lock.lock(); pCur->m_Lock.lock(); } /// Unlocks nodes \p pPred and \p pCur void unlock() { pCur->m_Lock.unlock(); pPred->m_Lock.unlock(); } }; class auto_lock_position { position& m_pos; public: auto_lock_position( position& pos ) : m_pos(pos) { pos.lock(); } ~auto_lock_position() { m_pos.unlock(); } }; //@endcond protected: //@cond void clear_links( node_type * pNode ) { pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); } template void dispose_node( node_type * pNode, Disposer disp ) { clear_links( pNode ); disp( node_traits::to_value_ptr( *pNode )); } template void dispose_value( value_type& val, Disposer disp ) { dispose_node( node_traits::to_node_ptr( val ), disp ); } void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) { link_checker::is_empty( pNode ); assert( pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur ); pNode->m_pNext.store( pCur, 
memory_model::memory_order_release ); pPred->m_pNext.store( pNode, memory_model::memory_order_release ); } //@endcond protected: //@cond template class iterator_type { friend class LazyList; protected: value_type * m_pNode; void next() { assert( m_pNode != nullptr ); node_type * pNode = node_traits::to_node_ptr( m_pNode ); node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed); if ( pNext != nullptr ) m_pNode = node_traits::to_value_ptr( pNext ); } iterator_type( node_type * pNode ) { m_pNode = node_traits::to_value_ptr( pNode ); } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( const iterator_type& src ) : m_pNode( src.m_pNode ) {} value_ptr operator ->() const { return m_pNode; } value_ref operator *() const { assert( m_pNode != nullptr ); return *m_pNode; } /// Pre-increment iterator_type& operator ++() { next(); return *this; } /// Post-increment iterator_type operator ++(int) { iterator_type i(*this); next(); return i; } iterator_type& operator = (const iterator_type& src) { m_pNode = src.m_pNode; return *this; } template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return m_pNode != i.m_pNode; } }; //@endcond public: /// Forward iterator typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( &m_Head ); ++it ; // skip dummy head return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator( &m_Tail ); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { const_iterator it( const_cast(&m_Head)); ++it; // skip dummy head return it; } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return cend(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator( const_cast(&m_Tail)); } public: /// Default constructor initializes empty list LazyList() { m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed ); } //@cond template >::value >> explicit LazyList( Stat& st ) : m_Stat( st ) { m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed ); } //@endcond /// Destroys the list object ~LazyList() { clear(); assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail ); m_Head.m_pNext.store( nullptr, memory_model::memory_order_relaxed ); } /// Inserts new node /** The function inserts \p val in the list if the list does not contain an item with key equal to \p val. Returns \p true if \p val is linked into the list, \p false otherwise. 
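For example (a sketch; the item type \p foo and its traits below are assumptions for this example, not part of the library):
\code
#include <cds/intrusive/lazy_list_nogc.h>

struct foo: public cds::intrusive::lazy_list::node< cds::gc::nogc >
{
    int nKey;
    explicit foo( int key ): nKey( key ) {}
};

struct foo_traits: public cds::intrusive::lazy_list::traits
{
    typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::nogc >> hook;
    struct less {
        bool operator()( foo const& lhs, foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
    };
};

typedef cds::intrusive::LazyList< cds::gc::nogc, foo, foo_traits > foo_list;

foo_list theList;
foo item( 42 );
bool bOk = theList.insert( item );  // true: key 42 is new, item is linked into the list
\endcode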
*/ bool insert( value_type& val ) { return insert_at( &m_Head, val ); } /// Updates the item /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the list, then \p val is inserted into the list iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p val - argument \p val passed into the \p update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor may change non-key fields of the \p item. While the functor \p f is calling the item \p item is locked. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the list. */ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { return update_at( &m_Head, val, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Finds the key \p key /** \anchor cds_intrusive_LazyList_nogc_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. While the functor \p f is calling the item found \p item is locked. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_at( &m_Head, key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( &m_Head, key, key_comparator(), f ); } //@endcond /// Finds the key \p key using \p less predicate for searching. Disabled for unordered lists. /** The function is an analog of \ref cds_intrusive_LazyList_nogc_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template typename std::enable_if::type find_with( Q& key, Less less, Func f ) { CDS_UNUSED( less ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); } /// Finds the key \p key using \p equal predicate for searching. Disabled for ordered lists. /** The function is an analog of \ref cds_intrusive_LazyList_nogc_find_func "find(Q&, Func)" but \p equal is used for key comparing. \p Equal functor has the interface like \p std::equal_to. 
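For example (a sketch, assuming an unordered list type \p foo_list declared with \p Traits::sort = \p false, and the item type \p foo with an integer field \p nKey):
\code
// std::equal_to-like predicate comparing foo with an int key
struct foo_equal {
    bool operator()( foo const& lhs, int rhs ) const { return lhs.nKey == rhs; }
    bool operator()( int lhs, foo const& rhs ) const { return lhs == rhs.nKey; }
};

bool bFound = theList.find_with( 42, foo_equal(),
    []( foo& item, int /*key*/ ) { /* update non-key fields of item */ });
\endcode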
*/ template <typename Q, typename Equal, typename Func> typename std::enable_if<!c_bSort, bool>::type find_with( Q& key, Equal eq, Func f ) { //CDS_UNUSED( eq ); return find_at( &m_Head, key, eq, f ); } //@cond template <typename Q, typename Less, typename Func> typename std::enable_if<c_bSort, bool>::type find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less<Less>(), f ); } template <typename Q, typename Equal, typename Func> typename std::enable_if<!c_bSort, bool>::type find_with( Q const& key, Equal eq, Func f ) { //CDS_UNUSED( eq ); return find_at( &m_Head, key, eq, f ); } //@endcond /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template <typename Q> value_type * contains( Q const& key ) { return find_at( &m_Head, key, key_comparator()); } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") value_type * find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the list contains \p key using \p pred predicate for searching (ordered list version) /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template <typename Q, typename Less> typename std::enable_if<c_bSort, value_type *>::type contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less<Less>()); } //@cond template <typename Q, typename Less> CDS_DEPRECATED("deprecated, use contains()") typename std::enable_if<c_bSort, value_type *>::type find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Checks whether the list contains \p key using \p equal predicate for searching (unordered list version) /** The function is an analog of contains( key ) but \p equal is used for key comparing. \p Equal functor has the interface like \p std::equal_to. */ template <typename Q, typename Equal> typename std::enable_if<!c_bSort, value_type *>::type contains( Q const& key, Equal eq ) { return find_at( &m_Head, key, eq ); } //@cond template <typename Q, typename Equal> CDS_DEPRECATED("deprecated, use contains()") typename std::enable_if<!c_bSort, value_type *>::type find_with( Q const& key, Equal eq ) { return contains( key, eq ); } //@endcond /// Clears the list /** The function unlinks all items from the list. For each unlinked item the item disposer \p disp is called after unlinking. This function is not thread-safe. */ template <typename Disposer> void clear( Disposer disp ) { node_type * pHead = m_Head.m_pNext.exchange( &m_Tail, memory_model::memory_order_release ); while ( pHead != &m_Tail ) { node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); dispose_node( pHead, disp ); --m_ItemCounter; pHead = p; } } /// Clears the list using default disposer /** The function clears the list using default (provided in class template) disposer functor. */ void clear() { clear( disposer()); } /// Checks if the list is empty bool empty() const { return m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail; } /// Returns list's item count /** The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, this function always returns 0. Warning: even if you use a real item counter and it returns 0, this does not mean that the list is empty. To check list emptiness, use the \ref empty() method.
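For example (a sketch; \p foo_list and \p foo are the illustrative types declared above):
\code
foo_list theList;
foo item( 1 );
theList.insert( item );

// With atomicity::empty_item_counter (the default), size() always returns 0
// even though the list is not empty:
assert( !theList.empty());
size_t n = theList.size();  // 0 unless opt::item_counter is set to a real counter
\endcode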
*/ size_t size() const { return m_ItemCounter.value(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond // split-list support bool insert_aux_node( node_type * pNode ) { return insert_aux_node( &m_Head, pNode ); } // split-list support bool insert_aux_node( node_type * pHead, node_type * pNode ) { assert( pHead != nullptr ); assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type // We assume that comparator can correctly distinguish aux and regular node. return insert_at( pHead, *node_traits::to_value_ptr( pNode )); } bool insert_at( node_type * pHead, value_type& val ) { position pos; key_comparator pred; while ( true ) { search( pHead, val, pos, pred ); { auto_lock_position alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { // failed: key already in list m_Stat.onInsertFailed(); return false; } else { link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); break; } } } m_Stat.onInsertRetry(); } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } iterator insert_at_( node_type * pHead, value_type& val ) { if ( insert_at( pHead, val )) return iterator( node_traits::to_node_ptr( val )); return end(); } template std::pair update_at_( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { position pos; key_comparator pred; while ( true ) { search( pHead, val, pos, pred ); { auto_lock_position alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { // key already in the list func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); m_Stat.onUpdateExisting(); return std::make_pair( iterator( pos.pCur ), false ); } else { // new key if ( !bAllowInsert ) { m_Stat.onUpdateFailed(); return std::make_pair( end(), false ); } link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); func( true, val, val ); break; } } m_Stat.onUpdateRetry(); } } ++m_ItemCounter; m_Stat.onUpdateNew(); return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); } template std::pair update_at( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { std::pair ret = update_at_( pHead, val, func, bAllowInsert ); return std::make_pair( ret.first != end(), ret.second ); } template bool find_at( node_type * pHead, Q& val, Pred pred, Func f ) { position pos; search( pHead, val, pos, pred ); if ( pos.pCur != &m_Tail ) { std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { f( *node_traits::to_value_ptr( *pos.pCur ), val ); m_Stat.onFindSuccess(); return true; } } m_Stat.onFindFailed(); return false; } template value_type * find_at( node_type * pHead, Q& val, Pred pred) { iterator it = find_at_( pHead, val, pred ); if ( it != end()) return &*it; return nullptr; } template iterator find_at_( node_type * pHead, Q& val, Pred pred) { position pos; search( pHead, val, pos, pred ); if ( pos.pCur != &m_Tail ) { if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { m_Stat.onFindSuccess(); return iterator( pos.pCur ); } } m_Stat.onFindFailed(); return end(); } //@endcond protected: //@cond template typename std::enable_if::type search( node_type * pHead, const Q& key, position& pos, Equal eq ) { const node_type * pTail = &m_Tail; node_type * 
pCur = pHead; node_type * pPrev = pHead; while ( pCur != pTail && ( pCur == pHead || !equal( *node_traits::to_value_ptr( *pCur ), key, eq ))) { pPrev = pCur; pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); } pos.pCur = pCur; pos.pPred = pPrev; } template typename std::enable_if::type search( node_type * pHead, const Q& key, position& pos, Compare cmp ) { const node_type * pTail = &m_Tail; node_type * pCur = pHead; node_type * pPrev = pHead; while ( pCur != pTail && ( pCur == pHead || cmp( *node_traits::to_value_ptr( *pCur ), key ) < 0 )) { pPrev = pCur; pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); } pos.pCur = pCur; pos.pPred = pPrev; } template static typename std::enable_if::type equal( L const& l, R const& r, Equal eq ) { return eq(l, r); } template static typename std::enable_if::type equal( L const& l, R const& r, Compare cmp ) { return cmp(l, r) == 0; } bool validate( node_type * pPred, node_type * pCur ) { if ( pPred->m_pNext.load(memory_model::memory_order_acquire) == pCur ) { m_Stat.onValidationSuccess(); return true; } m_Stat.onValidationFailed(); return false; } // for split-list template void erase_for( Predicate pred ) { node_type * pPred = nullptr; node_type * pHead = m_Head.m_pNext.load( memory_model::memory_order_relaxed ); while ( pHead != &m_Tail ) { node_type * p = pHead->m_pNext.load( memory_model::memory_order_relaxed ); if ( pred( *node_traits::to_value_ptr( pHead ))) { assert( pPred != nullptr ); pPred->m_pNext.store( p, memory_model::memory_order_relaxed ); dispose_node( pHead, disposer()); } else pPred = pHead; pHead = p; } } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H libcds-2.3.3/cds/intrusive/lazy_list_rcu.h000066400000000000000000001342141341244201700206170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H #define CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H #include // unique_lock #include #include #include #include namespace cds { namespace intrusive { namespace lazy_list { /// Lazy list node for \ref cds_urcu_desc "RCU" /** Template parameters: - Tag - a tag used to distinguish between different implementation */ template struct node, Lock, Tag> { typedef cds::urcu::gc gc ; ///< RCU schema typedef Lock lock_type ; ///< Lock type typedef Tag tag ; ///< tag typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer typedef atomics::atomic atomic_marked_ptr ; ///< atomic marked pointer specific for GC atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list mutable lock_type m_Lock ; ///< Node lock /// Checks if node is marked bool is_marked() const { return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; } /// Default ctor node() : m_pNext( nullptr ) {} /// Clears internal fields void clear() { m_pNext.store( marked_ptr(), atomics::memory_order_release ); } }; } // namespace lazy_list /// Lazy ordered single-linked list (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_intrusive_list \anchor cds_intrusive_LazyList_rcu Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - type to be stored in the list - \p Traits - type traits. See \p lazy_list::traits for explanation. 
It is possible to declare option-based list with \p %cds::intrusive::lazy_list::make_traits metafunction instead of \p Traits template argument. \par Usage Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: \code #include #include // Now, you can declare lazy list for type Foo and default traits: typedef cds::intrusive::LazyList >, Foo > rcu_lazy_list; \endcode */ template < typename RCU ,typename T #ifdef CDS_DOXYGEN_INVOKED ,class Traits = lazy_list::traits #else ,class Traits #endif > class LazyList, T, Traits> { public: typedef cds::urcu::gc gc; ///< RCU schema typedef T value_type; ///< type of value stored in the list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::back_off back_off; ///< back-off strategy (not used) typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model) typedef typename traits::stat stat; ///< Internal statistics typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = true; ///< Group of \p extract_xxx functions require external locking static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef LazyList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >; //@endcond protected: node_type m_Head; ///< List head (dummy node) node_type m_Tail; ///< List tail (dummy node) item_counter m_ItemCounter; ///< Item counter mutable stat m_Stat; ///< Internal statistics //@cond typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support) /// Position pointer for item search struct position { node_type * pPred; ///< Previous node node_type * pCur; ///< Current node /// Locks nodes \p pPred and \p pCur void lock() { pPred->m_Lock.lock(); pCur->m_Lock.lock(); } /// Unlocks nodes \p pPred and \p pCur void unlock() { pCur->m_Lock.unlock(); pPred->m_Lock.unlock(); } }; typedef std::unique_lock< position > scoped_position_lock; typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> deadlock_policy; struct clear_and_dispose { void operator()( value_type * p ) { assert( p != nullptr ); clear_links( node_traits::to_node_ptr(p)); 
disposer()( p ); } }; //@endcond public: /// pointer to extracted node using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, clear_and_dispose, void >; /// Type of \p get() member function return value typedef value_type * raw_ptr; protected: //@cond template class iterator_type { friend class LazyList; protected: value_type * m_pNode; void next() { assert( m_pNode != nullptr ); node_type * pNode = node_traits::to_node_ptr( m_pNode ); node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_acquire).ptr(); if ( pNext != nullptr ) m_pNode = node_traits::to_value_ptr( pNext ); } void skip_deleted() { if ( m_pNode != nullptr ) { node_type * pNode = node_traits::to_node_ptr( m_pNode ); // Dummy tail node could not be marked while ( pNode->is_marked()) pNode = pNode->m_pNext.load(memory_model::memory_order_acquire).ptr(); if ( pNode != node_traits::to_node_ptr( m_pNode )) m_pNode = node_traits::to_value_ptr( pNode ); } } iterator_type( node_type * pNode ) { m_pNode = node_traits::to_value_ptr( pNode ); skip_deleted(); } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) : m_pNode( src.m_pNode ) {} value_ptr operator ->() const { return m_pNode; } value_ref operator *() const { assert( m_pNode != nullptr ); return *m_pNode; } /// Pre-increment iterator_type& operator ++() { next(); skip_deleted(); return *this; } /// Post-increment iterator_type operator ++(int) { iterator_type i(*this); next(); skip_deleted(); return i; } iterator_type& operator = (iterator_type const& src) { m_pNode = src.m_pNode; return *this; } template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return m_pNode != i.m_pNode; } }; //@endcond public: ///@name Forward iterators (thread-safe only under RCU lock) //@{ /// Forward iterator /** You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the item the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { iterator it( &m_Head ); ++it ; // skip dummy head return it; } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the list. 
For empty list \code begin() == end() \endcode */ iterator end() { return iterator( &m_Tail ); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return get_const_begin(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return get_const_end(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return get_const_end(); } //@} public: /// Default constructor initializes empty list LazyList() { m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); } //@cond template >::value >> explicit LazyList( Stat& st ) : m_Stat( st ) { m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); } //@endcond /// Destroys the list object ~LazyList() { clear(); assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed).ptr() == &m_Tail ); m_Head.m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); } /// Inserts new node /** The function inserts \p val in the list if the list does not contain an item with key equal to \p val. Returns \p true if \p val is linked into the list, \p false otherwise. */ bool insert( value_type& val ) { return insert_at( &m_Head, val ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split new item creating into two part: - create item with key only - insert new item into the list - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. While the functor \p f is working the item \p val is locked. The user-defined functor is called only if the inserting is success. */ template bool insert( value_type& val, Func f ) { return insert_at( &m_Head, val, f ); } /// Updates the item /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the list, then \p val is inserted into the list iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p val - argument \p val passed into the \p update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor may change non-key fields of the \p item. While the functor \p f is calling the item \p item is locked. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the list. The function makes RCU lock internally. 
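For example (a sketch, assuming the item type \p foo has an integer key \p nKey and a payload field \p nVal, and \p rcu_lazy_list is the list type declared above):
\code
rcu_lazy_list theList;
foo item( 5 );

std::pair<bool, bool> ret = theList.update( item,
    []( bool bNew, foo& itemInList, foo& val ) {
        if ( !bNew )
            itemInList.nVal = val.nVal;  // refresh the payload of the existing item
    });
// ret.first  - the operation succeeded
// ret.second - true if item has been inserted, false if an existing item was updated
\endcode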
*/ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { return update_at( &m_Head, val, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the list /** The function searches the item \p val in the list and unlink it from the list if it is found and it is equal to \p val. Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key and deletes the item found. \p %unlink() finds an item by key and deletes it only if \p val is an item of that list, i.e. the pointer to item found is equal to &val . The function returns \p true if success and \p false otherwise. RCU \p synchronize method can be called. The RCU should not be locked. Note that depending on RCU type used the \ref disposer call can be deferred. \p disposer specified in \p Traits is called for unlinked item. The function can throw \p cds::urcu::rcu_deadlock exception if deadlock is encountered and deadlock checking policy is \p opt::v::rcu_throw_deadlock. */ bool unlink( value_type& val ) { return unlink_at( &m_Head, val ); } /// Deletes the item from the list /** The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \p true. If the item with the key equal to \p key is not found the function return \p false. RCU \p synchronize method can be called. The RCU should not be locked. Note that depending on RCU type used the \ref disposer call can be deferred. \p disposer specified in \p Traits is called for deleted item. The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if deadlock is encountered and deadlock checking policy is \p opt::v::rcu_throw_deadlock. */ template bool erase( Q const& key ) { return erase_at( &m_Head, key, key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(Q const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); } /// Deletes the item from the list /** The function searches an item with key equal to \p key in the list, call \p func functor with item found, unlinks it from the list, and returns \p true. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with the key equal to \p key is not found the function return \p false. RCU \p synchronize method can be called. The RCU should not be locked. Note that depending on RCU type used the \ref disposer call can be deferred. \p disposer specified in \p Traits is called for deleted item. The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if deadlock is encountered and deadlock checking policy is \p opt::v::rcu_throw_deadlock. */ template bool erase( Q const& key, Func func ) { return erase_at( &m_Head, key, key_comparator(), func ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(Q const&, Func) but \p pred is used for key comparing. 
\p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred, Func func ) { CDS_UNUSED( pred ); return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), func ); } /// Extracts an item from the list /** The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. If the item is not found the function returns empty \p exempt_ptr. @note The function does NOT call RCU read-side lock or synchronization, and does NOT dispose the item found. It just unlinks the item from the list and returns a pointer to it. You should manually lock RCU before calling this function, and you should manually release the returned exempt pointer outside the RCU lock region before reusing returned pointer. \code #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::intrusive::LazyList< rcu, Foo > rcu_lazy_list; rcu_lazy_list theList; // ... rcu_lazy_list::exempt_ptr p1; { // first, we should lock RCU rcu::scoped_lock sl; // Now, you can apply extract function // Note that you must not delete the item found inside the RCU lock p1 = theList.extract( 10 ) if ( p1 ) { // do something with p1 ... } } // We may safely release p1 here // release() passes the pointer to RCU reclamation cycle: // it invokes RCU retire_ptr function with the disposer you provided for the list. p1.release(); \endcode */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( extract_at( &m_Head, key, key_comparator())); } /// Extracts an item from the list using \p pred predicate for searching /** This function is the analog for \p extract(Q const&). The \p pred is a predicate used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as \ref key_comparator. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less())); } /// Finds the key \p key /** The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor may change non-key fields of \p item. While the functor \p f is calling the item found \p item is locked. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) const { return find_at( const_cast( &m_Head ), key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) const { return find_at( const_cast(&m_Head), key, key_comparator(), f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \p find( Q&, Func ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. 
*/ template bool find_with( Q& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( const_cast( &m_Head ), key, cds::opt::details::make_comparator_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_at( const_cast(&m_Head), key, cds::opt::details::make_comparator_from_less(), f ); } //@endcond /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) const { return find_at( const_cast( &m_Head ), key, key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of \p contains( Q const& ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return find_at( const_cast( &m_Head ), key, cds::opt::details::make_comparator_from_less()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Finds the key \p key and return the item found /** \anchor cds_intrusive_LazyList_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns \p nullptr. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::intrusive::LazyList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; ord_list theList; // ... { // Lock RCU typename ord_list::rcu_lock lock; foo * pVal = theList.get( 5 ); if ( pVal ) { // Deal with pVal //... } // Unlock RCU by rcu_lock destructor // pVal can be retired by disposer at any time after RCU has been unlocked } \endcode */ template value_type * get( Q const& key ) const { return get_at( const_cast( &m_Head ), key, key_comparator()); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_intrusive_LazyList_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template value_type * get_with( Q const& key, Less pred ) const { CDS_UNUSED( pred ); return get_at( const_cast( &m_Head ), key, cds::opt::details::make_comparator_from_less()); } /// Clears the list using default disposer /** The function clears the list using default (provided in class template) disposer functor. RCU \p synchronize method can be called. Note that depending on RCU type used the \ref disposer call can be deferred. The function can throw \p cds::urcu::rcu_deadlock exception if deadlock is encountered and deadlock checking policy is \p opt::v::rcu_throw_deadlock. 
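A minimal usage sketch (assuming the \p rcu and \p rcu_lazy_list types from the \p extract() example above):
\code
// clear() may call RCU synchronize internally, so the RCU must not be locked here
assert( !rcu::is_locked());
theList.clear();   // every removed item is passed to the disposer, possibly deferred
\endcode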
*/
void clear()
{
    if ( !empty()) {
        deadlock_policy::check();
        node_type * pHead;
        for (;;) {
            {
                rcu_lock l;
                pHead = m_Head.m_pNext.load(memory_model::memory_order_acquire).ptr();
                if ( pHead == &m_Tail )
                    break;

                m_Head.m_Lock.lock();
                pHead->m_Lock.lock();

                if ( m_Head.m_pNext.load(memory_model::memory_order_relaxed).all() == pHead )
                    unlink_node( &m_Head, pHead, &m_Head );

                pHead->m_Lock.unlock();
                m_Head.m_Lock.unlock();
            }

            --m_ItemCounter;
            dispose_node( pHead );
        }
    }
}

/// Checks if the list is empty
bool empty() const
{
    return m_Head.m_pNext.load(memory_model::memory_order_relaxed).ptr() == &m_Tail;
}

/// Returns list's item count
/**
    The value returned depends on opt::item_counter option.
    For atomicity::empty_item_counter, this function always returns 0.

    Warning: even if you use a real item counter and it returns 0,
    this fact does not mean that the list is empty.
    To check list emptiness use the \ref empty() method.
*/
size_t size() const
{
    return m_ItemCounter.value();
}

/// Returns const reference to internal statistics
stat const& statistics() const
{
    return m_Stat;
}

protected:
//@cond
static void clear_links( node_type * pNode )
{
    pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
}

static void dispose_node( node_type * pNode )
{
    assert( pNode );
    assert( !gc::is_locked());

    gc::template retire_ptr<disposer>( node_traits::to_value_ptr( *pNode ));
}

static void link_node( node_type * pNode, node_type * pPred, node_type * pCur )
{
    assert( pPred->m_pNext.load( memory_model::memory_order_relaxed ).ptr() == pCur );
    link_checker::is_empty( pNode );

    pNode->m_pNext.store( marked_node_ptr( pCur ), memory_model::memory_order_relaxed );
    pPred->m_pNext.store( marked_node_ptr( pNode ), memory_model::memory_order_release );
}

void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead )
{
    assert( pPred->m_pNext.load( memory_model::memory_order_relaxed ).ptr() == pCur );
    assert( pCur != &m_Tail );

    node_type * pNext = pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr();
    pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed );  // logical deletion + back-link for search
    pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release );    // physical unlinking
}

// split-list support
bool insert_aux_node( node_type * pNode )
{
    return insert_aux_node( &m_Head, pNode );
}

// split-list support
bool insert_aux_node( node_type * pHead, node_type * pNode )
{
    assert( pHead != nullptr );
    assert( pNode != nullptr );

    // Hack: convert node_type to value_type.
    // Actually, an auxiliary node should not be converted to value_type.
    // We assume that the comparator can correctly distinguish aux and regular nodes.
return insert_at( pHead, *node_traits::to_value_ptr( pNode )); } bool insert_at( node_type * pHead, value_type& val ) { rcu_lock l; return insert_at_locked( pHead, val ); } template bool insert_at( node_type * pHead, value_type& val, Func f ) { position pos; key_comparator cmp; rcu_lock l; while ( true ) { search( pHead, val, pos ); { scoped_position_lock sl( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // failed: key already in list m_Stat.onInsertFailed(); return false; } f( val ); link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); break; } } m_Stat.onInsertRetry(); } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } iterator insert_at_( node_type * pHead, value_type& val ) { rcu_lock l; if ( insert_at_locked( pHead, val )) return iterator( node_traits::to_node_ptr( val )); return end(); } template std::pair update_at_( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { rcu_lock l; return update_at_locked( pHead, val, func, bAllowInsert ); } template std::pair update_at( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { rcu_lock l; std::pair ret = update_at_locked( pHead, val, func, bAllowInsert ); return std::make_pair( ret.first != end(), ret.second ); } bool unlink_at( node_type * pHead, value_type& val ) { position pos; key_comparator cmp; deadlock_policy::check(); while ( true ) { int nResult = 0; { rcu_lock l; search( pHead, val, pos ); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 && node_traits::to_value_ptr( pos.pCur ) == &val ) { // item found unlink_node( pos.pPred, pos.pCur, pHead ); nResult = 1; } else nResult = -1; } } } if ( nResult ) { if ( nResult > 0 ) { --m_ItemCounter; dispose_node( pos.pCur ); m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } m_Stat.onEraseRetry(); } } template bool erase_at( node_type * const pHead, Q const& val, Compare cmp, Func f, position& pos ) { deadlock_policy::check(); while ( true ) { int nResult = 0; { rcu_lock l; search( pHead, val, pos, cmp ); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // key found unlink_node( pos.pPred, pos.pCur, pHead ); f( *node_traits::to_value_ptr( *pos.pCur )); nResult = 1; } else nResult = -1; } } } if ( nResult ) { if ( nResult > 0 ) { --m_ItemCounter; dispose_node( pos.pCur ); m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } m_Stat.onEraseRetry(); } } template bool erase_at( node_type * pHead, Q const& val, Compare cmp, Func f ) { position pos; return erase_at( pHead, val, cmp, f, pos ); } template bool erase_at( node_type * pHead, Q const& val, Compare cmp ) { position pos; return erase_at( pHead, val, cmp, [](value_type const&){}, pos ); } template value_type * extract_at( node_type * const pHead, Q const& val, Compare cmp ) { position pos; assert( gc::is_locked()) ; // RCU must be locked while ( true ) { search( pHead, val, pos, cmp ); int nResult = 0; { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // key found unlink_node( pos.pPred, pos.pCur, pHead ); nResult = 1; } else { nResult = -1; } } } if ( nResult ) { if ( nResult > 0 ) { --m_ItemCounter; 
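// note: extract_at() does not dispose the unlinked node; the exempt_ptr
// returned by extract() takes ownership and retires it on release()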
m_Stat.onEraseSuccess(); return node_traits::to_value_ptr( pos.pCur ); } m_Stat.onEraseFailed(); return nullptr; } m_Stat.onEraseRetry(); } } template bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) const { position pos; rcu_lock l; search( pHead, val, pos, cmp ); if ( pos.pCur != &m_Tail ) { std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { f( *node_traits::to_value_ptr( *pos.pCur ), val ); m_Stat.onFindSuccess(); return true; } } m_Stat.onFindFailed(); return false; } template bool find_at( node_type * pHead, Q& val, Compare cmp ) const { rcu_lock l; return find_at_( pHead, val, cmp ) != end(); } template const_iterator find_at_( node_type * pHead, Q& val, Compare cmp ) const { assert( gc::is_locked()); position pos; search( pHead, val, pos, cmp ); if ( pos.pCur != &m_Tail ) { if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { m_Stat.onFindSuccess(); return const_iterator( pos.pCur ); } } m_Stat.onFindFailed(); return end(); } template value_type * get_at( node_type * pHead, Q const& val, Compare cmp ) const { value_type * pFound = nullptr; return find_at( pHead, val, cmp, [&pFound](value_type& found, Q const& ) { pFound = &found; } ) ? pFound : nullptr; } //@endcond protected: //@cond template void search( node_type * const pHead, Q const& key, position& pos ) const { search( pHead, key, pos, key_comparator()); } template void search( node_type * const pHead, Q const& key, position& pos, Compare cmp ) const { // RCU should be locked assert( gc::is_locked()); node_type const* pTail = &m_Tail; marked_node_ptr pCur(pHead); marked_node_ptr pPrev(pHead); while ( pCur != pTail && ( pCur == pHead || cmp( *node_traits::to_value_ptr( *pCur.ptr()), key ) < 0 )) { pPrev = pCur; pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); if ( pCur.bits()) pPrev = pCur = pHead; } pos.pCur = pCur.ptr(); pos.pPred = pPrev.ptr(); } bool validate( node_type * pPred, node_type * pCur ) noexcept { if ( validate_link( pPred, pCur )) { m_Stat.onValidationSuccess(); return true; } m_Stat.onValidationFailed(); return false; } static bool validate_link( node_type * pPred, node_type * pCur ) noexcept { // RCU lock should be locked assert( gc::is_locked()); return !pPred->is_marked() && !pCur->is_marked() && pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur; } //@endcond private: //@cond bool insert_at_locked( node_type * pHead, value_type& val ) { // RCU lock should be locked assert( gc::is_locked()); position pos; key_comparator cmp; while ( true ) { search( pHead, val, pos ); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // failed: key already in list m_Stat.onInsertFailed(); return false; } link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); break; } } m_Stat.onInsertRetry(); } ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } template std::pair update_at_locked( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { // RCU lock should be locked assert( gc::is_locked()); position pos; key_comparator cmp; while ( true ) { search( pHead, val, pos ); { scoped_position_lock alp( pos ); if ( validate( pos.pPred, pos.pCur )) { if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { // key already in the list func( false, *node_traits::to_value_ptr( *pos.pCur ), val ); m_Stat.onUpdateExisting(); 
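// the key is already in the list: report an iterator to the existing item
// and 'false' ("no insertion happened") to the caller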
return std::make_pair( iterator( pos.pCur ), false ); } else { // new key if ( !bAllowInsert ) { m_Stat.onUpdateFailed(); return std::make_pair( end(), false ); } func( true, val, val ); link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); break; } } } m_Stat.onUpdateRetry(); } ++m_ItemCounter; m_Stat.onUpdateNew(); return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); } //@endcond private: //@cond const_iterator get_const_begin() const { const_iterator it( const_cast(&m_Head)); ++it; // skip dummy head return it; } const_iterator get_const_end() const { return const_iterator( const_cast(&m_Tail)); } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H libcds-2.3.3/cds/intrusive/michael_list_dhp.h000066400000000000000000000006261341244201700212230ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_DHP_H #define CDSLIB_INTRUSIVE_MICHAEL_LIST_DHP_H #include #include #endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_DHP_H libcds-2.3.3/cds/intrusive/michael_list_hp.h000066400000000000000000000006221341244201700210530ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_HP_H #define CDSLIB_INTRUSIVE_MICHAEL_LIST_HP_H #include #include #endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_HP_H libcds-2.3.3/cds/intrusive/michael_list_nogc.h000066400000000000000000000607731341244201700214070ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H #define CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H #include #include #include namespace cds { namespace intrusive { namespace michael_list { /// Michael list node /** Template parameters: - Tag - a tag used to distinguish between different implementation */ template struct node { typedef gc::nogc gc ; ///< Garbage collector typedef Tag tag ; ///< tag typedef atomics::atomic< node * > atomic_ptr ; ///< atomic marked pointer atomic_ptr m_pNext ; ///< pointer to the next node in the container node() : m_pNext( nullptr ) {} }; } // namespace michael_list /// Michael's lock-free ordered single-linked list (template specialization for gc::nogc) /** @ingroup cds_intrusive_list \anchor cds_intrusive_MichaelList_nogc This specialization is intended for so-called append-only usage when no item reclamation may be performed. The class does not support item removal. See \ref cds_intrusive_MichaelList_hp "MichaelList" for description of template parameters. 
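For example, an append-only list can be declared like this (a sketch; \p Foo, its \p nKey field and the \p foo_less functor are illustrative):
\code
#include <cds/intrusive/michael_list_nogc.h>

struct Foo: public cds::intrusive::michael_list::node< cds::gc::nogc >
{
    int nKey;
};

struct foo_less {
    bool operator()( Foo const& lhs, Foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
};

struct foo_traits: public cds::intrusive::michael_list::traits
{
    typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::nogc >> hook;
    typedef foo_less less;
};

typedef cds::intrusive::MichaelList< cds::gc::nogc, Foo, foo_traits > append_only_list;
\endcode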
*/ template < typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_list::traits #else class Traits #endif > class MichaelList { public: typedef gc::nogc gc; ///< Garbage collector typedef T value_type; ///< type of value to be stored in the queue typedef Traits traits; ///< List traits typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::stat stat; ///< Internal statistics //@cond static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >; //@endcond protected: typedef typename node_type::atomic_ptr atomic_node_ptr ; ///< Atomic node pointer typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support) atomic_node_ptr m_pHead; ///< Head pointer item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@cond /// Position pointer for item search struct position { atomic_node_ptr * pPrev ; ///< Previous node node_type * pCur ; ///< Current node node_type * pNext ; ///< Next node }; //@endcond protected: //@cond static void clear_links( node_type * pNode ) { pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); } template static void dispose_node( node_type * pNode, Disposer disp ) { clear_links( pNode ); disp( node_traits::to_value_ptr( *pNode )); } template static void dispose_value( value_type& val, Disposer disp ) { dispose_node( node_traits::to_node_ptr( val ), disp ); } static bool link_node( node_type * pNode, position& pos ) { assert( pNode != nullptr ); link_checker::is_empty( pNode ); pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed ); if ( cds_likely( pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ))) return true; pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); return false; } //@endcond protected: //@cond template class iterator_type { friend class MichaelList; value_type * m_pNode; void next() { if ( m_pNode ) { node_type * pNode = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_acquire); if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else m_pNode = nullptr; } } protected: explicit iterator_type( node_type * pNode) { if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else m_pNode = nullptr; } explicit iterator_type( atomic_node_ptr const& refNode) { 
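// the nogc specialization never reclaims nodes, so a plain load of the
// node pointer is safe here without any guard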
node_type * pNode = refNode.load(memory_model::memory_order_relaxed); if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else m_pNode = nullptr; } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( const iterator_type& src ) : m_pNode( src.m_pNode ) {} value_ptr operator ->() const { return m_pNode; } value_ref operator *() const { assert( m_pNode != nullptr ); return *m_pNode; } /// Pre-increment iterator_type& operator ++() { next(); return *this; } /// Post-increment iterator_type operator ++(int) { iterator_type i(*this); next(); return i; } iterator_type& operator = (const iterator_type& src) { m_pNode = src.m_pNode; return *this; } template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return m_pNode != i.m_pNode; } }; //@endcond public: /// Forward iterator typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator(m_pHead.load(memory_model::memory_order_relaxed)); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator(m_pHead.load(memory_model::memory_order_relaxed)); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator(m_pHead.load(memory_model::memory_order_relaxed)); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } public: /// Default constructor initializes empty list MichaelList() : m_pHead( nullptr ) {} //@cond template >::value >> explicit MichaelList( Stat& st ) : m_pHead( nullptr ) , m_Stat( st ) {} //@endcond /// Destroys the list objects ~MichaelList() { clear(); } /// Inserts new node /** The function inserts \p val in the list if the list does not contain an item with key equal to \p val. Returns \p true if \p val is linked into the list, \p false otherwise. */ bool insert( value_type& val ) { return insert_at( m_pHead, val ); } /// Updates the item /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the list, then \p val is inserted into the list iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p val - argument \p val passed into the \p update() function If new item has been inserted (i.e. 
\p bNew is \p true) then \p item and \p val arguments refer to the same thing.

The functor may change non-key fields of the \p item; however, \p func must guarantee
that no other modifications can be made to this item by concurrent threads while it is being changed.

Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
\p second is \p true if a new item has been added or \p false if the item with key \p key
is already in the list.
*/
template <typename Func>
std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
{
    return update_at( m_pHead, val, func, bAllowInsert );
}
//@cond
template <typename Func>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<bool, bool> ensure( value_type& val, Func func )
{
    return update( val, func );
}
//@endcond

/// Finds the key \p key
/** \anchor cds_intrusive_MichaelList_nogc_find_func
    The function searches the item with key equal to \p key
    and calls the functor \p f for the item found.
    The interface of \p Func functor is:
    \code
    struct functor {
        void operator()( value_type& item, Q& key );
    };
    \endcode
    where \p item is the item found, \p key is the find function argument.

    The functor can change non-key fields of \p item.
    The function \p find does not serialize simultaneous access to the list \p item.
    If such access is possible you must provide your own synchronization schema
    to exclude unsafe item modifications.

    The function returns \p true if \p key is found, \p false otherwise.
*/
template <typename Q, typename Func>
bool find( Q& key, Func f )
{
    return find_at( m_pHead, key, key_comparator(), f );
}
//@cond
template <typename Q, typename Func>
bool find( Q const& key, Func f )
{
    return find_at( m_pHead, key, key_comparator(), f );
}
//@endcond

/// Finds the key \p key using \p pred predicate for searching
/**
    The function is an analog of \ref cds_intrusive_MichaelList_nogc_find_func "find(Q&, Func)"
    but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the list.
*/
template <typename Q, typename Less, typename Func>
bool find_with( Q& key, Less pred, Func f )
{
    CDS_UNUSED( pred );
    return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>(), f );
}
//@cond
template <typename Q, typename Less, typename Func>
bool find_with( Q const& key, Less pred, Func f )
{
    CDS_UNUSED( pred );
    return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>(), f );
}
//@endcond

/// Checks whether the list contains \p key
/**
    The function searches the item with key equal to \p key
    and returns a pointer to the item found, and \p nullptr otherwise.
*/
template <typename Q>
value_type * contains( Q const& key )
{
    return find_at( m_pHead, key, key_comparator());
}
//@cond
template <typename Q>
CDS_DEPRECATED("deprecated, use contains()")
value_type * find( Q const& key )
{
    return contains( key );
}
//@endcond

/// Checks whether the list contains \p key using \p pred predicate for searching
/**
    The function is an analog of contains( key ) but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p Less must imply the same element order as the comparator used for building the list.
*/
template <typename Q, typename Less>
value_type * contains( Q const& key, Less pred )
{
    CDS_UNUSED( pred );
    return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
//@cond
template <typename Q, typename Less>
CDS_DEPRECATED("deprecated, use contains()")
value_type * find_with( Q const& key, Less pred )
{
    return contains( key, pred );
}
//@endcond

/// Clears the list
/**
    The function unlinks all items from the list.
    For each unlinked item the item disposer \p disp is called after unlinking.
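For example (a sketch assuming the items are \p Foo objects allocated with \p new):
\code
list.clear( []( Foo * p ) { delete p; } );   // each unlinked item is passed to the lambda
\endcode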
*/ template void clear( Disposer disp ) { node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); do {} while ( cds_unlikely( !m_pHead.compare_exchange_weak( pHead, nullptr, memory_model::memory_order_relaxed ))); while ( pHead ) { node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); dispose_node( pHead, disp ); pHead = p; --m_ItemCounter; } } /// Clears the list using default disposer /** The function clears the list using default (provided in class template) disposer functor. */ void clear() { clear( disposer()); } /// Checks if the list is empty bool empty() const { return m_pHead.load( memory_model::memory_order_relaxed ) == nullptr; } /// Returns list's item count /** The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact does not mean that the list is empty. To check list emptyness use \p empty() method. */ size_t size() const { return m_ItemCounter.value(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond // split-list support bool insert_aux_node( node_type * pNode ) { return insert_aux_node( m_pHead, pNode ); } // split-list support bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) { assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type // We assume that comparator can correctly distinguish aux and regular node. return insert_at( refHead, *node_traits::to_value_ptr( pNode )); } bool insert_at( atomic_node_ptr& refHead, value_type& val ) { position pos; while ( true ) { if ( search( refHead, val, key_comparator(), pos )) { m_Stat.onInsertFailed(); return false; } if ( link_node( node_traits::to_node_ptr( val ), pos )) { ++m_ItemCounter; m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertRetry(); } } iterator insert_at_( atomic_node_ptr& refHead, value_type& val ) { if ( insert_at( refHead, val )) return iterator( node_traits::to_node_ptr( val )); return end(); } template std::pair update_at_( atomic_node_ptr& refHead, value_type& val, Func func, bool bAllowInsert ) { position pos; while ( true ) { if ( search( refHead, val, key_comparator(), pos )) { assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 ); func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); m_Stat.onUpdateExisting(); return std::make_pair( iterator( pos.pCur ), false ); } else { if ( !bAllowInsert ) { m_Stat.onUpdateFailed(); return std::make_pair( end(), false ); } if ( link_node( node_traits::to_node_ptr( val ), pos )) { ++m_ItemCounter; func( true, val , val ); m_Stat.onUpdateNew(); return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); } } m_Stat.onUpdateRetry(); } } template std::pair update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bAllowInsert ) { std::pair ret = update_at_( refHead, val, func, bAllowInsert ); return std::make_pair( ret.first != end(), ret.second ); } template bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) { position pos; if ( search( refHead, val, cmp, pos )) { assert( pos.pCur != nullptr ); f( *node_traits::to_value_ptr( *pos.pCur ), val ); m_Stat.onFindSuccess(); return true; } m_Stat.onFindFailed(); return false; } template value_type * find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) { iterator it = find_at_( refHead, val, 
cmp ); if ( it != end()) { m_Stat.onFindSuccess(); return &*it; } m_Stat.onFindFailed(); return nullptr; } template iterator find_at_( atomic_node_ptr& refHead, Q const& val, Compare cmp ) { position pos; if ( search( refHead, val, cmp, pos )) { assert( pos.pCur != nullptr ); m_Stat.onFindSuccess(); return iterator( pos.pCur ); } m_Stat.onFindFailed(); return end(); } //@endcond protected: //@cond template bool search( atomic_node_ptr& refHead, const Q& val, Compare cmp, position& pos ) { atomic_node_ptr * pPrev; node_type * pNext; node_type * pCur; back_off bkoff; try_again: pPrev = &refHead; pCur = pPrev->load(memory_model::memory_order_acquire); pNext = nullptr; while ( true ) { if ( !pCur ) { pos.pPrev = pPrev; pos.pCur = pCur; pos.pNext = pNext; return false; } pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); if ( cds_unlikely( pCur->m_pNext.load(memory_model::memory_order_acquire) != pNext )) { bkoff(); goto try_again; } if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire) != pCur )) { bkoff(); goto try_again; } assert( pCur != nullptr ); int nCmp = cmp( *node_traits::to_value_ptr( *pCur ), val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; pos.pCur = pCur; pos.pNext = pNext; return nCmp == 0; } pPrev = &( pCur->m_pNext ); pCur = pNext; } } // for split-list template void erase_for( Predicate pred ) { node_type * pPred = nullptr; node_type * pHead = m_pHead.load( memory_model::memory_order_relaxed ); while ( pHead ) { node_type * p = pHead->m_pNext.load( memory_model::memory_order_relaxed ); if ( pred( *node_traits::to_value_ptr( pHead ))) { assert( pPred != nullptr ); pPred->m_pNext.store( p, memory_model::memory_order_relaxed ); dispose_node( pHead, disposer()); } else pPred = pHead; pHead = p; } } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H libcds-2.3.3/cds/intrusive/michael_list_rcu.h000066400000000000000000001346511341244201700212470ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H #define CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H #include #include #include #include #include #include #include namespace cds { namespace intrusive { //@cond namespace michael_list { /// Node specialization for uRCU template struct node< cds::urcu::gc< RCU >, Tag > { typedef cds::urcu::gc< RCU > gc; ///< Garbage collector typedef Tag tag; ///< tag typedef cds::details::marked_ptr marked_ptr; ///< marked pointer typedef typename gc::template atomic_marked_ptr atomic_marked_ptr; ///< atomic marked pointer specific for GC atomic_marked_ptr m_pNext; ///< pointer to the next node in the container node * m_pDelChain; ///< Deleted node chain (local for a thread) constexpr node() noexcept : m_pNext( nullptr ) , m_pDelChain( nullptr ) {} }; } // namespace michael_list //@endcond /// Michael's lock-free ordered single-linked list (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_intrusive_list \anchor cds_intrusive_MichaelList_rcu Usually, ordered single-linked list is used as a building block for the hash table implementation. The complexity of searching is O(N). Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - type to be stored in the list; the type \p T should be based on (or has a member of type) cds::intrusive::micheal_list::node - \p Traits - type traits. 
See \p michael_list::traits for explanation. It is possible to declare option-based list with \p cds::intrusive::michael_list::make_traits metafunction, see \ref cds_intrusive_MichaelList_hp "here" for explanations. \par Usage Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: \code #include #include // Now, you can declare Michael's list for type Foo and default traits: typedef cds::intrusive::MichaelList >, Foo > rcu_michael_list; \endcode */ template < typename RCU, typename T, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_list::traits #else class Traits #endif > class MichaelList, T, Traits> { public: typedef T value_type; ///< type of value stored in the list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef cds::urcu::gc gc; ///< RCU schema typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename traits::stat stat; ///< Internal statistics typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking //@cond // Rebind traits (split-list support) template struct rebind_traits { typedef MichaelList< gc , value_type , typename cds::opt::make_options< traits, Options...>::type > type; }; // Stat selector template using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >; //@endcond protected: typedef typename node_type::marked_ptr marked_node_ptr; ///< Marked node pointer typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support) atomic_node_ptr m_pHead; ///< Head pointer item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics protected: //@cond enum erase_node_mask { erase_mask = 1, extract_mask = 3 }; typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; struct clear_and_dispose { void operator()( value_type * p ) { assert( p != nullptr ); clear_links( node_traits::to_node_ptr(p)); disposer()( p ); } }; /// Position pointer for item search struct position { atomic_node_ptr * pPrev ; ///< Previous node node_type * pCur ; ///< Current node node_type * pNext ; ///< Next node atomic_node_ptr& refHead; node_type * pDelChain; ///< Head of deleted node chain position( atomic_node_ptr& head ) : refHead( head ) , pDelChain( nullptr ) {} ~position() { dispose_chain( pDelChain ); } }; //@endcond public: using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, clear_and_dispose, void >; ///< pointer to extracted node private: //@cond struct chain_disposer { void operator()( node_type * pChain ) const { dispose_chain( pChain ); } }; typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer; //@endcond public: /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr; protected: //@cond template class iterator_type { friend class MichaelList; value_type * m_pNode; void next() { if ( m_pNode ) { node_type * p = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_relaxed).ptr(); m_pNode = p ? node_traits::to_value_ptr( p ) : nullptr; } } protected: explicit iterator_type( node_type * pNode) { if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else m_pNode = nullptr; } explicit iterator_type( atomic_node_ptr const& refNode) { node_type * pNode = refNode.load(memory_model::memory_order_relaxed).ptr(); m_pNode = pNode ? 
node_traits::to_value_ptr( *pNode ) : nullptr; } public: typedef typename cds::details::make_const_type::pointer value_ptr; typedef typename cds::details::make_const_type::reference value_ref; iterator_type() : m_pNode( nullptr ) {} iterator_type( const iterator_type& src ) : m_pNode( src.m_pNode ) {} value_ptr operator ->() const { return m_pNode; } value_ref operator *() const { assert( m_pNode != nullptr ); return *m_pNode; } /// Pre-increment iterator_type& operator ++() { next(); return *this; } /// Post-increment iterator_type operator ++(int) { iterator_type i(*this); next(); return i; } iterator_type& operator = (const iterator_type& src) { m_pNode = src.m_pNode; return *this; } template bool operator ==(iterator_type const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator_type const& i ) const { return m_pNode != i.m_pNode; } }; //@endcond public: ///@name Forward iterators (thread-safe only under RCU lock) //@{ /// Forward iterator /** You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the item the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( m_pHead ); } /// Returns an iterator that addresses the location succeeding the last element in a list /** Do not use the value returned by end function to access any item. Internally, end returning value equals to \p nullptr. The returned value can be used only to control reaching the end of the list. For empty list \code begin() == end() \endcode */ iterator end() { return iterator(); } /// Returns a forward const iterator addressing the first element in a list const_iterator begin() const { return const_iterator(m_pHead ); } /// Returns a forward const iterator addressing the first element in a list const_iterator cbegin() const { return const_iterator(m_pHead ); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator end() const { return const_iterator(); } /// Returns an const iterator that addresses the location succeeding the last element in a list const_iterator cend() const { return const_iterator(); } //@} public: /// Default constructor initializes empty list MichaelList() : m_pHead( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); } //@cond template >::value >> explicit MichaelList( Stat& st ) : m_pHead( nullptr ) , m_Stat( st ) {} //@endcond /// Destroy list ~MichaelList() { clear(); } /// Inserts new node /** The function inserts \p val in the list if the list does not contain an item with key equal to \p val. The function makes RCU lock internally. Returns \p true if \p val is linked into the list, \p false otherwise. */ bool insert( value_type& val ) { return insert_at( m_pHead, val ); } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split new item creating into two part: - create item with key only - insert new item into the list - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. 
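A minimal sketch (assuming \p theList is an \p rcu_michael_list as declared in the class description; \p Foo and its \p nVal payload field are illustrative):
\code
Foo * p = new Foo( 5 );
// the lambda initializes the payload only if the node has been linked
if ( !theList.insert( *p, []( Foo& f ) { f.nVal = 100; } ))
    delete p;   // the key is already in the list
\endcode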
User-defined functor \p f should guarantee that during changing \p val no any other changes could be made on this list's item by concurrent threads. The user-defined functor is called only if the inserting is success. The function makes RCU lock internally. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template bool insert( value_type& val, Func f ) { return insert_at( m_pHead, val, f ); } /// Updates the item /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the list, then \p val is inserted into the list iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the list - \p val - argument \p val passed into the \p update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor may change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the list. The function makes RCU lock internally. @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { return update_at( m_pHead, val, func, bAllowInsert ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the list /** The function searches the item \p val in the list and unlink it from the list if it is found and it is equal to \p val. Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key and deletes the item found. \p %unlink() finds an item by key and deletes it only if \p val is an item of that list, i.e. the pointer to the item found is equal to &val . The function returns \p true if success and \p false otherwise. RCU \p synchronize method can be called. Note that depending on RCU type used the \ref disposer call can be deferred. \p disposer specified in \p Traits is called for unlinked item. The function can throw cds::urcu::rcu_deadlock exception if deadlock is encountered and deadlock checking policy is opt::v::rcu_throw_deadlock. */ bool unlink( value_type& val ) { return unlink_at( m_pHead, val ); } /// Deletes the item from the list /** The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \p true. If the item with the key equal to \p key is not found the function return \p false. RCU \p synchronize method can be called. Note that depending on RCU type used the \ref disposer call can be deferred. \p disposer specified in \p Traits is called for deleted item. The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and the deadlock checking policy is \p opt::v::rcu_throw_deadlock. 
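For example (a sketch; \p rcu is the \p cds::urcu::gc typedef used by the list):
\code
// erase() can call RCU synchronize internally, so the RCU must not be locked here
assert( !rcu::is_locked());
bool result = theList.erase( 10 );
\endcode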
*/ template bool erase( Q const& key ) { return erase_at( m_pHead, key, key_comparator()); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(Q const&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); } /// Deletes the item from the list /** The function searches an item with key equal to \p key in the list, call \p func functor with item found, unlinks it from the list, and returns \p true. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with the key equal to \p key is not found the function return \p false. RCU \p synchronize method can be called. Note that depending on RCU type used the \ref disposer call can be deferred. \p disposer specified in \p Traits is called for deleted item. The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and the deadlock checking policy is \p opt::v::rcu_throw_deadlock. */ template bool erase( Q const& key, Func func ) { return erase_at( m_pHead, key, key_comparator(), func ); } /// Deletes the item from the list using \p pred predicate for searching /** The function is an analog of \p erase(Q const&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. \p disposer specified in \p Traits is called for deleted item. */ template bool erase_with( Q const& key, Less pred, Func func ) { CDS_UNUSED( pred ); return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), func ); } /// Extracts an item from the list /** The function searches an item with key equal to \p key in the list, unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If \p key is not found the function returns an empty \p exempt_ptr. @note The function does NOT dispose the item found. It just unlinks the item from the list and returns a pointer to item found. You shouldn't lock RCU for current thread before calling this function, and you should manually release the returned exempt pointer before reusing it. \code #include #include typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; rcu_michael_list theList; // ... rcu_michael_list::exempt_ptr p1; // The RCU should NOT be locked when extract() is called! assert( !rcu::is_locked()); // You can call extract() function p1 = theList.extract( 10 ); if ( p1 ) { // do something with p1 ... } // We may safely release p1 here // release() passes the pointer to RCU reclamation cycle: // it invokes RCU retire_ptr function with the disposer you provided for the list. p1.release(); \endcode */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( extract_at( m_pHead, key, key_comparator())); } /// Extracts an item from the list using \p pred predicate for searching /** This function is the analog for \p extract(Q const&) The \p pred is a predicate used for key comparing. \p Less has the interface like \p std::less. 
\p pred must imply the same element order as \ref key_comparator. */ template exempt_ptr extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return exempt_ptr( extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less())); } /// Find the key \p val /** The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. The function \p find does not serialize simultaneous access to the list \p item. If such access is possible you must provide your own synchronization schema to exclude unsafe item modifications. The function makes RCU lock internally. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_at( m_pHead, key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_at( m_pHead, key, key_comparator(), f ); } //@endcond /// Finds \p key using \p pred predicate for searching /** The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the list. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); } //@endcond /// Checks whether the list contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. */ template bool contains( Q const& key ) { return find_at( m_pHead, key, key_comparator()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the map contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_intrusive_MichaelList_rcu_get The function searches the item with key equal to \p key and returns the pointer to item found. If \p key is not found it returns empty \p raw_ptr object. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; ord_list theList; // ... typename ord_list::raw_ptr rp; { // Lock RCU ord_list::rcu_lock lock; rp = theList.get( 5 ); if ( rp ) { // Deal with rp //... 
    }
    // Unlock RCU by rcu_lock destructor
    // Node owned by rp can be retired by disposer at any time after RCU has been unlocked
}

// You can manually release rp after the RCU-locked section
rp.release();
\endcode
*/
template <typename Q>
raw_ptr get( Q const& key )
{
    return get_at( m_pHead, key, key_comparator());
}

/// Finds \p key and returns the item found
/**
    The function is an analog of \ref cds_intrusive_MichaelList_rcu_get "get(Q const&)"
    but \p pred is used for comparing the keys.

    \p Less functor has the semantics like \p std::less but should take arguments
    of type \p value_type and \p Q in any order.
    \p pred must imply the same element order as the comparator used for building the list.
*/
template <typename Q, typename Less>
raw_ptr get_with( Q const& key, Less pred )
{
    CDS_UNUSED( pred );
    return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}

/// Clears the list using default disposer
/**
    The function clears the list using the default (provided by \p Traits class template argument) disposer functor.

    RCU \p synchronize method can be called.
    Note that depending on RCU type used the \ref disposer invocation can be deferred.

    The function can throw \p cds::urcu::rcu_deadlock exception if a deadlock is encountered
    and the deadlock checking policy is \p opt::v::rcu_throw_deadlock.
*/
void clear()
{
    if ( !empty()) {
        check_deadlock_policy::check();
        marked_node_ptr pHead;
        for (;;) {
            {
                rcu_lock l;
                pHead = m_pHead.load(memory_model::memory_order_acquire);
                if ( !pHead.ptr())
                    break;
                marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed));
                if ( cds_unlikely( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed )))
                    continue;
                if ( cds_unlikely( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed )))
                    continue;
            }

            --m_ItemCounter;
            dispose_node( pHead.ptr());
        }
    }
}

/// Checks if the list is empty
bool empty() const
{
    return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr;
}

/// Returns list's item count
/**
    The value returned depends on item counter provided by \p Traits.
    For \p atomicity::empty_item_counter, this function always returns 0.

    @note Even if you use a real item counter and it returns 0, this fact does not mean
    that the list is empty. To check list emptiness use the \p empty() method.
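For instance:
\code
size_t n = theList.size();   // always 0 with atomicity::empty_item_counter
bool   e = theList.empty();  // reliable emptiness check
\endcode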
*/ size_t size() const { return m_ItemCounter.value(); } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond static void clear_links( node_type * pNode ) { pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release ); pNode->m_pDelChain = nullptr; } static void dispose_node( node_type * pNode ) { assert( pNode ); assert( !gc::is_locked()); gc::template retire_ptr( node_traits::to_value_ptr( *pNode )); } static void dispose_chain( node_type * pChain ) { if ( pChain ) { assert( !gc::is_locked()); auto f = [&pChain]() -> cds::urcu::retired_ptr { node_type * p = pChain; if ( p ) { pChain = p->m_pDelChain; return cds::urcu::make_retired_ptr( node_traits::to_value_ptr( p )); } return cds::urcu::make_retired_ptr( static_cast(nullptr)); }; gc::batch_retire( std::ref( f )); } } bool link_node( node_type * pNode, position& pos ) { assert( pNode != nullptr ); link_checker::is_empty( pNode ); marked_node_ptr p( pos.pCur ); pNode->m_pNext.store( p, memory_model::memory_order_release ); if ( cds_likely( pos.pPrev->compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))) return true; pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); return false; } static void link_to_remove_chain( position& pos, node_type * pDel ) { assert( pDel->m_pDelChain == nullptr ); pDel->m_pDelChain = pos.pDelChain; pos.pDelChain = pDel; } bool unlink_node( position& pos, erase_node_mask nMask ) { assert( gc::is_locked()); // Mark the node (logical deletion) marked_node_ptr next( pos.pNext, 0 ); if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed ))) { // Try physical removal - fast path marked_node_ptr cur( pos.pCur ); if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { if ( nMask == erase_mask ) link_to_remove_chain( pos, pos.pCur ); } else { // Slow path search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator()); } return true; } return false; } // split-list support bool insert_aux_node( node_type * pNode ) { return insert_aux_node( m_pHead, pNode ); } // split-list support bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) { assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type // We assume that comparator can correctly distinguish between aux and regular node. 
        return insert_at( refHead, *node_traits::to_value_ptr( pNode ));
    }

    bool insert_at( atomic_node_ptr& refHead, value_type& val )
    {
        position pos( refHead );
        {
            rcu_lock l;
            return insert_at_locked( pos, val );
        }
    }

    template <typename Func>
    bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f )
    {
        position pos( refHead );
        {
            rcu_lock l;
            while ( true ) {
                if ( search( refHead, val, pos, key_comparator())) {
                    m_Stat.onInsertFailed();
                    return false;
                }

                if ( link_node( node_traits::to_node_ptr( val ), pos )) {
                    f( val );
                    ++m_ItemCounter;
                    m_Stat.onInsertSuccess();
                    return true;
                }

                // clear next field
                node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
                m_Stat.onInsertRetry();
            }
        }
    }

    iterator insert_at_( atomic_node_ptr& refHead, value_type& val )
    {
        position pos( refHead );
        {
            rcu_lock l;
            if ( insert_at_locked( pos, val ))
                return iterator( node_traits::to_node_ptr( val ));
        }
        return end();
    }

    template <typename Func>
    std::pair<iterator, bool> update_at_( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert )
    {
        position pos( refHead );
        {
            rcu_lock l;
            return update_at_locked( pos, val, func, bInsert );
        }
    }

    template <typename Func>
    std::pair<bool, bool> update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert )
    {
        position pos( refHead );
        {
            rcu_lock l;
            std::pair<iterator, bool> ret = update_at_locked( pos, val, func, bInsert );
            return std::make_pair( ret.first != end(), ret.second );
        }
    }

    bool unlink_at( atomic_node_ptr& refHead, value_type& val )
    {
        position pos( refHead );
        back_off bkoff;
        check_deadlock_policy::check();

        for (;;) {
            {
                rcu_lock l;
                if ( !search( refHead, val, pos, key_comparator())
                  || node_traits::to_value_ptr( *pos.pCur ) != &val )
                {
                    m_Stat.onEraseFailed();
                    return false;
                }
                if ( !unlink_node( pos, erase_mask )) {
                    bkoff();
                    m_Stat.onEraseRetry();
                    continue;
                }
            }

            --m_ItemCounter;
            m_Stat.onEraseSuccess();
            return true;
        }
    }

    template <typename Q, typename Compare, typename Func>
    bool erase_at( position& pos, Q const& val, Compare cmp, Func f )
    {
        back_off bkoff;
        check_deadlock_policy::check();

        node_type * pDel;
        for (;;) {
            {
                rcu_lock l;
                if ( !search( pos.refHead, val, pos, cmp )) {
                    m_Stat.onEraseFailed();
                    return false;
                }

                // store pCur since it may be changed by unlink_node() slow path
                pDel = pos.pCur;
                if ( !unlink_node( pos, erase_mask )) {
                    bkoff();
                    m_Stat.onEraseRetry();
                    continue;
                }
            }

            assert( pDel );
            f( *node_traits::to_value_ptr( pDel ));
            --m_ItemCounter;
            m_Stat.onEraseSuccess();
            return true;
        }
    }

    template <typename Q, typename Compare, typename Func>
    bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp, Func f )
    {
        position pos( refHead );
        return erase_at( pos, val, cmp, f );
    }

    template <typename Q, typename Compare>
    bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp )
    {
        position pos( refHead );
        return erase_at( pos, val, cmp, []( value_type const& ) {} );
    }

    template <typename Q, typename Compare>
    value_type * extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
    {
        position pos( refHead );
        back_off bkoff;
        assert( !gc::is_locked()); // RCU must not be locked!!!
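        // Note: extract_at() unlinks with extract_mask, not erase_mask, so unlink_node()
        // does not put the node onto the removal chain (see link_to_remove_chain());
        // ownership of the extracted item passes to the caller instead of the disposer.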
        node_type * pExtracted;
        {
            rcu_lock l;

            for (;;) {
                if ( !search( refHead, val, pos, cmp )) {
                    m_Stat.onEraseFailed();
                    return nullptr;
                }

                // store pCur since it may be changed by unlink_node() slow path
                pExtracted = pos.pCur;
                if ( !unlink_node( pos, extract_mask )) {
                    bkoff();
                    m_Stat.onEraseRetry();
                    continue;
                }

                --m_ItemCounter;
                value_type * pRet = node_traits::to_value_ptr( pExtracted );
                assert( pExtracted->m_pDelChain == nullptr );
                m_Stat.onEraseSuccess();
                return pRet;
            }
        }
    }

    template <typename Q, typename Compare, typename Func>
    bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f )
    {
        position pos( refHead );

        {
            rcu_lock l;
            if ( search( refHead, val, pos, cmp )) {
                assert( pos.pCur != nullptr );
                f( *node_traits::to_value_ptr( *pos.pCur ), val );
                m_Stat.onFindSuccess();
                return true;
            }
        }

        m_Stat.onFindFailed();
        return false;
    }

    template <typename Q, typename Compare>
    bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
    {
        position pos( refHead );
        {
            rcu_lock l;
            return find_at_locked( pos, val, cmp ) != cend();
        }
    }

    template <typename Q, typename Compare>
    raw_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
    {
        // RCU should be locked!
        assert( gc::is_locked());

        position pos( refHead );
        if ( search( refHead, val, pos, cmp )) {
            m_Stat.onFindSuccess();
            return raw_ptr( node_traits::to_value_ptr( pos.pCur ), raw_ptr_disposer( pos ));
        }

        m_Stat.onFindFailed();
        return raw_ptr( raw_ptr_disposer( pos ));
    }
    //@endcond

protected:
    //@cond
    template <typename Q, typename Compare>
    bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp )
    {
        // RCU lock should be locked!!!
        assert( gc::is_locked());

        atomic_node_ptr * pPrev;
        marked_node_ptr pNext;
        marked_node_ptr pCur;
        back_off bkoff;

    try_again:
        pPrev = &refHead;
        pCur = pPrev->load(memory_model::memory_order_acquire);
        pNext = nullptr;

        while ( true ) {
            if ( !pCur.ptr()) {
                pos.pPrev = pPrev;
                pos.pCur = nullptr;
                pos.pNext = nullptr;
                return false;
            }

            pNext = pCur->m_pNext.load(memory_model::memory_order_acquire);
            if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire) != pCur
                || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire)))
            {
                bkoff();
                goto try_again;
            }

            if ( pNext.bits()) {
                // pCur is marked as deleted. Try to unlink it from the list
                if ( cds_likely( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr()),
                        memory_model::memory_order_acquire, atomics::memory_order_relaxed )))
                {
                    if ( pNext.bits() == erase_mask )
                        link_to_remove_chain( pos, pCur.ptr());
                    m_Stat.onHelpingSuccess();
                }

                m_Stat.onHelpingFailed();
                goto try_again;
            }

            assert( pCur.ptr() != nullptr );
            int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
            if ( nCmp >= 0 ) {
                pos.pPrev = pPrev;
                pos.pCur = pCur.ptr();
                pos.pNext = pNext.ptr();
                return nCmp == 0;
            }

            pPrev = &( pCur->m_pNext );
            pCur = pNext;
        }
    }
    //@endcond

private:
    //@cond
    bool insert_at_locked( position& pos, value_type& val )
    {
        // RCU lock should be locked!!!
        assert( gc::is_locked());

        while ( true ) {
            if ( search( pos.refHead, val, pos, key_comparator())) {
                m_Stat.onInsertFailed();
                return false;
            }

            if ( link_node( node_traits::to_node_ptr( val ), pos )) {
                ++m_ItemCounter;
                m_Stat.onInsertSuccess();
                return true;
            }

            // clear next field
            node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
            m_Stat.onInsertRetry();
        }
    }

    template <typename Func>
    std::pair<iterator, bool> update_at_locked( position& pos, value_type& val, Func func, bool bInsert )
    {
        // RCU should be locked!!!
        assert( gc::is_locked());

        while ( true ) {
            if ( search( pos.refHead, val, pos, key_comparator())) {
                assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 );

                func( false, *node_traits::to_value_ptr( *pos.pCur ), val );
                m_Stat.onUpdateExisting();
                return std::make_pair( iterator( pos.pCur ), false );
            }
            else {
                if ( !bInsert ) {
                    m_Stat.onUpdateFailed();
                    return std::make_pair( end(), false );
                }

                if ( link_node( node_traits::to_node_ptr( val ), pos )) {
                    ++m_ItemCounter;
                    func( true, val, val );
                    m_Stat.onUpdateNew();
                    return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
                }

                // clear the next field
                node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
                m_Stat.onUpdateRetry();
            }
        }
    }

    template <typename Q, typename Compare>
    const_iterator find_at_locked( position& pos, Q const& val, Compare cmp )
    {
        assert( gc::is_locked());

        if ( search( pos.refHead, val, pos, cmp )) {
            assert( pos.pCur != nullptr );
            m_Stat.onFindSuccess();
            return const_iterator( pos.pCur );
        }

        m_Stat.onFindFailed();
        return cend();
    }
    //@endcond
};

}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H
libcds-2.3.3/cds/intrusive/michael_set.h000066400000000000000000001161741341244201700202160ustar00rootroot00000000000000
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_H
#define CDSLIB_INTRUSIVE_MICHAEL_SET_H

#include
#include
#include

namespace cds { namespace intrusive {

    /// Michael's hash set
    /** @ingroup cds_intrusive_map
        \anchor cds_intrusive_MichaelHashSet_hp

        Source:
        - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets"

        Michael's hash table algorithm is based on a lock-free ordered list and it is very simple.
        The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer
        to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically
        expanded. However, each bucket may contain an unbounded number of items.

        Template parameters are:
        - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for \p OrderedList
        - \p OrderedList - ordered list implementation used as a bucket for the hash set, possible implementations:
            \p MichaelList, \p LazyList, \p IterableList.
            The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
            the reclamation schema \p GC used by the hash-set, the comparison functor for the type \p T
            and other features specific for the ordered list.
        - \p Traits - type traits. See \p michael_set::traits for explanation.
            Instead of defining \p Traits struct you can use option-based syntax with
            \p michael_set::make_traits metafunction.

        There are several specializations of \p %MichaelHashSet for each \p GC. You should include:
        - cds/intrusive/michael_set_rcu.h for \ref cds_intrusive_MichaelHashSet_rcu "RCU type"
        - cds/intrusive/michael_set_nogc.h for \ref cds_intrusive_MichaelHashSet_nogc "append-only set"
        - cds/intrusive/michael_set.h for \p gc::HP, \p gc::DHP

        <b>Hash functor</b>

        Some member functions of Michael's hash set accept the key parameter of type \p Q which differs from \p value_type.
        It is expected that type \p Q contains the full key of \p value_type, and for equal keys of type \p Q and \p value_type
        the hash values of these keys must be equal.
        The hash functor \p Traits::hash should accept parameters of both types:
        \code
        // Our node type
        struct Foo {
            std::string key_;   // key field
            // ... other fields
        };

        // Hash functor
        struct fooHash {
            size_t operator()( const std::string& s ) const { return std::hash<std::string>()( s ); }
            size_t operator()( const Foo& f ) const { return (*this)( f.key_ ); }
        };
        \endcode

        <b>How to use</b>

        First, you should define the ordered list type to use in your hash set:
        \code
        // For gc::HP-based MichaelList implementation
        #include <cds/intrusive/michael_list_hp.h>

        // cds::intrusive::MichaelHashSet declaration
        #include <cds/intrusive/michael_set.h>

        // Type of hash-set items
        struct Foo: public cds::intrusive::michael_list::node< cds::gc::HP >
        {
            std::string key_;   // key field
            unsigned    val_;   // value field
            // ... other value fields
        };

        // Declare comparator for the item
        struct FooCmp
        {
            int operator()( const Foo& f1, const Foo& f2 ) const
            {
                return f1.key_.compare( f2.key_ );
            }
        };

        // Declare bucket type for Michael's hash set
        // The bucket type is any ordered list type like MichaelList, LazyList
        typedef cds::intrusive::MichaelList< cds::gc::HP, Foo,
            typename cds::intrusive::michael_list::make_traits<
                // hook option
                cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > >
                // item comparator option
                ,cds::opt::compare< FooCmp >
            >::type
        > Foo_bucket;
        \endcode

        Second, you should declare Michael's hash set container:
        \code
        // Declare hash functor
        // Note, the hash functor accepts parameters of type Foo and std::string
        struct FooHash {
            size_t operator()( const Foo& f ) const
            {
                return cds::opt::v::hash<std::string>()( f.key_ );
            }
            size_t operator()( const std::string& f ) const
            {
                return cds::opt::v::hash<std::string>()( f );
            }
        };

        // Michael's set typedef
        typedef cds::intrusive::MichaelHashSet<
            cds::gc::HP
            ,Foo_bucket
            ,typename cds::intrusive::michael_set::make_traits<
                cds::opt::hash< FooHash >
            >::type
        > Foo_set;
        \endcode

        Now, you can use \p Foo_set in your application.

        Like other intrusive containers, you may build several containers on a single item structure:
        \code
        #include <cds/intrusive/michael_list_hp.h>
        #include <cds/intrusive/michael_list_dhp.h>
        #include <cds/intrusive/michael_set.h>

        struct tag_key1_idx;
        struct tag_key2_idx;

        // Your two-key data
        // The first key is maintained by the gc::HP garbage collector, the second key by gc::DHP
        struct Foo
            : public cds::intrusive::michael_list::node< cds::gc::HP, tag_key1_idx >
            , public cds::intrusive::michael_list::node< cds::gc::DHP, tag_key2_idx >
        {
            std::string  key1_;  // first key field
            unsigned int key2_;  // second key field
            // ... value fields and fields for controlling item's lifetime
        };

        // Declare comparators for the item
        struct Key1Cmp
        {
            int operator()( const Foo& f1, const Foo& f2 ) const { return f1.key1_.compare( f2.key1_ ); }
        };
        struct Key2Less
        {
            bool operator()( const Foo& f1, const Foo& f2 ) const { return f1.key2_ < f2.key2_; }
        };

        // Declare bucket type for Michael's hash set indexed by key1_ field and maintained by gc::HP
        typedef cds::intrusive::MichaelList< cds::gc::HP, Foo,
            typename cds::intrusive::michael_list::make_traits<
                // hook option
                cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP >, cds::opt::tag< tag_key1_idx > > >
                // item comparator option
                ,cds::opt::compare< Key1Cmp >
            >::type
        > Key1_bucket;

        // Declare bucket type for Michael's hash set indexed by key2_ field and maintained by gc::DHP
        typedef cds::intrusive::MichaelList< cds::gc::DHP, Foo,
            typename cds::intrusive::michael_list::make_traits<
                // hook option
                cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::DHP >, cds::opt::tag< tag_key2_idx > > >
                // item comparator option
                ,cds::opt::less< Key2Less >
            >::type
        > Key2_bucket;

        // Declare hash functors
        struct Key1Hash {
            size_t operator()( const Foo& f ) const { return cds::opt::v::hash<std::string>()( f.key1_ ); }
            size_t operator()( const std::string& s ) const { return cds::opt::v::hash<std::string>()( s ); }
        };
        struct Key2Hash {
            size_t operator()( const Foo& f ) const { return (size_t) f.key2_; }
            size_t operator()( unsigned int i ) const { return (size_t) i; }
        };

        // Michael's set indexed by key1_ field
        typedef cds::intrusive::MichaelHashSet<
            cds::gc::HP
            ,Key1_bucket
            ,typename cds::intrusive::michael_set::make_traits<
                cds::opt::hash< Key1Hash >
            >::type
        > key1_set;

        // Michael's set indexed by key2_ field
        typedef cds::intrusive::MichaelHashSet<
            cds::gc::DHP
            ,Key2_bucket
            ,typename cds::intrusive::michael_set::make_traits<
                cds::opt::hash< Key2Hash >
            >::type
        > key2_set;
        \endcode
    */
    template <
        class GC,
        class OrderedList,
#ifdef CDS_DOXYGEN_INVOKED
        class Traits = michael_set::traits
#else
        class Traits
#endif
    >
    class MichaelHashSet
    {
    public:
        typedef GC          gc;           ///< Garbage collector
        typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
        typedef Traits      traits;       ///< Set traits

        typedef typename ordered_list::value_type     value_type;     ///< type of value to be stored in the set
        typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
        typedef typename ordered_list::disposer       disposer;       ///< Node disposer functor
#ifdef CDS_DOXYGEN_INVOKED
        typedef typename ordered_list::stat           stat;           ///< Internal statistics
#endif

        /// Hash functor for \p value_type and all its derivatives that you use
        typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
        typedef typename traits::item_counter item_counter; ///< Item counter type
        typedef typename traits::allocator    allocator;    ///< Bucket table allocator

        typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer

        /// Count of hazard pointers required for the algorithm
        static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount;

        // GC and OrderedList::gc must be the same
        static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");

    protected:
        //@cond
        typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;

        typedef typename ordered_list::template rebind_traits<
            cds::opt::item_counter< cds::atomicity::empty_item_counter >
            , cds::opt::stat< typename bucket_stat::wrapped_stat >
        >::type internal_bucket_type;

        typedef typename std::allocator_traits<allocator>::template
rebind_alloc< internal_bucket_type > bucket_table_allocator; //@endcond public: //@cond typedef typename bucket_stat::stat stat; //@endcond protected: //@cond hash m_HashFunctor; ///< Hash functor size_t const m_nHashBitmask; internal_bucket_type* m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion - The iterator cannot be moved across thread boundary because it may contain GC's guard that is thread-private GC data. Iterator thread safety depends on type of \p OrderedList: - for \p MichaelList and \p LazyList: iterator guarantees safety even if you delete the item that iterator points to because that item is guarded by hazard pointer. However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the set. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. Use this iterator on the concurrent container for debugging purpose only. - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. */ typedef michael_set::details::iterator< internal_bucket_type, false > iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( m_Buckets[0].begin(), bucket_begin(), bucket_end()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return get_const_begin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return get_const_begin(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return get_const_end(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return get_const_end(); } //@} public: /// Initializes hash set /** The Michael's hash set is an unbounded container, but its hash table is non-expandable. At construction time you should pass estimated maximum item count and a load factor. The load factor is average size of one bucket - a small number between 1 and 10. The bucket is an ordered single-linked list, searching in the bucket has linear complexity O(nLoadFactor). The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. */ MichaelHashSet( size_t nMaxItemCount, ///< estimation of max item count in the hash set size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket. Small integer up to 10. 
) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clears hash set object and destroys it ~MichaelHashSet() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { bool bRet = bucket( val ).insert( val ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the inserting is success. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template bool insert( value_type& val, Func f ) { bool bRet = bucket( val ).insert( val, f ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Updates the element /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature depends of the type of \p OrderedList: for \p MichaelList, \p LazyList \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refers to the same thing. The functor may change non-key fields of the \p item. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. for \p IterableList \code void func( value_type& val, value_type * old ); \endcode where - \p val - argument \p val passed into the \p %update() function - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the set. 
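
        A minimal usage sketch for a \p MichaelList or \p LazyList bucket (the set type
        \p my_set and item type \p my_item are illustrative names, not part of the library):
        \code
        my_set theSet( 1024, 2 );
        my_item item;
        std::pair<bool, bool> res = theSet.update( item,
            []( bool bNew, my_item& what, my_item& val ) {
                // bNew == true:  val has just been inserted, &what == &val
                // bNew == false: what is the element already in the set
            });
        // res.first  - true if the operation succeeded
        // res.second - true if a new item has been inserted
        \endcode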
*/ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts or updates the node (only for \p IterableList) /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the current element is changed to \p val, the old element will be retired later by call \p Traits::disposer. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key already in the set. */ #ifdef CDS_DOXYGEN_INVOKED std::pair upsert( value_type& val, bool bAllowInsert = true ) #else template typename std::enable_if< std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value, std::pair >::type upsert( Q& val, bool bAllowInsert = true ) #endif { std::pair bRet = bucket( val ).upsert( val, bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; } /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlink it if it is found and is equal to \p val. The function returns \p true if success and \p false otherwise. */ bool unlink( value_type& val ) { bool bRet = bucket( val ).unlink( val ); if ( bRet ) --m_ItemCounter; return bRet; } /// Deletes the item from the set /** \anchor cds_intrusive_MichaelHashSet_hp_erase The function searches an item with key equal to \p key in the set, unlinks it, and returns \p true. If the item with key equal to \p key is not found the function return \p false. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template bool erase( Q const& key ) { if ( bucket( key ).erase( key )) { --m_ItemCounter; return true; } return false; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_erase "erase(Q const&)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred ) { if ( bucket( key ).erase_with( key, pred )) { --m_ItemCounter; return true; } return false; } /// Deletes the item from the set /** \anchor cds_intrusive_MichaelHashSet_hp_erase_func The function searches an item with key equal to \p key in the set, call \p f functor with item found, and unlinks it from the set. The \ref disposer specified in \p OrderedList class template parameter is called by garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p key is not found the function return \p false. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
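
        A usage sketch (the set object and key are illustrative):
        \code
        // Erase by key and inspect the item before it is passed to the disposer
        bool bDeleted = theSet.erase( key,
            []( value_type const& item ) {
                // item is still valid here; the GC disposes it later
            });
        \endcode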
*/ template bool erase( Q const& key, Func f ) { if ( bucket( key ).erase( key, f )) { --m_ItemCounter; return true; } return false; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { if ( bucket( key ).erase_with( key, pred, f )) { --m_ItemCounter; return true; } return false; } /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. @note \p %erase_at() is supported only for \p %MichaelHashSet based on \p IterableList. */ #ifdef CDS_DOXYGEN_INVOKED bool erase_at( iterator const& iter ) #else template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type erase_at( Iterator const& iter ) #endif { assert( iter != end()); assert( iter.bucket() != nullptr ); if ( iter.bucket()->erase_at( iter.underlying_iterator())) { --m_ItemCounter; return true; } return false; } /// Extracts the item with specified \p key /** \anchor cds_intrusive_MichaelHashSet_hp_extract The function searches an item with key equal to \p key, unlinks it from the set, and returns an guarded pointer to the item extracted. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. The \p disposer specified in \p OrderedList class' template parameter is called automatically by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::intrusive::MichaelHashSet< your_template_args > michael_set; michael_set theSet; // ... { michael_set::guarded_ptr gp( theSet.extract( 5 )); if ( gp ) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( Q const& key ) { guarded_ptr gp = bucket( key ).extract( key ); if ( gp ) --m_ItemCounter; return gp; } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the list. */ template guarded_ptr extract_with( Q const& key, Less pred ) { guarded_ptr gp = bucket( key ).extract_with( key, pred ); if ( gp ) --m_ItemCounter; return gp; } /// Finds the key \p key /** \anchor cds_intrusive_MichaelHashSet_hp_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. 
The functor may change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor may modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return bucket( key ).find( key, f ); } //@cond template bool find( Q const& key, Func f ) { return bucket( key ).find( key, f ); } //@endcond /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) /** If \p key is not found the function returns \p end(). @note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find( Q& key ) { internal_bucket_type& b = bucket( key ); typename internal_bucket_type::iterator it = b.find( key ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find( Q const& key ) { internal_bucket_type& b = bucket( key ); typename internal_bucket_type::iterator it = b.find( key ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@endcond /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. If \p key is not found the function returns \p end(). 
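
        A usage sketch (assuming an \p IterableList-based set; the names are illustrative):
        \code
        auto it = theSet.find_with( key, my_less());
        if ( it != theSet.end()) {
            // The item pointed to by it can be accessed safely:
            // for IterableList the iterator is thread-safe
        }
        \endcode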
@note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find_with( Q& key, Less pred ) { internal_bucket_type& b = bucket( key ); typename internal_bucket_type::iterator it = b.find_with( key, pred ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find_with( Q const& key, Less pred ) { internal_bucket_type& b = bucket( key ); typename internal_bucket_type::iterator it = b.find_with( key, pred ); if ( it == b.end()) return end(); return iterator( it, &b, bucket_end()); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if the key is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template bool contains( Q const& key ) { return bucket( key ).contains( key ); } /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) { return bucket( key ).contains( key, pred ); } /// Finds the key \p key and return the item found /** \anchor cds_intrusive_MichaelHashSet_hp_get The function searches the item with key equal to \p key and returns the guarded pointer to the item found. If \p key is not found the function returns an empty \p guarded_ptr. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::intrusive::MichaelHashSet< your_template_params > michael_set; michael_set theSet; // ... { michael_set::guarded_ptr gp( theSet.get( 5 )); if ( theSet.get( 5 )) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for \p OrderedList template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) { return bucket( key ).get( key ); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr get_with( Q const& key, Less pred ) { return bucket( key ).get_with( key, pred ); } /// Clears the set (non-atomic) /** The function unlink all items from the set. The function is not atomic. It cleans up each bucket and then resets the item counter to zero. If there are a thread that performs insertion while \p %clear() is working the result is undefined in general case: \p empty() may return \p true but the set may contain item(s). Therefore, \p %clear() may be used only for debugging purposes. For each item the \p disposer is called after unlinking. 
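
        A sketch of the caveat (illustrative):
        \code
        theSet.clear();
        // Not guaranteed: another thread may have inserted concurrently
        // assert( theSet.empty());
        \endcode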
*/ void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the set is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the set /** If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } /// Returns the size of hash table /** Since \p %MichaelHashSet cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters, see \p MichaelHashSet::MichaelHashSet. */ size_t bucket_count() const { return m_nHashBitmask + 1; } private: //@cond internal_bucket_type * bucket_begin() const { return m_Buckets; } internal_bucket_type * bucket_end() const { return m_Buckets + bucket_count(); } const_iterator get_const_begin() const { return const_iterator( m_Buckets[0].cbegin(), bucket_begin(), bucket_end()); } const_iterator get_const_end() const { return const_iterator( bucket_end()[-1].cend(), bucket_end() - 1, bucket_end()); } template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * b ) { new (b) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * b ) { new (b) internal_bucket_type( m_Stat ); } /// Calculates hash value of \p key template size_t hash_value( const Q& key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( const Q& key ) { return m_Buckets[hash_value( key )]; } //@endcond }; }} // namespace cds::intrusive #endif // ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_H libcds-2.3.3/cds/intrusive/michael_set_nogc.h000066400000000000000000000421041341244201700212130ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H #define CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H #include #include #include namespace cds { namespace intrusive { /// Michael's hash set (template specialization for gc::nogc) /** @ingroup cds_intrusive_map \anchor cds_intrusive_MichaelHashSet_nogc This specialization is so-called append-only when no item reclamation may be performed. The set does not support deleting of list item. See \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" for description of template parameters. The template parameter \p OrderedList should be any \p cds::gc::nogc -derived ordered list, for example, \ref cds_intrusive_MichaelList_nogc "append-only MichaelList". 
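
        A minimal declaration sketch (the item type \p Foo with its comparator \p FooCmp and
        hash functor \p FooHash are illustrative, by analogy with the \p gc::HP example
        in michael_set.h):
        \code
        #include <cds/intrusive/michael_list_nogc.h>
        #include <cds/intrusive/michael_set_nogc.h>

        // Append-only bucket type
        typedef cds::intrusive::MichaelList< cds::gc::nogc, Foo,
            typename cds::intrusive::michael_list::make_traits<
                cds::intrusive::opt::hook<
                    cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::nogc > > >
                ,cds::opt::compare< FooCmp >
            >::type
        > Foo_bucket;

        // Append-only Michael's set
        typedef cds::intrusive::MichaelHashSet< cds::gc::nogc, Foo_bucket,
            typename cds::intrusive::michael_set::make_traits<
                cds::opt::hash< FooHash >
            >::type
        > Foo_nogc_set;
        \endcode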
*/ template < class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_set::traits #else class Traits #endif > class MichaelHashSet< cds::gc::nogc, OrderedList, Traits > { public: typedef cds::gc::nogc gc; ///< Garbage collector typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation typedef Traits traits; ///< Set traits typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the set typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor typedef typename ordered_list::disposer disposer; ///< Node disposer functor #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics #endif /// Hash functor for \p value_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Bucket table allocator // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); protected: //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type; typedef typename std::allocator_traits::template rebind_alloc< internal_bucket_type > bucket_table_allocator; //@endcond public: //@cond typedef typename bucket_stat::stat stat; //@endcond protected: //@cond hash m_HashFunctor; ///< Hash functor const size_t m_nHashBitmask; internal_bucket_type * m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond protected: //@cond /// Calculates hash value of \p key template size_t hash_value( Q const & key ) const { return m_HashFunctor( key ) & m_nHashBitmask; } /// Returns the bucket (ordered list) for \p key template internal_bucket_type& bucket( Q const & key ) { return m_Buckets[ hash_value( key ) ]; } //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef michael_set::details::iterator< internal_bucket_type, false > iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the 
value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( m_Buckets[0].cbegin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return cend(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return const_iterator( m_Buckets[bucket_count() - 1].cend(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } //@} public: /// Initializes hash set /** The Michael's hash set is an unbounded container, but its hash table is non-expandable. At construction time you should pass estimated maximum item count and a load factor. The load factor is average size of one bucket - a small number between 1 and 10. The bucket is an ordered single-linked list, searching in the bucket has linear complexity O(nLoadFactor). The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. */ MichaelHashSet( size_t nMaxItemCount, ///< estimation of max item count in the hash set size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clears hash set object and destroys it ~MichaelHashSet() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { bool bRet = bucket( val ).insert( val ); if ( bRet ) ++m_ItemCounter; return bRet; } /// Updates the element /** The operation performs inserting or changing data with lock-free manner. If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refers to the same thing. The functor may change non-key fields of the \p item. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key already is in the set. 
@warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); if ( bRet.second ) ++m_ItemCounter; return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns the pointer to an element found or \p nullptr. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template value_type * contains( Q const& key ) { return bucket( key ).contains( key ); } //@cond template CDS_DEPRECATED("use contains()") value_type * find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template value_type * contains( Q const& key, Less pred ) { return bucket( key ).contains( key, pred ); } //@cond template CDS_DEPRECATED("use contains()") value_type * find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key /** \anchor cds_intrusive_MichaelHashSet_nogc_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return bucket( key ).find( key, f ); } //@cond template bool find( Q const& key, Func f ) { return bucket( key ).find( key, f ); } //@endcond /// Finds the key \p key using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_MichaelHashSet_nogc_find_func "find(Q&, Func)" but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { return bucket( key ).find_with( key, pred, f ); } //@endcond /// Clears the set (non-atomic) /** The function unlink all items from the set. 
The function is not atomic. It cleans up each bucket and then resets the item counter to zero. If there are a thread that performs insertion while \p %clear() is working the result is undefined in general case: empty() may return \p true but the set may contain item(s). Therefore, \p %clear() may be used only for debugging purposes. For each item the \p disposer is called after unlinking. */ void clear() { for ( size_t i = 0; i < bucket_count(); ++i ) m_Buckets[i].clear(); m_ItemCounter.reset(); } /// Checks if the set is empty /** @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns \p true. */ bool empty() const { return size() == 0; } /// Returns item count in the set /** If you use \p atomicity::empty_item_counter in \p traits::item_counter, the function always returns 0. */ size_t size() const { return m_ItemCounter; } /// Returns the size of hash table /** Since \p %MichaelHashSet cannot dynamically extend the hash table size, the value returned is an constant depending on object initialization parameters; see MichaelHashSet::MichaelHashSet for explanation. */ size_t bucket_count() const { return m_nHashBitmask + 1; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } private: //@cond template typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * b ) { new (b) internal_bucket_type; } template typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * b ) { new (b) internal_bucket_type( m_Stat ); } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H libcds-2.3.3/cds/intrusive/michael_set_rcu.h000066400000000000000000000732541341244201700210700ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H #define CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H #include #include namespace cds { namespace intrusive { /// Michael's hash set, \ref cds_urcu_desc "RCU" specialization /** @ingroup cds_intrusive_map \anchor cds_intrusive_MichaelHashSet_rcu Source: - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" Michael's hash table algorithm is based on lock-free ordered list and it is very simple. The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. However, each bucket may contain unbounded number of items. Template parameters are: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for the ordered list. - \p Traits - type traits, default is \p michael_set::traits. Instead of defining \p Traits struct you can use option-based syntax with \p michael_set::make_traits metafunction. \par Usage Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: \code #include #include #include struct Foo { ... }; // Hash functor for struct Foo struct foo_hash { size_t operator()( Foo const& foo ) const { return ... } }; // Now, you can declare Michael's list for type Foo and default traits: typedef cds::intrusive::MichaelList >, Foo > rcu_michael_list; // Declare Michael's set with MichaelList as bucket type typedef cds::intrusive::MichaelSet< cds::urcu::gc< general_buffered<> >, rcu_michael_list, cds::intrusive::michael_set::make_traits< cds::opt::::hash< foo_hash > >::type > rcu_michael_set; // Declares hash set for 1000000 items with load factor 2 rcu_michael_set theSet( 1000000, 2 ); // Now you can use theSet object in many threads without any synchronization. \endcode */ template < class RCU, class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = michael_set::traits #else class Traits #endif > class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits > { public: typedef cds::urcu::gc< RCU > gc; ///< RCU schema typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation typedef Traits traits; ///< Set traits typedef typename ordered_list::value_type value_type; ///< type of value stored in the list typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor typedef typename ordered_list::disposer disposer; ///< Node disposer functor #ifdef CDS_DOXYGEN_INVOKED typedef typename ordered_list::stat stat; ///< Internal statistics #endif /// Hash functor for \ref value_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::allocator allocator; ///< Bucket table allocator typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); protected: //@cond typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; typedef typename ordered_list::template rebind_traits< cds::opt::item_counter< cds::atomicity::empty_item_counter > , cds::opt::stat< typename bucket_stat::wrapped_stat > >::type internal_bucket_type; typedef typename std::allocator_traits::template rebind_alloc< internal_bucket_type > bucket_table_allocator; //@endcond public: typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives //@cond typedef typename bucket_stat::stat stat; //@endcond private: //@cond hash m_HashFunctor; ///< Hash functor size_t const m_nHashBitmask; internal_bucket_type* m_Buckets; ///< bucket table item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond public: ///@name Forward iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion You may safely use iterators in multi-threaded 
environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to. The iterator interface: \code class iterator { public: // Default constructor iterator(); // Copy construtor iterator( iterator const& src ); // Dereference operator value_type * operator ->() const; // Dereference operator value_type& operator *() const; // Preincrement operator iterator& operator ++(); // Assignment operator iterator& operator = (iterator const& src); // Equality operators bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode */ typedef michael_set::details::iterator< internal_bucket_type, false > iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; /// Returns a forward iterator addressing the first element in a set /** For empty set \code begin() == end() \endcode */ iterator begin() { return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an iterator that addresses the location succeeding the last element in a set /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the set. For empty set \code begin() == end() \endcode */ iterator end() { return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( m_Buckets[0].cbegin(), m_Buckets, m_Buckets + bucket_count()); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator end() const { return cend(); } /// Returns an const iterator that addresses the location succeeding the last element in a set const_iterator cend() const { return const_iterator( m_Buckets[bucket_count() - 1].cend(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); } //@} public: /// Initialize hash set /** The Michael's hash set is an unbounded container, but its hash table is non-expandable. At construction time you should pass estimated maximum item count and a load factor. The load factor is average size of one bucket - a small number between 1 and 10. The bucket is an ordered single-linked list, the complexity of searching in the bucket is linear O(nLoadFactor). The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. */ MichaelHashSet( size_t nMaxItemCount, ///< estimation of max item count in the hash set size_t nLoadFactor ///< load factor: average size of the bucket ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) , m_Buckets( bucket_table_allocator().allocate( bucket_count())) { for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) construct_bucket( it ); } /// Clear hash set and destroy it ~MichaelHashSet() { clear(); for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) it->~internal_bucket_type(); bucket_table_allocator().deallocate( m_Buckets, bucket_count()); } /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. 
Returns \p true if \p val is placed into the set, \p false otherwise.
*/
bool insert( value_type& val )
{
    bool bRet = bucket( val ).insert( val );
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}

/// Inserts new node
/**
    This function is intended for derived non-intrusive containers.

    The function allows to split creating of a new item into two parts:
    - create item with key only
    - insert new item into the set
    - if inserting is successful, calls \p f functor to initialize value-field of \p val.

    The functor signature is:
    \code
        void func( value_type& val );
    \endcode
    where \p val is the item inserted. The user-defined functor is called only if the inserting is successful.

    @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
    \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization.
*/
template <typename Func>
bool insert( value_type& val, Func f )
{
    bool bRet = bucket( val ).insert( val, f );
    if ( bRet )
        ++m_ItemCounter;
    return bRet;
}

/// Updates the element
/**
    The operation performs inserting or changing data in a lock-free manner.

    If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true.
    Otherwise, the functor \p func is called with the item found.
    The functor signature is:
    \code
        struct functor {
            void operator()( bool bNew, value_type& item, value_type& val );
        };
    \endcode
    with arguments:
    - \p bNew - \p true if the item has been inserted, \p false otherwise
    - \p item - item of the set
    - \p val - argument \p val passed into the \p %update() function
    If a new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing.
    The functor may change non-key fields of the \p item.

    Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
    \p second is \p true if a new item has been added or \p false if the item with \p key already is in the set.

    @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
    \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization.
*/
template <typename Func>
std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
{
    std::pair<bool, bool> bRet = bucket( val ).update( val, func, bAllowInsert );
    if ( bRet.second )
        ++m_ItemCounter;
    return bRet;
}
//@cond
template <typename Func>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<bool, bool> ensure( value_type& val, Func func )
{
    return update( val, func, true );
}
//@endcond

/// Unlinks the item \p val from the set
/**
    The function searches the item \p val in the set and unlinks it from the set if it is found and is equal to \p val.

    The function returns \p true if success and \p false otherwise.
*/
bool unlink( value_type& val )
{
    bool bRet = bucket( val ).unlink( val );
    if ( bRet )
        --m_ItemCounter;
    return bRet;
}

/// Deletes the item from the set
/** \anchor cds_intrusive_MichaelHashSet_rcu_erase
    The function searches an item with key equal to \p key in the set,
    unlinks it from the set, and returns \p true.
    If the item with key equal to \p key is not found the function returns \p false.

    Note the hash functor should accept a parameter of type \p Q that may be not the same as \p value_type.
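For example, a sketch of erasing by a plain key (assuming \p foo_hash and the set's comparator accept \p int as well as \p Foo):
\code
if ( theSet.erase( 42 )) {
    // the item has been unlinked; the disposer will reclaim it
    // asynchronously, after an RCU grace period
}
\endcode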
*/
template <typename Q>
bool erase( Q const& key )
{
    if ( bucket( key ).erase( key )) {
        --m_ItemCounter;
        return true;
    }
    return false;
}

/// Deletes the item from the set using \p pred predicate for searching
/**
    The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_erase "erase(Q const&)"
    but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
bool erase_with( Q const& key, Less pred )
{
    if ( bucket( key ).erase_with( key, pred )) {
        --m_ItemCounter;
        return true;
    }
    return false;
}

/// Deletes the item from the set
/** \anchor cds_intrusive_MichaelHashSet_rcu_erase_func
    The function searches an item with key equal to \p key in the set,
    calls \p f functor with the item found, and unlinks it from the set.
    The \ref disposer specified in \p OrderedList class template parameter is called
    by garbage collector \p GC asynchronously.

    The \p Func interface is
    \code
    struct functor {
        void operator()( value_type const& item );
    };
    \endcode

    If the item with key equal to \p key is not found the function returns \p false.

    Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
template <typename Q, typename Func>
bool erase( const Q& key, Func f )
{
    if ( bucket( key ).erase( key, f )) {
        --m_ItemCounter;
        return true;
    }
    return false;
}

/// Deletes the item from the set using \p pred predicate for searching
/**
    The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_erase_func "erase(Q const&, Func)"
    but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less, typename Func>
bool erase_with( const Q& key, Less pred, Func f )
{
    if ( bucket( key ).erase_with( key, pred, f )) {
        --m_ItemCounter;
        return true;
    }
    return false;
}

/// Extracts an item from the set
/** \anchor cds_intrusive_MichaelHashSet_rcu_extract
    The function searches an item with key equal to \p key in the set,
    unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
    If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.

    Depending on \p ordered_list you should or should not lock RCU before calling this function:
    - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
    - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked

    See ordered list implementation for details.

    \code
    #include <cds/urcu/general_buffered.h>
    #include <cds/intrusive/michael_list_rcu.h>
    #include <cds/intrusive/michael_set_rcu.h>

    typedef cds::urcu::gc< general_buffered<> > rcu;
    typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
    typedef cds::intrusive::MichaelHashSet< rcu, rcu_michael_list, foo_traits > rcu_michael_set;

    rcu_michael_set theSet;
    // ...

    typename rcu_michael_set::exempt_ptr p;

    // For MichaelList we should not lock RCU

    // Now, you can apply extract function
    // Note that you must not delete the item found inside the RCU lock
    p = theSet.extract( 10 );
    if ( p ) {
        // do something with p
        ...
    }

    // We may safely release p here
    // release() passes the pointer to RCU reclamation cycle:
    // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
    p.release();
    \endcode
*/
template <typename Q>
exempt_ptr extract( Q const& key )
{
    exempt_ptr p( bucket( key ).extract( key ));
    if ( p )
        --m_ItemCounter;
    return p;
}

/// Extracts an item from the set using \p pred predicate for searching
/**
    The function is an analog of \p extract(Q const&) but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
exempt_ptr extract_with( Q const& key, Less pred )
{
    exempt_ptr p( bucket( key ).extract_with( key, pred ));
    if ( p )
        --m_ItemCounter;
    return p;
}

/// Checks whether the set contains \p key
/**
    The function searches the item with key equal to \p key
    and returns \p true if the key is found, and \p false otherwise.

    Note the hash functor specified for class \p Traits template parameter
    should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
template <typename Q>
bool contains( Q const& key )
{
    return bucket( key ).contains( key );
}
//@cond
template <typename Q>
CDS_DEPRECATED("use contains()")
bool find( Q const& key )
{
    return contains( key );
}
//@endcond

/// Checks whether the set contains \p key using \p pred predicate for searching
/**
    The function is an analog of contains( key ) but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p Less must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
bool contains( Q const& key, Less pred )
{
    return bucket( key ).contains( key, pred );
}
//@cond
template <typename Q, typename Less>
CDS_DEPRECATED("use contains()")
bool find_with( Q const& key, Less pred )
{
    return contains( key, pred );
}
//@endcond

/// Finds the key \p key
/** \anchor cds_intrusive_MichaelHashSet_rcu_find_func
    The function searches the item with key equal to \p key and calls the functor \p f for the item found.
    The interface of \p Func functor is:
    \code
    struct functor {
        void operator()( value_type& item, Q& key );
    };
    \endcode
    where \p item is the item found, \p key is the find function argument.

    The functor can change non-key fields of \p item.
    The functor does not serialize simultaneous access to the set \p item. If such access is
    possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.

    The \p key argument is non-const since it can be used as \p f functor destination,
    i.e. the functor can modify both arguments.

    Note the hash functor specified for class \p Traits template parameter
    should accept a parameter of type \p Q that can be not the same as \p value_type.

    The function returns \p true if \p key is found, \p false otherwise.
*/
template <typename Q, typename Func>
bool find( Q& key, Func f )
{
    return bucket( key ).find( key, f );
}
//@cond
template <typename Q, typename Func>
bool find( Q const& key, Func f )
{
    return bucket( key ).find( key, f );
}
//@endcond

/// Finds the key \p key using \p pred predicate for searching
/**
    The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_find_func "find(Q&, Func)"
    but \p pred is used for key comparing.
    \p Less functor has the interface like \p std::less.
    \p pred must imply the same element order as the comparator used for building the set.
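For example, a sketch with a hypothetical predicate comparing \p Foo against a plain \p int key in both argument orders:
\code
struct foo_less {
    bool operator()( Foo const& f, int k ) const { return f.key < k; }
    bool operator()( int k, Foo const& f ) const { return k < f.key; }
};

bool bFound = theSet.find_with( 42, foo_less(),
    []( Foo& item, int /*key*/ ) {
        // inspect or update non-key fields of item here
    });
\endcode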
*/
template <typename Q, typename Less, typename Func>
bool find_with( Q& key, Less pred, Func f )
{
    return bucket( key ).find_with( key, pred, f );
}
//@cond
template <typename Q, typename Less, typename Func>
bool find_with( Q const& key, Less pred, Func f )
{
    return bucket( key ).find_with( key, pred, f );
}
//@endcond

/// Finds the key \p key and returns the item found
/** \anchor cds_intrusive_MichaelHashSet_rcu_get
    The function searches the item with key equal to \p key and returns the pointer to the item found.
    If \p key is not found it returns \p nullptr.
    Note the type of returned value depends on underlying \p ordered_list.
    For details, see documentation of ordered list you use.

    Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.

    RCU should be locked before calling this function. Returned item is valid only while RCU is locked:
    \code
    typedef cds::intrusive::MichaelHashSet< your_template_parameters > hash_set;
    hash_set theSet;
    // ...
    // Result of get() call
    typename hash_set::raw_ptr ptr;
    {
        // Lock RCU
        hash_set::rcu_lock lock;

        ptr = theSet.get( 5 );
        if ( ptr ) {
            // Deal with ptr
            //...
        }

        // Unlock RCU by rcu_lock destructor
        // ptr can be reclaimed by disposer at any time after RCU has been unlocked
    }
    \endcode
*/
template <typename Q>
raw_ptr get( Q const& key )
{
    return bucket( key ).get( key );
}

/// Finds the key \p key and returns the item found
/**
    The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_get "get(Q const&)"
    but \p pred is used for comparing the keys.

    \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
    in any order.
    \p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
raw_ptr get_with( Q const& key, Less pred )
{
    return bucket( key ).get_with( key, pred );
}

/// Clears the set (non-atomic)
/**
    The function unlinks all items from the set.
    The function is not atomic. It cleans up each bucket and then resets the item counter to zero.
    If there is a thread that performs insertion while \p clear() is working, the result is undefined in the general case:
    empty() may return \p true but the set may contain item(s).
    Therefore, \p clear() may be used only for debugging purposes.

    For each item the \p disposer is called after unlinking.
*/
void clear()
{
    for ( size_t i = 0; i < bucket_count(); ++i )
        m_Buckets[i].clear();
    m_ItemCounter.reset();
}

/// Checks if the set is empty
/**
    @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter,
    the function always returns \p true.
*/
bool empty() const
{
    return size() == 0;
}

/// Returns item count in the set
/**
    If you use \p atomicity::empty_item_counter in \p traits::item_counter,
    the function always returns 0.
*/
size_t size() const
{
    return m_ItemCounter;
}

/// Returns the size of hash table
/**
    Since %MichaelHashSet cannot dynamically extend the hash table size,
    the value returned is a constant depending on object initialization parameters;
    see \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" for explanation.
*/
size_t bucket_count() const
{
    return m_nHashBitmask + 1;
}

/// Returns const reference to internal statistics
stat const& statistics() const
{
    return m_Stat;
}

private:
//@cond
template <typename Stat>
typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * bkt )
{
    new (bkt) internal_bucket_type;
}

template <typename Stat>
typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * bkt )
{
    new (bkt) internal_bucket_type( m_Stat );
}

/// Calculates hash value of \p key
template <typename Q>
size_t hash_value( Q const& key ) const
{
    return m_HashFunctor( key ) & m_nHashBitmask;
}

/// Returns the bucket (ordered list) for \p key
template <typename Q>
internal_bucket_type& bucket( Q const& key )
{
    return m_Buckets[hash_value( key )];
}
template <typename Q>
internal_bucket_type const& bucket( Q const& key ) const
{
    return m_Buckets[hash_value( key )];
}
//@endcond
};

}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H
libcds-2.3.3/cds/intrusive/moir_queue.h000066400000000000000000000141521341244201700201040ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H
#define CDSLIB_INTRUSIVE_MOIR_QUEUE_H

#include <cds/intrusive/msqueue.h>

namespace cds { namespace intrusive {

/// A variation of Michael & Scott's lock-free queue (intrusive variant)
/** @ingroup cds_intrusive_queue
    This is a slightly optimized Michael & Scott's queue algorithm that overloads the \ref dequeue function.

    Source:
    - [2000] Simon Doherty, Lindsay Groves, Victor Luchangco, Mark Moir
        "Formal Verification of a practical lock-free queue algorithm"

    A quote from this work about the difference from the Michael & Scott algorithm:
    "Our algorithm differs from Michael and Scott's [MS98] in that we test whether \p Tail points to the header node
    only after \p Head has been updated, so a dequeuing process reads \p Tail only once. The dequeue in
    [MS98] performs this test before checking whether the next pointer in the dummy node is null, which
    means that it reads \p Tail every time a dequeuing process loops. Under high load, when operations retry
    frequently, our modification will reduce the number of accesses to global memory. This modification, however,
    introduces the possibility of \p Head and \p Tail 'crossing'."

    For explanation of template arguments see \p intrusive::MSQueue.

    \par Examples
    \code
    #include <cds/intrusive/moir_queue.h>
    #include <cds/gc/hp.h>

    namespace ci = cds::intrusive;
    typedef cds::gc::HP hp_gc;

    // MoirQueue with Hazard Pointer garbage collector, base hook + item disposer:
    struct Foo: public ci::msqueue::node< hp_gc >
    {
        // Your data
        ...
    };

    // Disposer for Foo struct just deletes the object passed in
    struct fooDisposer {
        void operator()( Foo * p )
        {
            delete p;
        }
    };

    typedef ci::MoirQueue< hp_gc
        ,Foo
        ,typename ci::msqueue::make_traits<
            ci::opt::hook< ci::msqueue::base_hook< ci::opt::gc< hp_gc > > >
            ,ci::opt::disposer< fooDisposer >
        >::type
    > fooQueue;

    // MoirQueue with Hazard Pointer garbage collector,
    // member hook + item disposer + item counter,
    // without padding of internal queue data:
    struct Bar
    {
        // Your data
        ...
        ci::msqueue::node< hp_gc > hMember;
    };

    struct barQueueTraits: public ci::msqueue::traits
    {
        typedef ci::msqueue::member_hook< offsetof(Bar, hMember), ci::opt::gc< hp_gc > > hook;
        typedef fooDisposer disposer;
        typedef cds::atomicity::item_counter item_counter;
        enum { padding = cds::opt::no_special_padding };
    };
    typedef ci::MoirQueue< hp_gc, Bar, barQueueTraits > barQueue;
    \endcode
*/
template <typename GC, typename T, typename Traits = msqueue::traits>
class MoirQueue: public MSQueue< GC, T, Traits >
{
    //@cond
    typedef MSQueue< GC, T, Traits > base_class;
    typedef typename base_class::node_type node_type;
    //@endcond

public:
    //@cond
    typedef typename base_class::value_type value_type;
    typedef typename base_class::back_off   back_off;
    typedef typename base_class::gc         gc;
    typedef typename base_class::node_traits node_traits;
    typedef typename base_class::memory_model memory_model;
    //@endcond

    /// Rebind template arguments
    template < typename GC2, typename T2, typename Traits2 >
    struct rebind {
        typedef MoirQueue< GC2, T2, Traits2 > other; ///< Rebinding result
    };

protected:
    //@cond
    typedef typename base_class::dequeue_result dequeue_result;

    bool do_dequeue( dequeue_result& res )
    {
        back_off bkoff;

        node_type * pNext;
        node_type * h;
        while ( true ) {
            h = res.guards.protect( 0, base_class::m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
            pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});

            if ( pNext == nullptr ) {
                base_class::m_Stat.onEmptyDequeue();
                return false;    // queue is empty
            }

            if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                node_type * t = base_class::m_pTail.load( memory_model::memory_order_acquire );
                if ( h == t )
                    base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
                break;
            }

            base_class::m_Stat.onDequeueRace();
            bkoff();
        }

        --base_class::m_ItemCounter;
        base_class::m_Stat.onDequeue();

        res.pHead = h;
        res.pNext = pNext;
        return true;
    }
    //@endcond

public:
    /// Dequeues a value from the queue
    /** @anchor cds_intrusive_MoirQueue_dequeue
        See warning about item disposing in \p MSQueue::dequeue.
    */
    value_type * dequeue()
    {
        dequeue_result res;
        if ( do_dequeue( res )) {
            base_class::dispose_result( res );
            return node_traits::to_value_ptr( *res.pNext );
        }
        return nullptr;
    }

    /// Synonym for \ref cds_intrusive_MoirQueue_dequeue "dequeue" function
    value_type * pop()
    {
        return dequeue();
    }
};

}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H
libcds-2.3.3/cds/intrusive/mspriority_queue.h000066400000000000000000000457451341244201700213710ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H #define CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H #include // std::unique_lock #include #include #include #include #include #include #include #include namespace cds { namespace intrusive { /// MSPriorityQueue related definitions /** @ingroup cds_intrusive_helper */ namespace mspriority_queue { /// MSPriorityQueue statistics template struct stat { typedef Counter event_counter ; ///< Event counter type event_counter m_nPushCount; ///< Count of success push operation event_counter m_nPopCount; ///< Count of success pop operation event_counter m_nPushFailCount; ///< Count of failed ("the queue is full") push operation event_counter m_nPopFailCount; ///< Count of failed ("the queue is empty") pop operation event_counter m_nPushHeapifySwapCount; ///< Count of item swapping when heapifying in push event_counter m_nPopHeapifySwapCount; ///< Count of item swapping when heapifying in pop event_counter m_nItemMovedTop; ///< Count of events when \p push() encountered that inserted item was moved to top by a concurrent \p pop() event_counter m_nItemMovedUp; ///< Count of events when \p push() encountered that inserted item was moved upwards by a concurrent \p pop() event_counter m_nPushEmptyPass; ///< Count of empty pass during heapify via concurrent operations //@cond void onPushSuccess() { ++m_nPushCount ;} void onPopSuccess() { ++m_nPopCount ;} void onPushFailed() { ++m_nPushFailCount ;} void onPopFailed() { ++m_nPopFailCount ;} void onPushHeapifySwap() { ++m_nPushHeapifySwapCount ;} void onPopHeapifySwap() { ++m_nPopHeapifySwapCount ;} void onItemMovedTop() { ++m_nItemMovedTop ;} void onItemMovedUp() { ++m_nItemMovedUp ;} void onPushEmptyPass() { ++m_nPushEmptyPass ;} //@endcond }; /// MSPriorityQueue empty statistics struct empty_stat { //@cond void onPushSuccess() const {} void onPopSuccess() const {} void onPushFailed() const {} void onPopFailed() const {} void onPushHeapifySwap() const {} void onPopHeapifySwap() const {} void onItemMovedTop() const {} void onItemMovedUp() const {} void onPushEmptyPass() const {} //@endcond }; /// MSPriorityQueue traits struct traits { /// Storage type /** The storage type for the heap array. Default is \p cds::opt::v::initialized_dynamic_buffer. You may specify any type of buffer's value since at instantiation time the \p buffer::rebind member metafunction is called to change type of values stored in the buffer. */ typedef opt::v::initialized_dynamic_buffer buffer; /// Priority compare functor /** No default functor is provided. If the option is not specified, the \p less is used. */ typedef opt::none compare; /// Specifies binary predicate used for priority comparing. /** Default is \p std::less. */ typedef opt::none less; /// Type of mutual-exclusion lock. The lock is not need to be recursive. typedef cds::sync::spin lock_type; /// Back-off strategy typedef backoff::Default back_off; /// Internal statistics /** Possible types: \p mspriority_queue::empty_stat (the default, no overhead), \p mspriority_queue::stat or any other with interface like \p %mspriority_queue::stat */ typedef empty_stat stat; }; /// Metafunction converting option list to traits /** \p Options: - \p opt::buffer - the buffer type for heap array. Possible type are: \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. Default is \p %opt::v::initialized_dynamic_buffer. 
You may specify any type of value for the buffer since at instantiation time the \p buffer::rebind member metafunction is called to change the type of values stored in the buffer. - \p opt::compare - priority compare functor. No default functor is provided. If the option is not specified, the \p opt::less is used. - \p opt::less - specifies binary predicate used for priority compare. Default is \p std::less. - \p opt::lock_type - lock type. Default is \p cds::sync::spin - \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield - \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead) */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace mspriority_queue /// Michael & Scott array-based lock-based concurrent priority queue heap /** @ingroup cds_intrusive_priority_queue Source: - [1996] G.Hunt, M.Michael, S. Parthasarathy, M.Scott "An efficient algorithm for concurrent priority queue heaps" \p %MSPriorityQueue augments the standard array-based heap data structure with a mutual-exclusion lock on the heap's size and locks on each node in the heap. Each node also has a tag that indicates whether it is empty, valid, or in a transient state due to an update to the heap by an inserting thread. The algorithm allows concurrent insertions and deletions in opposite directions, without risking deadlock and without the need for special server threads. It also uses a "bit-reversal" technique to scatter accesses across the fringe of the tree to reduce contention. On large heaps the algorithm achieves significant performance improvements over serialized single-lock algorithm, for various insertion/deletion workloads. For small heaps it still performs well, but not as well as single-lock algorithm. Template parameters: - \p T - type to be stored in the queue. The priority is a part of \p T type. - \p Traits - type traits. See \p mspriority_queue::traits for explanation. It is possible to declare option-based queue with \p cds::container::mspriority_queue::make_traits metafunction instead of \p Traits template argument. */ template class MSPriorityQueue: public cds::bounded_container { public: typedef T value_type ; ///< Value type stored in the queue typedef Traits traits ; ///< Traits template parameter # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< priority comparing functor based on opt::compare and opt::less option setter. 
# else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::lock_type lock_type; ///< heap's size lock type typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::stat stat; ///< internal statistics type, see \p mspriority_queue::traits::stat typedef typename cds::bitop::bit_reverse_counter<> item_counter;///< Item counter type protected: //@cond typedef cds::OS::ThreadId tag_type; enum tag_value { Available = -1, Empty = 0 }; //@endcond //@cond /// Heap item type struct node { value_type * m_pVal ; ///< A value pointer tag_type volatile m_nTag ; ///< A tag mutable lock_type m_Lock ; ///< Node-level lock /// Creates empty node node() : m_pVal( nullptr ) , m_nTag( tag_type(Empty)) {} /// Lock the node void lock() { m_Lock.lock(); } /// Unlock the node void unlock() { m_Lock.unlock(); } }; //@endcond public: typedef typename traits::buffer::template rebind::other buffer_type ; ///< Heap array buffer type //@cond typedef typename item_counter::counter_type counter_type; //@endcond protected: item_counter m_ItemCounter ; ///< Item counter mutable lock_type m_Lock ; ///< Heap's size lock buffer_type m_Heap ; ///< Heap array stat m_Stat ; ///< internal statistics accumulator public: /// Constructs empty priority queue /** For \p cds::opt::v::initialized_static_buffer the \p nCapacity parameter is ignored. */ MSPriorityQueue( size_t nCapacity ) : m_Heap( nCapacity ) {} /// Clears priority queue and destructs the object ~MSPriorityQueue() { clear(); } /// Inserts a item into priority queue /** If the priority queue is full, the function returns \p false, no item has been added. Otherwise, the function inserts the pointer to \p val into the heap and returns \p true. The function does not make a copy of \p val. */ bool push( value_type& val ) { tag_type const curId = cds::OS::get_current_thread_id(); // Insert new item at bottom of the heap m_Lock.lock(); if ( m_ItemCounter.value() >= capacity()) { // the heap is full m_Lock.unlock(); m_Stat.onPushFailed(); return false; } counter_type i = m_ItemCounter.inc(); assert( i < m_Heap.capacity()); node& refNode = m_Heap[i]; refNode.lock(); m_Lock.unlock(); assert( refNode.m_nTag == tag_type( Empty )); assert( refNode.m_pVal == nullptr ); refNode.m_pVal = &val; refNode.m_nTag = curId; refNode.unlock(); // Move item towards top of heap while it has a higher priority than its parent heapify_after_push( i, curId ); m_Stat.onPushSuccess(); return true; } /// Extracts item with high priority /** If the priority queue is empty, the function returns \p nullptr. Otherwise, it returns the item extracted. 
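For example, a minimal usage sketch (the \p my_traits typedef and the \p Foo value type here are hypothetical):
\code
typedef cds::intrusive::MSPriorityQueue< Foo, my_traits > pqueue;
pqueue pq( 1024 );  // capacity is fixed at construction

Foo item;           // the queue stores a pointer, so 'item' must outlive its stay in the queue
if ( pq.push( item )) {
    Foo* top = pq.pop();  // returns the highest-priority item, or nullptr if the queue is empty
}
\endcode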
*/ value_type * pop() { node& refTop = m_Heap[1]; m_Lock.lock(); if ( m_ItemCounter.value() == 0 ) { // the heap is empty m_Lock.unlock(); m_Stat.onPopFailed(); return nullptr; } counter_type nBottom = m_ItemCounter.dec(); assert( nBottom < m_Heap.capacity()); assert( nBottom > 0 ); refTop.lock(); if ( nBottom == 1 ) { refTop.m_nTag = tag_type( Empty ); value_type * pVal = refTop.m_pVal; refTop.m_pVal = nullptr; refTop.unlock(); m_Lock.unlock(); m_Stat.onPopSuccess(); return pVal; } node& refBottom = m_Heap[nBottom]; refBottom.lock(); m_Lock.unlock(); refBottom.m_nTag = tag_type(Empty); value_type * pVal = refBottom.m_pVal; refBottom.m_pVal = nullptr; refBottom.unlock(); if ( refTop.m_nTag == tag_type(Empty)) { // nBottom == nTop refTop.unlock(); m_Stat.onPopSuccess(); return pVal; } std::swap( refTop.m_pVal, pVal ); refTop.m_nTag = tag_type( Available ); // refTop will be unlocked inside heapify_after_pop heapify_after_pop( &refTop ); m_Stat.onPopSuccess(); return pVal; } /// Clears the queue (not atomic) /** This function is no atomic, but thread-safe */ void clear() { clear_with( []( value_type const& /*src*/ ) {} ); } /// Clears the queue (not atomic) /** This function is no atomic, but thread-safe. For each item removed the functor \p f is called. \p Func interface is: \code struct clear_functor { void operator()( value_type& item ); }; \endcode A lambda function or a function pointer can be used as \p f. */ template void clear_with( Func f ) { value_type * pVal; while (( pVal = pop()) != nullptr ) f( *pVal ); } /// Checks is the priority queue is empty bool empty() const { return size() == 0; } /// Checks if the priority queue is full bool full() const { return size() == capacity(); } /// Returns current size of priority queue size_t size() const { std::unique_lock l( m_Lock ); return static_cast( m_ItemCounter.value()); } /// Return capacity of the priority queue size_t capacity() const { // m_Heap[0] is not used return m_Heap.capacity() - 1; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond void heapify_after_push( counter_type i, tag_type curId ) { key_comparator cmp; back_off bkoff; // Move item towards top of the heap while it has higher priority than parent while ( i > 1 ) { bool bProgress = true; counter_type nParent = i / 2; node& refParent = m_Heap[nParent]; refParent.lock(); node& refItem = m_Heap[i]; refItem.lock(); if ( refParent.m_nTag == tag_type(Available) && refItem.m_nTag == curId ) { if ( cmp( *refItem.m_pVal, *refParent.m_pVal ) > 0 ) { std::swap( refItem.m_nTag, refParent.m_nTag ); std::swap( refItem.m_pVal, refParent.m_pVal ); m_Stat.onPushHeapifySwap(); i = nParent; } else { refItem.m_nTag = tag_type(Available); i = 0; } } else if ( refParent.m_nTag == tag_type( Empty )) { m_Stat.onItemMovedTop(); i = 0; } else if ( refItem.m_nTag != curId ) { m_Stat.onItemMovedUp(); i = nParent; } else { m_Stat.onPushEmptyPass(); bProgress = false; } refItem.unlock(); refParent.unlock(); if ( !bProgress ) bkoff(); else bkoff.reset(); } if ( i == 1 ) { node& refItem = m_Heap[i]; refItem.lock(); if ( refItem.m_nTag == curId ) refItem.m_nTag = tag_type(Available); refItem.unlock(); } } void heapify_after_pop( node * pParent ) { key_comparator cmp; counter_type const nCapacity = m_Heap.capacity(); counter_type nParent = 1; for ( counter_type nChild = nParent * 2; nChild < nCapacity; nChild *= 2 ) { node* pChild = &m_Heap[ nChild ]; pChild->lock(); if ( pChild->m_nTag == tag_type( Empty )) { pChild->unlock(); 
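// Empty child slot: there are no items below this point,
// so the heap property holds and sifting down can stop here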
break; } counter_type const nRight = nChild + 1; if ( nRight < nCapacity ) { node& refRight = m_Heap[nRight]; refRight.lock(); if ( refRight.m_nTag != tag_type( Empty ) && cmp( *refRight.m_pVal, *pChild->m_pVal ) > 0 ) { // get right child pChild->unlock(); nChild = nRight; pChild = &refRight; } else refRight.unlock(); } // If child has higher priority than parent then swap // Otherwise stop if ( cmp( *pChild->m_pVal, *pParent->m_pVal ) > 0 ) { std::swap( pParent->m_nTag, pChild->m_nTag ); std::swap( pParent->m_pVal, pChild->m_pVal ); pParent->unlock(); m_Stat.onPopHeapifySwap(); nParent = nChild; pParent = pChild; } else { pChild->unlock(); break; } } pParent->unlock(); } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H libcds-2.3.3/cds/intrusive/msqueue.h000066400000000000000000000577721341244201700174350ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_MSQUEUE_H #define CDSLIB_INTRUSIVE_MSQUEUE_H #include #include #include namespace cds { namespace intrusive { /// MSQueue related definitions /** @ingroup cds_intrusive_helper */ namespace msqueue { /// Queue node /** Template parameters: - GC - garbage collector used - Tag - a \ref cds_intrusive_hook_tag "tag" */ template using node = cds::intrusive::single_link::node< GC, Tag >; /// Base hook /** \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > using base_hook = cds::intrusive::single_link::base_hook< Options...>; /// Member hook /** \p MemberOffset specifies offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > using member_hook = cds::intrusive::single_link::member_hook< MemberOffset, Options... >; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template using traits_hook = cds::intrusive::single_link::traits_hook< NodeTraits, Options... >; /// Queue internal statistics. May be used for debugging or profiling /** Template argument \p Counter defines type of counter. Default is \p cds::atomicity::event_counter, that is weak, i.e. it is not guaranteed strict event counting. You may use stronger type of counter like as \p cds::atomicity::item_counter, or even integral type, for example, \p int. 
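For example, a minimal sketch of a queue type with statistics enabled and a counter read back afterwards (\p Foo is a hypothetical value type derived from \p msqueue::node):
\code
typedef cds::intrusive::MSQueue< cds::gc::HP, Foo,
    typename cds::intrusive::msqueue::make_traits<
        cds::opt::stat< cds::intrusive::msqueue::stat<> >
    >::type
> counted_queue;

counted_queue q;
// ... producer/consumer threads work with q ...
size_t nEnqueued = q.statistics().m_EnqueueCount.get();
\endcode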
*/ template struct stat { typedef Counter counter_type; ///< Counter type counter_type m_EnqueueCount ; ///< Enqueue call count counter_type m_DequeueCount ; ///< Dequeue call count counter_type m_EnqueueRace ; ///< Count of enqueue race conditions encountered counter_type m_DequeueRace ; ///< Count of dequeue race conditions encountered counter_type m_AdvanceTailError ; ///< Count of "advance tail failed" events counter_type m_BadTail ; ///< Count of events "Tail is not pointed to the last item in the queue" counter_type m_EmptyDequeue ; ///< Count of dequeue from empty queue /// Register enqueue call void onEnqueue() { ++m_EnqueueCount; } /// Register dequeue call void onDequeue() { ++m_DequeueCount; } /// Register enqueue race event void onEnqueueRace() { ++m_EnqueueRace; } /// Register dequeue race event void onDequeueRace() { ++m_DequeueRace; } /// Register "advance tail failed" event void onAdvanceTailFailed() { ++m_AdvanceTailError; } /// Register event "Tail is not pointed to last item in the queue" void onBadTail() { ++m_BadTail; } /// Register dequeuing from empty queue void onEmptyDequeue() { ++m_EmptyDequeue; } //@cond void reset() { m_EnqueueCount.reset(); m_DequeueCount.reset(); m_EnqueueRace.reset(); m_DequeueRace.reset(); m_AdvanceTailError.reset(); m_BadTail.reset(); m_EmptyDequeue.reset(); } stat& operator +=( stat const& s ) { m_EnqueueCount += s.m_EnqueueCount.get(); m_DequeueCount += s.m_DequeueCount.get(); m_EnqueueRace += s.m_EnqueueRace.get(); m_DequeueRace += s.m_DequeueRace.get(); m_AdvanceTailError += s.m_AdvanceTailError.get(); m_BadTail += s.m_BadTail.get(); m_EmptyDequeue += s.m_EmptyDequeue.get(); return *this; } //@endcond }; /// Dummy queue statistics - no counting is performed, no overhead. Support interface like \p msqueue::stat struct empty_stat { //@cond void onEnqueue() const {} void onDequeue() const {} void onEnqueueRace() const {} void onDequeueRace() const {} void onAdvanceTailFailed() const {} void onBadTail() const {} void onEmptyDequeue() const {} void reset() {} empty_stat& operator +=( empty_stat const& ) { return *this; } //@endcond }; /// MSQueue default traits struct traits { /// Back-off strategy typedef cds::backoff::empty back_off; /// Hook, possible types are \p msqueue::base_hook, \p msqueue::member_hook, \p msqueue::traits_hook typedef msqueue::base_hook<> hook; /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used for dequeuing typedef opt::v::empty_disposer disposer; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef atomicity::empty_item_counter item_counter; /// Internal statistics (by default, disabled) /** Possible option value are: \p msqueue::stat, \p msqueue::empty_stat (the default), user-provided class that supports \p %msqueue::stat interface. */ typedef msqueue::empty_stat stat; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Link checking, see \p cds::opt::link_checker static constexpr const opt::link_check_type link_checker = opt::debug_check_link; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; }; /// Metafunction converting option list to \p msqueue::traits /** Supported \p Options are: - \p opt::hook - hook used. 
Possible hooks are: \p msqueue::base_hook, \p msqueue::member_hook, \p msqueue::traits_hook. If the option is not specified, \p %msqueue::base_hook<> is used. - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used when dequeuing. - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) To enable item counting use \p cds::atomicity::item_counter - \p opt::stat - the type to gather internal statistics. Possible statistics types are: \p msqueue::stat, \p msqueue::empty_stat, user-provided class that supports \p %msqueue::stat interface. Default is \p %msqueue::empty_stat (internal statistics disabled). - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). Example: declare \p %MSQueue with item counting and internal statistics \code typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, typename cds::intrusive::msqueue::make_traits< cds::intrusive::opt:hook< cds::intrusive::msqueue::base_hook< cds::opt::gc >>, cds::opt::item_counte< cds::atomicity::item_counter >, cds::opt::stat< cds::intrusive::msqueue::stat<> > >::type > myQueue; \endcode */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace msqueue /// Michael & Scott's intrusive lock-free queue /** @ingroup cds_intrusive_queue Implementation of well-known Michael & Scott's queue algorithm: - [1998] Maged Michael, Michael Scott "Simple, fast, and practical non-blocking and blocking concurrent queue algorithms" Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP - \p T - type of value to be stored in the queue. A value of type \p T must be derived from \p msqueue::node for \p msqueue::base_hook, or it should have a member of type \p %msqueue::node for \p msqueue::member_hook, or it should be convertible to \p %msqueue::node for \p msqueue::traits_hook. - \p Traits - queue traits, default is \p msqueue::traits. You can use \p msqueue::make_traits metafunction to make your traits or just derive your traits from \p %msqueue::traits: \code struct myTraits: public cds::intrusive::msqueue::traits { typedef cds::intrusive::msqueue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, typename cds::intrusive::msqueue::make_traits< cds::opt::stat< cds::intrusive::msqueue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode \par About item disposing The Michael & Scott's queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from the standpoint of the algo. See \p dequeue() function for explanation. 
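A practical consequence for disposing (a sketch; \p q is a queue like in the examples below):
\code
Foo* p = q.dequeue();
if ( p ) {
    // use *p, but never 'delete p' here: dequeue() has already passed the
    // previous head node to the disposer, while *p itself stays linked as
    // the new dummy node and may be disposed only by a later dequeue()
    // or by the queue destructor
}
\endcode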
\par Examples \code #include #include namespace ci = cds::inrtusive; typedef cds::gc::HP hp_gc; // MSQueue with Hazard Pointer garbage collector, base hook + item disposer: struct Foo: public ci::msqueue::node< hp_gc > { // Your data ... }; // Disposer for Foo struct just deletes the object passed in struct fooDisposer { void operator()( Foo * p ) { delete p; } }; // Declare traits for the queue struct myTraits: public ci::msqueue::traits { ,ci::opt::hook< ci::msqueue::base_hook< ci::opt::gc > > ,ci::opt::disposer< fooDisposer > }; // At least, declare the queue type typedef ci::MSQueue< hp_gc, Foo, myTraits > fooQueue; // Example 2: // MSQueue with Hazard Pointer garbage collector, // member hook + item disposer + item counter, // without padding of internal queue data // Use msqueue::make_traits struct Bar { // Your data ... ci::msqueue::node< hp_gc > hMember; }; typedef ci::MSQueue< hp_gc, Foo, typename ci::msqueue::make_traits< ci::opt::hook< ci::msqueue::member_hook< offsetof(Bar, hMember) ,ci::opt::gc > > ,ci::opt::disposer< fooDisposer > ,cds::opt::item_counter< cds::atomicity::item_counter > ,cds::opt::padding< cds::opt::no_special_padding > >::type > barQueue; \endcode */ template class MSQueue { public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value to be stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename single_link::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counter class typedef typename traits::stat stat; ///< Internal statistics typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option /// Rebind template arguments template struct rebind { typedef MSQueue< GC2, T2, Traits2 > other; ///< Rebinding result }; static constexpr const size_t c_nHazardPtrCount = 2; ///< Count of hazard pointer required for the algorithm protected: //@cond // GC and node_type::gc must be the same static_assert((std::is_same::value), "GC and node_type::gc must be the same"); typedef typename node_type::atomic_node_ptr atomic_node_ptr; atomic_node_ptr m_pHead; ///< Queue's head pointer typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad1_; atomic_node_ptr m_pTail; ///< Queue's tail pointer typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad2_; node_type m_Dummy; ///< dummy node typename opt::details::apply_padding< node_type, traits::padding >::padding_type pad3_; item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond //@cond struct dequeue_result { typename gc::template GuardArray<2> guards; node_type * pHead; node_type * pNext; }; bool do_dequeue( dequeue_result& res ) { node_type * pNext; back_off bkoff; node_type * h; while ( true ) { h = res.guards.protect( 0, m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); if ( m_pHead.load(memory_model::memory_order_acquire) != h ) continue; if ( pNext == nullptr ) { m_Stat.onEmptyDequeue(); return false; // empty queue } node_type * t = m_pTail.load(memory_model::memory_order_acquire); if ( h == t ) { // It is needed to help enqueue m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); m_Stat.onBadTail(); continue; } if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) break; m_Stat.onDequeueRace(); bkoff(); } --m_ItemCounter; m_Stat.onDequeue(); res.pHead = h; res.pNext = pNext; return true; } static void clear_links( node_type * pNode ) { pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); } void dispose_result( dequeue_result& res ) { dispose_node( res.pHead ); } void dispose_node( node_type * p ) { // Note about the dummy node: // We cannot clear m_Dummy here since it leads to ABA. // On the other hand, we cannot use deferred clear_links( &m_Dummy ) call via // HP retiring cycle since m_Dummy is member of MSQueue and may be destroyed // before HP retiring cycle invocation. // So, we will never clear m_Dummy struct disposer_thunk { void operator()( value_type * p ) const { assert( p != nullptr ); MSQueue::clear_links( node_traits::to_node_ptr( p )); disposer()(p); } }; if ( p != &m_Dummy ) gc::template retire( node_traits::to_value_ptr( p )); } //@endcond public: /// Initializes empty queue MSQueue() : m_pHead( &m_Dummy ) , m_pTail( &m_Dummy ) {} /// Destructor clears the queue /** Since the Michael & Scott queue contains at least one item even if the queue is empty, the destructor may call item disposer. */ ~MSQueue() { clear(); node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); assert( pHead != nullptr ); assert( pHead == m_pTail.load(memory_model::memory_order_relaxed)); m_pHead.store( nullptr, memory_model::memory_order_relaxed ); m_pTail.store( nullptr, memory_model::memory_order_relaxed ); dispose_node( pHead ); } /// Enqueues \p val value into the queue. 
/** @anchor cds_intrusive_MSQueue_enqueue The function always returns \p true. */ bool enqueue( value_type& val ) { node_type * pNew = node_traits::to_node_ptr( val ); link_checker::is_empty( pNew ); typename gc::Guard guard; back_off bkoff; node_type * t; while ( true ) { t = guard.protect( m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire); if ( pNext != nullptr ) { // Tail is misplaced, advance it m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); m_Stat.onBadTail(); continue; } node_type * tmp = nullptr; if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) break; m_Stat.onEnqueueRace(); bkoff(); } ++m_ItemCounter; m_Stat.onEnqueue(); if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) m_Stat.onAdvanceTailFailed(); return true; } /// Dequeues a value from the queue /** @anchor cds_intrusive_MSQueue_dequeue If the queue is empty the function returns \p nullptr. \par Warning The queue algorithm has following feature: when \p %dequeue() is called, the item returning is still queue's top, and previous top is disposed: \code before dequeuing Dequeue after dequeuing +------------------+ +------------------+ Top ->| Item 1 | -> Dispose Item 1 | Item 2 | <- Top +------------------+ +------------------+ | Item 2 | -> Return Item 2 | ... | +------------------+ | ... | \endcode \p %dequeue() function returns Item 2, that becomes new top of queue, and calls the disposer for Item 1, that was queue's top on function entry. Thus, you cannot manually delete item returned because it is still included in item sequence and it has valuable link field that must not be zeroed. The item should be deleted only in garbage collector retire cycle using the disposer. */ value_type * dequeue() { dequeue_result res; if ( do_dequeue( res )) { dispose_result( res ); return node_traits::to_value_ptr( *res.pNext ); } return nullptr; } /// Synonym for \ref cds_intrusive_MSQueue_enqueue "enqueue()" function bool push( value_type& val ) { return enqueue( val ); } /// Synonym for \ref cds_intrusive_MSQueue_dequeue "dequeue()" function value_type * pop() { return dequeue(); } /// Checks if the queue is empty bool empty() const { typename gc::Guard guard; node_type * p = guard.protect( m_pHead, []( node_type * pNode ) -> value_type * { return node_traits::to_value_ptr( pNode );}); return p->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr; } /// Clear the queue /** The function repeatedly calls \p dequeue() until it returns \p nullptr. The disposer defined in template \p Traits is called for each item that can be safely disposed. */ void clear() { while ( dequeue()); } /// Returns queue's item count /** The value returned depends on \p msqueue::traits::item_counter. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use real item counter and it returns 0, this fact is not mean that the queue is empty. To check queue emptyness use \p empty() method. 
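For example (a sketch; \p q is an \p %MSQueue instance with default traits):
\code
q.size();   // always 0 with the default atomicity::empty_item_counter
q.empty();  // the reliable emptiness check
\endcode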
*/ size_t size() const { return m_ItemCounter.value(); } /// Returns reference to internal statistics stat const& statistics() const { return m_Stat; } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_MSQUEUE_H libcds-2.3.3/cds/intrusive/optimistic_queue.h000066400000000000000000000667721341244201700213410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H #define CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H #include #include #include #include namespace cds { namespace intrusive { /// \p OptimisticQueue related definitions /** @ingroup cds_intrusive_helper */ namespace optimistic_queue { /// Optimistic queue node /** Template parameters: - \p GC - garbage collector - \p Tag - a \ref cds_intrusive_hook_tag "tag" */ template struct node { typedef GC gc ; ///< Garbage collector typedef Tag tag ; ///< tag typedef typename gc::template atomic_ref atomic_node_ptr ; ///< atomic pointer atomic_node_ptr m_pNext ; ///< Pointer to next node atomic_node_ptr m_pPrev ; ///< Pointer to previous node node() noexcept { m_pNext.store( nullptr, atomics::memory_order_relaxed ); m_pPrev.store( nullptr, atomics::memory_order_release ); } }; //@cond struct default_hook { typedef cds::gc::default_gc gc; typedef opt::none tag; }; //@endcond //@cond template < typename HookType, typename... Options> struct hook { typedef typename opt::make_options< default_hook, Options...>::type options; typedef typename options::gc gc; typedef typename options::tag tag; typedef node node_type; typedef HookType hook_type; }; //@endcond /// Base hook /** \p Options are: - \p opt::gc - garbage collector used. - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > struct base_hook: public hook< opt::base_hook_tag, Options... > {}; /// Member hook /** \p MemberOffset specifies offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - \p opt::gc - garbage collector used. - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > struct member_hook: public hook< opt::member_hook_tag, Options... > { //@cond static const size_t c_nMemberOffset = MemberOffset; //@endcond }; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - \p opt::gc - garbage collector used. - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template struct traits_hook: public hook< opt::traits_hook_tag, Options... 
> { //@cond typedef NodeTraits node_traits; //@endcond }; /// Check link template struct link_checker { //@cond typedef Node node_type; //@endcond /// Checks if the link fields of node \p pNode is \p nullptr /** An asserting is generated if \p pNode link fields is not \p nullptr */ static void is_empty( const node_type * pNode ) { assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); assert( pNode->m_pPrev.load( atomics::memory_order_relaxed ) == nullptr ); CDS_UNUSED( pNode ); } }; /// Metafunction for selecting appropriate link checking policy template < typename Node, opt::link_check_type LinkType > struct get_link_checker { //@cond typedef intrusive::opt::v::empty_link_checker type; //@endcond }; //@cond template < typename Node > struct get_link_checker< Node, opt::always_check_link > { typedef link_checker type; }; template < typename Node > struct get_link_checker< Node, opt::debug_check_link > { # ifdef _DEBUG typedef link_checker type; # else typedef intrusive::opt::v::empty_link_checker type; # endif }; //@endcond /// \p OptimisticQueue internal statistics. May be used for debugging or profiling /** Template argument \p Counter defines type of counter. Default is \p cds::atomicity::event_counter. You may use stronger type of counter like as \p cds::atomicity::item_counter, or even integral type, for example, \p int. */ template struct stat { typedef Counter counter_type; ///< Counter type counter_type m_EnqueueCount; ///< Enqueue call count counter_type m_DequeueCount; ///< Dequeue call count counter_type m_EnqueueRace; ///< Count of enqueue race conditions encountered counter_type m_DequeueRace; ///< Count of dequeue race conditions encountered counter_type m_AdvanceTailError; ///< Count of "advance tail failed" events counter_type m_BadTail; ///< Count of events "Tail is not pointed to the last item in the queue" counter_type m_FixListCount; ///< Count of fix list event counter_type m_EmptyDequeue; ///< Count of dequeue from empty queue /// Register enqueue call void onEnqueue() { ++m_EnqueueCount; } /// Register dequeue call void onDequeue() { ++m_DequeueCount; } /// Register enqueue race event void onEnqueueRace() { ++m_EnqueueRace; } /// Register dequeue race event void onDequeueRace() { ++m_DequeueRace; } /// Register "advance tail failed" event void onAdvanceTailFailed() { ++m_AdvanceTailError; } /// Register event "Tail is not pointed to last item in the queue" void onBadTail() { ++m_BadTail; } /// Register fix list event void onFixList() { ++m_FixListCount; } /// Register dequeuing from empty queue void onEmptyDequeue() { ++m_EmptyDequeue; } //@cond void reset() { m_EnqueueCount.reset(); m_DequeueCount.reset(); m_EnqueueRace.reset(); m_DequeueRace.reset(); m_AdvanceTailError.reset(); m_BadTail.reset(); m_FixListCount.reset(); m_EmptyDequeue.reset(); } stat& operator +=( stat const& s ) { m_EnqueueCount += s.m_EnqueueCount.get(); m_DequeueCount += s.m_DequeueCount.get(); m_EnqueueRace += s.m_EnqueueRace.get(); m_DequeueRace += s.m_DequeueRace.get(); m_AdvanceTailError += s.m_AdvanceTailError.get(); m_BadTail += s.m_BadTail.get(); m_FixListCount += s.m_FixListCount.get(); m_EmptyDequeue += s.m_EmptyDequeue.get(); return *this; } //@endcond }; /// Dummy \p OptimisticQueue statistics - no counting is performed. 
Support interface like \p optimistic_queue::stat struct empty_stat { //@cond void onEnqueue() const {} void onDequeue() const {} void onEnqueueRace() const {} void onDequeueRace() const {} void onAdvanceTailFailed() const {} void onBadTail() const {} void onFixList() const {} void onEmptyDequeue() const {} void reset() {} empty_stat& operator +=( empty_stat const& ) { return *this; } //@endcond }; /// \p OptimisticQueue default type traits struct traits { /// Back-off strategy typedef cds::backoff::empty back_off; /// Hook, possible types are \p optimistic_queue::base_hook, \p optimistic_queue::member_hook, \p optimistic_queue::traits_hook typedef optimistic_queue::base_hook<> hook; /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used for dequeuing typedef opt::v::empty_disposer disposer; /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting typedef cds::atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Internal statistics (by default, disabled) /** Possible option value are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat (the default), user-provided class that supports \p %optimistic_queue::stat interface. */ typedef optimistic_queue::empty_stat stat; /// Link checking, see \p cds::opt::link_checker static constexpr const opt::link_check_type link_checker = opt::debug_check_link; /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding enum { padding = opt::cache_line_padding }; }; /// Metafunction converting option list to \p optimistic_queue::traits /** Supported \p Options are: - \p opt::hook - hook used. Possible hooks are: \p optimistic_queue::base_hook, \p optimistic_queue::member_hook, \p optimistic_queue::traits_hook. If the option is not specified, \p %optimistic_queue::base_hook<> is used. - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used when dequeuing. - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) To enable item counting use \p cds::atomicity::item_counter - \p opt::stat - the type to gather internal statistics. Possible statistics types are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat, user-provided class that supports \p %optimistic_queue::stat interface. Default is \p %optimistic_queue::empty_stat (internal statistics disabled). - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). 
Example: declare \p %OptimisticQueue with item counting and internal statistics \code typedef cds::intrusive::OptimisticQueue< cds::gc::HP, Foo, typename cds::intrusive::optimistic_queue::make_traits< cds::intrusive::opt::hook< cds::intrusive::optimistic_queue::base_hook< cds::opt::gc< cds::gc::HP > >>, cds::opt::item_counter< cds::atomicity::item_counter >, cds::opt::stat< cds::intrusive::optimistic_queue::stat<> > >::type > myQueue; \endcode */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace optimistic_queue /// Optimistic intrusive lock-free queue /** @ingroup cds_intrusive_queue Implementation of the Ladan-Mozes & Shavit optimistic queue algorithm. [2008] Edya Ladan-Mozes, Nir Shavit "An Optimistic Approach to Lock-Free FIFO Queues" Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP - \p T - type of value to be stored in the queue. A value of type \p T must be derived from \p optimistic_queue::node for \p optimistic_queue::base_hook, or it should have a member of type \p %optimistic_queue::node for \p optimistic_queue::member_hook, or it should be convertible to \p %optimistic_queue::node for \p optimistic_queue::traits_hook. - \p Traits - queue traits, default is \p optimistic_queue::traits. You can use the \p optimistic_queue::make_traits metafunction to make your traits or just derive your traits from \p %optimistic_queue::traits: \code struct myTraits: public cds::intrusive::optimistic_queue::traits { typedef cds::intrusive::optimistic_queue::stat<> stat; typedef cds::atomicity::item_counter item_counter; }; typedef cds::intrusive::OptimisticQueue< cds::gc::HP, Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::intrusive::OptimisticQueue< cds::gc::HP, Foo, typename cds::intrusive::optimistic_queue::make_traits< cds::opt::stat< cds::intrusive::optimistic_queue::stat<> >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode The garbage collecting schema \p GC must be consistent with the optimistic_queue::node GC. \par About item disposing The optimistic queue algorithm has a key feature: even if the queue is empty, it contains one item that is a "dummy" one from the standpoint of the algorithm. See the \p dequeue() function for explanation. \par Examples \code #include <cds/gc/hp.h> #include <cds/intrusive/optimistic_queue.h> namespace ci = cds::intrusive; typedef cds::gc::HP hp_gc; // Optimistic queue with Hazard Pointer garbage collector, base hook + item counter: struct Foo: public ci::optimistic_queue::node< hp_gc > { // Your data ... }; typedef ci::OptimisticQueue< hp_gc, Foo, typename ci::optimistic_queue::make_traits< ci::opt::hook< ci::optimistic_queue::base_hook< ci::opt::gc< hp_gc > > > ,cds::opt::item_counter< cds::atomicity::item_counter > >::type > FooQueue; // Optimistic queue with Hazard Pointer garbage collector, member hook, no item counter: struct Bar { // Your data ...
ci::optimistic_queue::node< hp_gc > hMember; }; typedef ci::OptimisticQueue< hp_gc, Bar, typename ci::optimistic_queue::make_traits< ci::opt::hook< ci::optimistic_queue::member_hook< offsetof(Bar, hMember) ,ci::opt::gc< hp_gc > > > >::type > BarQueue; \endcode */ template <typename GC, typename T, typename Traits = optimistic_queue::traits> class OptimisticQueue { public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value to be stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename optimistic_queue::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::stat stat; ///< Internal statistics policy used /// Rebind template arguments template <typename GC2, typename T2, typename Traits2> struct rebind { typedef OptimisticQueue< GC2, T2, Traits2 > other ; ///< Rebinding result }; static constexpr const size_t c_nHazardPtrCount = 5; ///< Count of hazard pointers required for the algorithm protected: //@cond typedef typename node_type::atomic_node_ptr atomic_node_ptr; // GC and node_type::gc must be the same static_assert((std::is_same<gc, typename node_type::gc>::value), "GC and node_type::gc must be the same"); //@endcond atomic_node_ptr m_pTail; ///< Pointer to tail node //@cond typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad1_; //@endcond atomic_node_ptr m_pHead; ///< Pointer to head node //@cond typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad2_; //@endcond node_type m_Dummy ; ///< dummy node //@cond typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad3_; //@endcond item_counter m_ItemCounter ; ///< Item counter stat m_Stat ; ///< Internal statistics protected: //@cond static void clear_links( node_type * pNode ) { pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); pNode->m_pPrev.store( nullptr, memory_model::memory_order_release ); } struct dequeue_result { typename gc::template GuardArray<3> guards; node_type * pHead; node_type * pNext; }; bool do_dequeue( dequeue_result& res ) { node_type * pTail; node_type * pHead; node_type * pFirstNodePrev; back_off bkoff; while ( true ) { // Try till success or empty pHead = res.guards.protect( 0, m_pHead, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);}); pTail = res.guards.protect( 1, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);}); assert( pHead != nullptr ); pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);}); if ( pHead == m_pHead.load(memory_model::memory_order_acquire)) { if ( pTail != pHead ) { if ( pFirstNodePrev == nullptr || pFirstNodePrev->m_pNext.load(memory_model::memory_order_acquire) != pHead ) { fix_list( pTail, pHead ); continue; } if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // dequeue success break; } } else { // the queue is empty m_Stat.onEmptyDequeue(); return false; } }
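// Either the head has changed under us or the head CAS failed:
// another thread raced with this dequeue, so back off and retry.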
m_Stat.onDequeueRace(); bkoff(); } --m_ItemCounter; m_Stat.onDequeue(); res.pHead = pHead; res.pNext = pFirstNodePrev; return true; } /// Helper function for the optimistic queue. Corrects the \p prev pointers of the queue's nodes if needed void fix_list( node_type * pTail, node_type * pHead ) { // pTail and pHead are already guarded node_type * pCurNode; node_type * pCurNodeNext; typename gc::template GuardArray<2> guards; pCurNode = pTail; while ( pCurNode != pHead ) { // While not at head pCurNodeNext = guards.protect(0, pCurNode->m_pNext, [](node_type * p) -> value_type * { return node_traits::to_value_ptr(p);}); if ( pHead != m_pHead.load(memory_model::memory_order_acquire)) break; pCurNodeNext->m_pPrev.store( pCurNode, memory_model::memory_order_release ); guards.assign( 1, node_traits::to_value_ptr( pCurNode = pCurNodeNext )); } m_Stat.onFixList(); } void dispose_result( dequeue_result& res ) { dispose_node( res.pHead ); } void dispose_node( node_type * p ) { assert( p != nullptr ); if ( p != &m_Dummy ) { struct internal_disposer { void operator ()( value_type * p ) { assert( p != nullptr ); OptimisticQueue::clear_links( node_traits::to_node_ptr( *p )); disposer()(p); } }; gc::template retire<internal_disposer>( node_traits::to_value_ptr(p)); } } //@endcond public: /// Constructor creates an empty queue OptimisticQueue() : m_pTail( &m_Dummy ) , m_pHead( &m_Dummy ) {} ~OptimisticQueue() { clear(); node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); CDS_DEBUG_ONLY( node_type * pTail = m_pTail.load(memory_model::memory_order_relaxed); ) CDS_DEBUG_ONLY( assert( pHead == pTail ); ) assert( pHead != nullptr ); m_pHead.store( nullptr, memory_model::memory_order_relaxed ); m_pTail.store( nullptr, memory_model::memory_order_relaxed ); dispose_node( pHead ); } /// @anchor cds_intrusive_OptimisticQueue_enqueue Enqueues \p val in lock-free manner. Always returns \p true bool enqueue( value_type& val ) { node_type * pNew = node_traits::to_node_ptr( val ); link_checker::is_empty( pNew ); typename gc::template GuardArray<2> guards; back_off bkoff; guards.assign( 1, &val ); while( true ) { node_type * pTail = guards.protect( 0, m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p ); } ); // Read the tail pNew->m_pNext.store( pTail, memory_model::memory_order_relaxed ); if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { // Try to CAS the tail pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev ++m_ItemCounter; m_Stat.onEnqueue(); break; // Enqueue done! } m_Stat.onEnqueueRace(); bkoff(); } return true; } /// Dequeues a value from the queue /** @anchor cds_intrusive_OptimisticQueue_dequeue If the queue is empty the function returns \p nullptr. \par Warning The queue algorithm has the following feature: when \p dequeue() is called, the item returned is still the queue's top, and the previous top is disposed: \code before dequeuing Dequeue after dequeuing +------------------+ +------------------+ Top ->| Item 1 | -> Dispose Item 1 | Item 2 | <- Top +------------------+ +------------------+ | Item 2 | -> Return Item 2 | ... | +------------------+ | ... | \endcode The \p %dequeue() function returns Item 2, which becomes the new top of the queue, and calls the disposer for Item 1, which was the queue's top on function entry. Thus, you cannot manually delete the returned item because it is still included in the queue and its link fields hold valid pointers that must not be zeroed. The item may be deleted only in the disposer call.
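\par Example
A minimal usage sketch (illustrative only): \p FooQueue is the hypothetical queue type from the examples above; assume its traits also set \p opt::disposer so that retired nodes are freed.
\code
FooQueue q;
q.enqueue( *new Foo );
q.enqueue( *new Foo );
Foo * p = q.dequeue();  // p is the new queue top and is still linked into the queue;
                        // the node that was the top before the call is retired and
                        // will be freed by the disposer - never delete p manually
\endcode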
*/ value_type * dequeue() { dequeue_result res; if ( do_dequeue( res )) { dispose_result( res ); return node_traits::to_value_ptr( *res.pNext ); } return nullptr; } /// Synonym for \p enqueue() bool push( value_type& val ) { return enqueue( val ); } /// Synonym for \p dequeue() value_type * pop() { return dequeue(); } /// Checks if the queue is empty bool empty() const { return m_pTail.load(memory_model::memory_order_relaxed) == m_pHead.load(memory_model::memory_order_relaxed); } /// Clears the queue /** The function repeatedly calls \ref dequeue until it returns \p nullptr. The disposer defined in template \p Traits is called for each item that can be safely disposed. */ void clear() { value_type * pv; while ( (pv = dequeue()) != nullptr ); } /// Returns queue's item count /** The value returned depends on \p optimistic_queue::traits::item_counter. For \p atomicity::empty_item_counter, this function always returns 0. @note Even if you use a real item counter and it returns 0, it does not mean that the queue is empty. To check queue emptiness use the \p empty() method. */ size_t size() const { return m_ItemCounter.value(); } /// Returns reference to internal statistics const stat& statistics() const { return m_Stat; } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H libcds-2.3.3/cds/intrusive/options.h000066400000000000000000000125221341244201700174240ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_OPTIONS_H #define CDSLIB_INTRUSIVE_OPTIONS_H #include <cds/opt/options.h> #include <cds/details/allocator.h> namespace cds { namespace intrusive { /// Common options for intrusive containers /** @ingroup cds_intrusive_helper This namespace contains options for intrusive containers. It imports all definitions from the cds::opt namespace and introduces a number of options specific to the intrusive approach. */ namespace opt { using namespace cds::opt; //@cond struct base_hook_tag; struct member_hook_tag; struct traits_hook_tag; //@endcond /// Hook option /** Hook is a class that a user must add as a base class or as a member to make the user class compatible with intrusive containers. The \p Hook template parameter strongly depends on the type of intrusive container you use. */ template <typename Hook> struct hook { //@cond template <typename Base> struct pack: public Base { typedef Hook hook; }; //@endcond }; /// Item disposer option setter /** The option specifies a functor that is used to dispose removed items. The interface of the \p Type functor is: \code struct myDisposer { void operator ()( T * val ); }; \endcode Predefined types for \p Type: - \p opt::v::empty_disposer - the disposer that does nothing - \p opt::v::delete_disposer - the disposer that calls operator \p delete Usually, the disposer should be a stateless default-constructible functor. It is called by the garbage collector in deferred mode. */ template <typename Type> struct disposer { //@cond template <typename Base> struct pack: public Base { typedef Type disposer; }; //@endcond }; /// Values of \ref cds::intrusive::opt::link_checker option enum link_check_type { never_check_link, ///< no link checking performed debug_check_link, ///< check only in debug build always_check_link ///< check in debug and release build };
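/* Usage sketch (illustrative only): a container's traits select the policy via a
   static member named \p link_checker; for example, deriving from
   \p optimistic_queue::traits:

       struct my_traits: public cds::intrusive::optimistic_queue::traits {
           static constexpr const cds::intrusive::opt::link_check_type link_checker =
               cds::intrusive::opt::always_check_link;  // check links even in release build
       };
*/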
/// Link checking /** The option specifies a type of link checking. Possible values for \p Value are members of the \ref link_check_type enum: - \ref never_check_link - no link checking performed - \ref debug_check_link - check only in debug build - \ref always_check_link - check in debug and release build (not yet implemented for release mode). When link checking is on, the container checks that the node's link fields are \p nullptr before inserting the item. If a link is not \p nullptr, an assertion is generated */ template <link_check_type Value> struct link_checker { //@cond template <typename Base> struct pack: public Base { static const link_check_type link_checker = Value; }; //@endcond }; /// Predefined option values namespace v { using namespace cds::opt::v; //@cond /// No link checking template <typename Node> struct empty_link_checker { //@cond typedef Node node_type; static void is_empty( const node_type * /*pNode*/ ) {} //@endcond }; //@endcond /// Empty item disposer /** The disposer does nothing. This is one of the possible values of the opt::disposer option. */ struct empty_disposer { /// Empty dispose functor template <typename T> void operator ()( T * ) {} }; /// Deletion item disposer /** Analogue of the operator \p delete call. The disposer calls the \p T destructor and deallocates the item via the \p Alloc allocator. */ template <typename Alloc = CDS_DEFAULT_ALLOCATOR> struct delete_disposer { /// Dispose functor template <typename T> void operator ()( T * p ) { cds::details::Allocator<T, Alloc> alloc; alloc.Delete( p ); } }; } // namespace v //@cond // Lazy-list specific option (for split-list support) template <typename Type> struct boundary_node_type { //@cond template <typename Base> struct pack: public Base { typedef Type boundary_node_type; }; //@endcond }; //@endcond } // namespace opt }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_OPTIONS_H libcds-2.3.3/cds/intrusive/segmented_queue.h000066400000000000000000000710361341244201700211150ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H #define CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H #include #include #include #include #include #include #include #if CDS_COMPILER == CDS_COMPILER_MSVC # pragma warning( push ) # pragma warning( disable: 4355 ) // warning C4355: 'this' : used in base member initializer list #endif namespace cds { namespace intrusive { /// SegmentedQueue-related declarations namespace segmented_queue { /// SegmentedQueue internal statistics.
May be used for debugging or profiling template <typename Counter = cds::atomicity::event_counter> struct stat { typedef Counter counter_type; ///< Counter type counter_type m_nPush; ///< Push count counter_type m_nPushPopulated; ///< Number of attempts to push to a populated (non-empty) cell counter_type m_nPushContended; ///< Number of failed CAS when pushing counter_type m_nPop; ///< Pop count counter_type m_nPopEmpty; ///< Number of dequeue attempts on an empty queue counter_type m_nPopContended; ///< Number of failed CAS when popping counter_type m_nCreateSegmentReq; ///< Number of requests to create a new segment counter_type m_nDeleteSegmentReq; ///< Number of requests to delete a segment counter_type m_nSegmentCreated; ///< Number of created segments counter_type m_nSegmentDeleted; ///< Number of deleted segments //@cond void onPush() { ++m_nPush; } void onPushPopulated() { ++m_nPushPopulated; } void onPushContended() { ++m_nPushContended; } void onPop() { ++m_nPop; } void onPopEmpty() { ++m_nPopEmpty; } void onPopContended() { ++m_nPopContended; } void onCreateSegmentReq() { ++m_nCreateSegmentReq; } void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } void onSegmentCreated() { ++m_nSegmentCreated; } void onSegmentDeleted() { ++m_nSegmentDeleted; } //@endcond }; /// Dummy SegmentedQueue statistics, no overhead struct empty_stat { //@cond void onPush() const {} void onPushPopulated() const {} void onPushContended() const {} void onPop() const {} void onPopEmpty() const {} void onPopContended() const {} void onCreateSegmentReq() const {} void onDeleteSegmentReq() const {} void onSegmentCreated() const {} void onSegmentDeleted() const {} //@endcond }; /// SegmentedQueue default traits struct traits { /// Element disposer that is called for the dequeued item. Default is opt::v::empty_disposer (no disposer) typedef opt::v::empty_disposer disposer; /// Item counter, default is atomicity::item_counter /** Item counting is an essential part of the segmented queue algorithm. The \p empty() member function is based on checking size() == 0. Therefore, a dummy item counter like atomicity::empty_item_counter is not a proper counter. */ typedef atomicity::item_counter item_counter; /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) typedef segmented_queue::empty_stat stat; /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types typedef opt::v::relaxed_ordering memory_model; /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification enum { alignment = opt::cache_line_alignment }; /// Padding of segment data, default is no special padding /** The segment is just an array of atomic data pointers, so high load leads to false sharing and performance degradation. Padding of the segment data can eliminate the false sharing issue. On the other hand, padding increases the segment size. */ enum { padding = opt::no_special_padding }; /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR typedef CDS_DEFAULT_ALLOCATOR allocator; /// Lock type used to maintain an internal list of allocated segments typedef cds::sync::spin lock_type; /// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor) typedef cds::opt::v::random2_permutation permutation_generator; }; /// Metafunction converting option list to traits for SegmentedQueue /** The metafunction can be useful if a few fields in \p segmented_queue::traits should be changed.
For example: \code typedef cds::intrusive::segmented_queue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter > >::type my_segmented_queue_traits; \endcode This code creates \p %SegmentedQueue type traits with the item counting feature; all other \p %segmented_queue::traits members are left unchanged. \p Options are: - \p opt::disposer - the functor used to dispose removed items. - \p opt::stat - internal statistics, possible types: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default) - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counter is not suitable for the segmented queue. - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. See option description for the full list of possible models - \p opt::alignment - the alignment for critical data, see option description for explanation - \p opt::padding - the padding of segment data, default is no special padding. See \p traits::padding for explanation. - \p opt::allocator - the allocator to be used for maintaining segments. - \p opt::lock_type - a mutual exclusion lock type used to maintain the internal list of allocated segments. Default is \p cds::sync::spin; \p std::mutex is also suitable. - \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor), default is \p cds::opt::v::random2_permutation */ template <typename... Options> struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type ,Options... >::type type; # endif }; } // namespace segmented_queue /// Segmented queue /** @ingroup cds_intrusive_queue The queue is based on the work - [2010] Afek, Korland, Yanovsky "Quasi-Linearizability: relaxed consistency for improved concurrency" In this paper the authors offer a relaxed version of linearizability, so-called quasi-linearizability, that preserves some of the intuition, provides a flexible way to control the level of relaxation and supports the implementation of more concurrent and scalable data structures. Intuitively, linearizability requires each run to be equivalent in some sense to a serial run of the algorithm. This equivalence to some serial run imposes strong synchronization requirements that in many cases result in limited scalability and a synchronization bottleneck. The general idea is that the queue maintains a linked list of segments; each segment is an array of nodes whose size is the quasi factor, and each node has a deleted boolean marker that states whether it has been dequeued. Each producer iterates over the last segment in the linked list in some random permutation order. When it finds an empty cell, it performs a CAS operation attempting to enqueue its new element. If the entire segment has been scanned and no available cell is found (implying that the segment is full), it attempts to add a new segment to the list.
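The producer pass over a segment can be modelled as follows (an illustrative, self-contained sketch with simplified, hypothetical types; the real implementation additionally uses marked pointers, padding, and GC guards): \code
#include <atomic>
#include <cstddef>
#include <vector>

// One producer pass over a segment's cell array; perm is a random
// permutation of [0, quasi_factor).
bool try_enqueue( std::vector<std::atomic<void*>>& cells,
                  std::vector<std::size_t> const& perm,
                  void* item )
{
    for ( std::size_t i : perm ) {
        void* expected = nullptr;
        // claim the first empty cell found in permutation order
        if ( cells[i].compare_exchange_strong( expected, item ))
            return true;
    }
    return false;   // segment full - the caller appends a new tail segment and retries
}
\endcode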
The dequeue operation is similar: the consumer iterates over the first segment in the linked list in some random permutation order. When it finds an item that has not yet been dequeued, it performs a CAS on its deleted marker in order to "delete" it; if the CAS succeeds, the item is considered dequeued. If the entire segment has been scanned and all the nodes have already been dequeued (implying that the segment is empty), the consumer attempts to remove this segment from the linked list and starts the same process on the next segment. If there is no next segment, the queue is considered empty. Based on the fact that most of the time threads do not add or remove segments, most of the work is done in parallel on different cells in the segments. This ensures controlled contention depending on the segment size, which is the quasi factor. The segmented queue is an unfair queue since it violates the strong FIFO order, but by no more than the quasi factor. This means that the consumer dequeues any item from the current first segment. Template parameters: - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP - \p T - the type of values stored in the queue - \p Traits - queue type traits, default is \p segmented_queue::traits. The \p segmented_queue::make_traits metafunction can be used to construct the type traits. The queue stores the pointers to enqueued items so no special node hooks are needed. */ template <class GC, typename T, typename Traits = segmented_queue::traits> class SegmentedQueue { public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of the value stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::disposer disposer ; ///< value disposer, called only from \p clear() for each dequeued element typedef typename traits::allocator allocator; ///< Allocator maintaining the segments typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter typedef typename traits::stat stat; ///< Internal statistics policy typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments. typedef typename traits::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor) static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointers required for the algorithm protected: //@cond // Segment cell.
LSB is used as deleted mark typedef cds::details::marked_ptr< value_type, 1 > regular_cell; typedef atomics::atomic< regular_cell > atomic_cell; typedef typename cds::opt::details::apply_padding< atomic_cell, traits::padding >::type cell; // Segment struct segment: public boost::intrusive::slist_base_hook<> { cell * cells; // Cell array of size \ref m_nQuasiFactor size_t version; // version tag (ABA prevention tag) // cell array is placed here in one continuous memory block // Initializes the segment explicit segment( size_t nCellCount ) // MSVC warning C4355: 'this': used in base member initializer list : cells( reinterpret_cast< cell *>( this + 1 )) , version( 0 ) { init( nCellCount ); } segment() = delete; void init( size_t nCellCount ) { cell * pLastCell = cells + nCellCount; for ( cell* pCell = cells; pCell < pLastCell; ++pCell ) pCell->data.store( regular_cell(), atomics::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_release ); } }; typedef typename opt::details::alignment_setter< atomics::atomic, traits::alignment >::type aligned_segment_ptr; //@endcond protected: //@cond class segment_list { typedef boost::intrusive::slist< segment, boost::intrusive::cache_last< true > > list_impl; typedef std::unique_lock< lock_type > scoped_lock; aligned_segment_ptr m_pHead; aligned_segment_ptr m_pTail; list_impl m_List; mutable lock_type m_Lock; size_t const m_nQuasiFactor; stat& m_Stat; private: struct segment_disposer { void operator()( segment * pSegment ) { assert( pSegment != nullptr ); free_segment( pSegment ); } }; struct gc_segment_disposer { void operator()( segment * pSegment ) { assert( pSegment != nullptr ); retire_segment( pSegment ); } }; public: segment_list( size_t nQuasiFactor, stat& st ) : m_pHead( nullptr ) , m_pTail( nullptr ) , m_nQuasiFactor( nQuasiFactor ) , m_Stat( st ) { assert( cds::beans::is_power2( nQuasiFactor )); } ~segment_list() { m_List.clear_and_dispose( gc_segment_disposer()); } segment * head( typename gc::Guard& guard ) { return guard.protect( m_pHead ); } segment * tail( typename gc::Guard& guard ) { return guard.protect( m_pTail ); } # ifdef _DEBUG bool populated( segment const& s ) const { // The lock should be held cell const * pLastCell = s.cells + quasi_factor(); for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { if ( !pCell->data.load( memory_model::memory_order_relaxed ).all()) return false; } return true; } bool exhausted( segment const& s ) const { // The lock should be held cell const * pLastCell = s.cells + quasi_factor(); for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits()) return false; } return true; } # endif segment * create_tail( segment * pTail, typename gc::Guard& guard ) { // pTail is guarded by GC m_Stat.onCreateSegmentReq(); scoped_lock l( m_Lock ); if ( !m_List.empty() && ( pTail != &m_List.back() || get_version(pTail) != m_List.back().version )) { m_pTail.store( &m_List.back(), memory_model::memory_order_relaxed ); return guard.assign( &m_List.back()); } # ifdef _DEBUG assert( m_List.empty() || populated( m_List.back())); # endif segment * pNew = allocate_segment(); m_Stat.onSegmentCreated(); if ( m_List.empty()) m_pHead.store( pNew, memory_model::memory_order_release ); m_List.push_back( *pNew ); m_pTail.store( pNew, memory_model::memory_order_release ); return guard.assign( pNew ); } segment * remove_head( segment * pHead, typename gc::Guard& guard ) { // pHead is guarded by GC 
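// The head pointer changes only under m_Lock; the removed segment is
// retired via the GC, so concurrent consumers that still hold it stay safe.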
m_Stat.onDeleteSegmentReq(); segment * pRet; { scoped_lock l( m_Lock ); if ( m_List.empty()) { m_pTail.store( nullptr, memory_model::memory_order_relaxed ); m_pHead.store( nullptr, memory_model::memory_order_relaxed ); return guard.assign( nullptr ); } if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed ); return guard.assign( &m_List.front()); } # ifdef _DEBUG assert( exhausted( m_List.front())); # endif m_List.pop_front(); if ( m_List.empty()) { pRet = guard.assign( nullptr ); m_pTail.store( nullptr, memory_model::memory_order_relaxed ); } else pRet = guard.assign( &m_List.front()); m_pHead.store( pRet, memory_model::memory_order_release ); } retire_segment( pHead ); m_Stat.onSegmentDeleted(); return pRet; } size_t quasi_factor() const { return m_nQuasiFactor; } private: typedef cds::details::Allocator< segment, allocator > segment_allocator; static size_t get_version( segment * pSegment ) { return pSegment ? pSegment->version : 0; } segment * allocate_segment() { return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor()); } static void free_segment( segment * pSegment ) { segment_allocator().Delete( pSegment ); } static void retire_segment( segment * pSegment ) { gc::template retire( pSegment ); } }; //@endcond protected: segment_list m_SegmentList; ///< List of segments item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics public: /// Initializes the empty queue SegmentedQueue( size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. ) : m_SegmentList( cds::beans::ceil2(nQuasiFactor), m_Stat ) { static_assert( (!std::is_same< item_counter, cds::atomicity::empty_item_counter >::value), "cds::atomicity::empty_item_counter is not supported for SegmentedQueue" ); assert( m_SegmentList.quasi_factor() > 1 ); } /// Clears the queue and deletes all internal data ~SegmentedQueue() { clear(); } /// Inserts a new element at last segment of the queue bool enqueue( value_type& val ) { // LSB is used as a flag in marked pointer assert( (reinterpret_cast( &val ) & 1) == 0 ); typename gc::Guard segmentGuard; segment * pTailSegment = m_SegmentList.tail( segmentGuard ); if ( !pTailSegment ) { // no segments, create the new one pTailSegment = m_SegmentList.create_tail( pTailSegment, segmentGuard ); assert( pTailSegment ); } permutation_generator gen( quasi_factor()); // First, increment item counter. 
// We are sure that the item will be enqueued // but if we increment the counter after inserting we can get a negative counter value // if dequeuing occurs before incrementing (enqueue/dequeue race) ++m_ItemCounter; while ( true ) { CDS_DEBUG_ONLY( size_t nLoopCount = 0); do { typename permutation_generator::integer_type i = gen; CDS_DEBUG_ONLY( ++nLoopCount ); if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all()) { // Cell is not empty, go next m_Stat.onPushPopulated(); } else { // Empty cell found, try to enqueue here regular_cell nullCell; if ( pTailSegment->cells[i].data.compare_exchange_strong( nullCell, regular_cell( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // Ok to push item m_Stat.onPush(); return true; } assert( nullCell.ptr()); m_Stat.onPushContended(); } } while ( gen.next()); assert( nLoopCount == quasi_factor()); // No available position, create a new segment pTailSegment = m_SegmentList.create_tail( pTailSegment, segmentGuard ); // Get new permutation gen.reset(); } } /// Removes an element from the first segment of the queue and returns it /** If the queue is empty the function returns \p nullptr. The disposer specified in the \p Traits template argument is not called for the returned item. You should manually dispose the item: \code struct my_disposer { void operator()( foo * p ) { delete p; } }; cds::intrusive::SegmentedQueue< cds::gc::HP, foo > theQueue; // ... // Dequeue an item foo * pItem = theQueue.dequeue(); // deal with pItem //... // pItem is no longer needed and can be deleted // Do it via gc::HP::retire cds::gc::HP::template retire< my_disposer >( pItem ); \endcode */ value_type * dequeue() { typename gc::Guard itemGuard; if ( do_dequeue( itemGuard )) { value_type * pVal = itemGuard.template get<value_type>(); assert( pVal ); return pVal; } return nullptr; } /// Synonym for \p enqueue(value_type&) member function bool push( value_type& val ) { return enqueue( val ); } /// Synonym for \p dequeue() member function value_type * pop() { return dequeue(); } /// Checks if the queue is empty /** The original segmented queue algorithm does not allow checking emptiness accurately because \p empty() is not linearizable. This function tests the queue's emptiness by checking size() == 0, so the item counting feature is an essential part of the queue's algorithm. */ bool empty() const { return size() == 0; } /// Clear the queue /** The function repeatedly calls \p dequeue() until it returns \p nullptr. The disposer specified in the \p Traits template argument is called for each removed item. */ void clear() { clear_with( disposer()); } /// Clear the queue /** The function repeatedly calls \p dequeue() until it returns \p nullptr. \p Disposer is called for each removed item. */ template <typename Disposer> void clear_with( Disposer ) { typename gc::Guard itemGuard; while ( do_dequeue( itemGuard )) { assert( itemGuard.template get<value_type>()); gc::template retire<Disposer>( itemGuard.template get<value_type>()); itemGuard.clear(); } } /// Returns queue's item count size_t size() const { return m_ItemCounter.value(); } /// Returns reference to internal statistics /** The type of internal statistics is specified by the \p Traits template argument.
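\par Example
An illustrative sketch (hypothetical queue type with statistics enabled; \p foo is an item type as in the \p dequeue() example above):
\code
typedef cds::intrusive::SegmentedQueue< cds::gc::HP, foo,
    cds::intrusive::segmented_queue::make_traits<
        cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
    >::type
> stat_queue;

stat_queue q( 16 );  // quasi factor 16
// ... enqueue/dequeue work ...
size_t nSegments = q.statistics().m_nSegmentCreated.get();
\endcode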
*/ const stat& statistics() const { return m_Stat; } /// Returns quasi factor, a power-of-two number size_t quasi_factor() const { return m_SegmentList.quasi_factor(); } protected: //@cond bool do_dequeue( typename gc::Guard& itemGuard ) { typename gc::Guard segmentGuard; segment * pHeadSegment = m_SegmentList.head( segmentGuard ); permutation_generator gen( quasi_factor()); while ( true ) { if ( !pHeadSegment ) { // Queue is empty m_Stat.onPopEmpty(); return false; } bool bHadNullValue = false; regular_cell item; CDS_DEBUG_ONLY( size_t nLoopCount = 0 ); do { typename permutation_generator::integer_type i = gen; CDS_DEBUG_ONLY( ++nLoopCount ); // Guard the item // In segmented queue the cell cannot be reused // So no loop is needed here to protect the cell item = pHeadSegment->cells[i].data.load( memory_model::memory_order_relaxed ); itemGuard.assign( item.ptr()); // Check if this cell is empty, which means an element // can be enqueued to this cell in the future if ( !item.ptr()) bHadNullValue = true; else { // If the item is not deleted yet if ( !item.bits()) { // Try to mark the cell as deleted if ( pHeadSegment->cells[i].data.compare_exchange_strong( item, item | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { --m_ItemCounter; m_Stat.onPop(); return true; } assert( item.bits()); m_Stat.onPopContended(); } } } while ( gen.next()); assert( nLoopCount == quasi_factor()); // scanning the entire segment without finding a candidate to dequeue // If there was an empty cell, the queue is considered empty if ( bHadNullValue ) { m_Stat.onPopEmpty(); return false; } // All nodes have been dequeued, we can safely remove the first segment pHeadSegment = m_SegmentList.remove_head( pHeadSegment, segmentGuard ); // Get new permutation gen.reset(); } } //@endcond }; }} // namespace cds::intrusive #if CDS_COMPILER == CDS_COMPILER_MSVC # pragma warning( pop ) #endif #endif // #ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H libcds-2.3.3/cds/intrusive/skip_list_dhp.h000066400000000000000000000006021341244201700205610ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_DHP_H #define CDSLIB_INTRUSIVE_SKIP_LIST_DHP_H #include #include #endif // CDSLIB_INTRUSIVE_SKIP_LIST_DHP_H libcds-2.3.3/cds/intrusive/skip_list_hp.h000066400000000000000000000005331341244201700204200ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_HP_H #define CDSLIB_INTRUSIVE_SKIP_LIST_HP_H #include #include #endif libcds-2.3.3/cds/intrusive/skip_list_nogc.h000066400000000000000000001052751341244201700207500ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_NOGC_H #define CDSLIB_INTRUSIVE_SKIP_LIST_NOGC_H #include #include #include #include #include #include namespace cds { namespace intrusive { //@cond namespace skip_list { template class node< cds::gc::nogc, Tag > { public: typedef cds::gc::nogc gc; ///< Garbage collector typedef Tag tag; ///< tag typedef atomics::atomic atomic_ptr; typedef atomic_ptr tower_item_type; protected: atomic_ptr m_pNext; ///< Next item in bottom-list (list at level 0) unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. atomic_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr public: /// Constructs a node of height 1 (a bottom-list node) node() : m_pNext( nullptr ) , m_nHeight(1) , m_arrNext( nullptr ) {} /// Constructs a node of height \p nHeight void make_tower( unsigned int nHeight, atomic_ptr * nextTower ) { assert( nHeight > 0 ); assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; m_nHeight = nHeight; } atomic_ptr * release_tower() { atomic_ptr * pTower = m_arrNext; m_arrNext = nullptr; m_nHeight = 1; return pTower; } atomic_ptr * get_tower() const { return m_arrNext; } /// Access to element of next pointer array atomic_ptr& next( unsigned int nLevel ) { assert( nLevel < height()); assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } /// Access to element of next pointer array (const version) atomic_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height()); assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } /// Access to element of next pointer array (same as \ref next function) atomic_ptr& operator[]( unsigned int nLevel ) { return next( nLevel ); } /// Access to element of next pointer array (same as \ref next function) atomic_ptr const& operator[]( unsigned int nLevel ) const { return next( nLevel ); } /// Height of the node unsigned int height() const { return m_nHeight; } /// Clears internal links void clear() { assert( m_arrNext == nullptr ); m_pNext.store( nullptr, atomics::memory_order_release ); } bool is_cleared() const { return m_pNext.load( atomics::memory_order_relaxed ) == nullptr && m_arrNext == nullptr && m_nHeight <= 1 ; } }; } // namespace skip_list namespace skip_list { namespace details { template class iterator< cds::gc::nogc, NodeTraits, BackOff, IsConst> { public: typedef cds::gc::nogc gc; typedef NodeTraits node_traits; typedef BackOff back_off; typedef typename node_traits::node_type node_type; typedef typename node_traits::value_type value_type; static constexpr bool const c_isConst = IsConst; typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; friend class iterator< gc, node_traits, back_off, !c_isConst >; protected: typedef typename node_type::atomic_ptr atomic_ptr; node_type * m_pNode; public: // for internal use only!!! 
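// Constructs an iterator positioned at the first bottom-list node past the head tower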
iterator( node_type& refHead ) : m_pNode( refHead[0].load( atomics::memory_order_relaxed )) {} static iterator from_node( node_type * pNode ) { iterator it; it.m_pNode = pNode; return it; } public: iterator() : m_pNode( nullptr ) {} iterator( iterator const& s) : m_pNode( s.m_pNode ) {} value_type * operator ->() const { assert( m_pNode != nullptr ); assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return node_traits::to_value_ptr( m_pNode ); } value_ref operator *() const { assert( m_pNode != nullptr ); assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return *node_traits::to_value_ptr( m_pNode ); } /// Pre-increment iterator& operator ++() { if ( m_pNode ) m_pNode = m_pNode->next(0).load( atomics::memory_order_relaxed ); return *this; } iterator& operator =(const iterator& src) { m_pNode = src.m_pNode; return *this; } template <bool C> bool operator ==(iterator<gc, node_traits, back_off, C> const& i ) const { return m_pNode == i.m_pNode; } template <bool C> bool operator !=(iterator<gc, node_traits, back_off, C> const& i ) const { return !( *this == i ); } }; }} // namespace skip_list::details //@endcond /// Lock-free skip-list set (template specialization for gc::nogc) /** @ingroup cds_intrusive_map @anchor cds_intrusive_SkipListSet_nogc This specialization is so-called append-only: no item reclamation may be performed. The class does not support deleting list items. See \ref cds_intrusive_SkipListSet_hp "SkipListSet" for a description of the skip-list. Template arguments: - \p T - type to be stored in the set. The type must be based on \p skip_list::node (for \p skip_list::base_hook) or it must have a member of type \p skip_list::node (for \p skip_list::member_hook). - \p Traits - type traits, default is \p skip_list::traits. It is possible to declare an option-based list with the \p cds::intrusive::skip_list::make_traits metafunction instead of the \p Traits template argument. Iterators The class supports a forward iterator (\ref iterator and \ref const_iterator). The iteration is ordered. The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. How to use You should incorporate \p skip_list::node into your struct \p T and provide an appropriate \p skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you define a struct based on \p skip_list::traits. Example for base hook: \code #include <cds/intrusive/skip_list_nogc.h> // Data stored in skip list struct my_data: public cds::intrusive::skip_list::node< cds::gc::nogc > { // key field std::string strKey; // other data // ...
}; // my_data compare functor struct my_data_cmp { int operator()( const my_data& d1, const my_data& d2 ) { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) { return s.compare( d.strKey ); } }; // Declare traits struct my_traits: public cds::intrusive::skip_list::traits { typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::nogc > > hook; typedef my_data_cmp compare; }; // Declare skip-list set type typedef cds::intrusive::SkipListSet< cds::gc::nogc, my_data, my_traits > traits_based_set; \endcode Equivalent option-based code: \code // GC-related specialization #include struct my_data { // see above }; struct compare { // see above }; // Declare option-based skip-list set typedef cds::intrusive::SkipListSet< cds::gc::nogc ,my_data , typename cds::intrusive::skip_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::nogc > > > ,cds::intrusive::opt::compare< my_data_cmp > >::type > option_based_set; \endcode */ template < typename T #ifdef CDS_DOXYGEN_INVOKED ,typename Traits = skip_list::traits #else ,typename Traits #endif > class SkipListSet< cds::gc::nogc, T, Traits > { public: typedef cds::gc::nogc gc; ///< No garbage collector is used typedef T value_type; ///< type of value stored in the skip-list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator; ///< key comparison functor based on \p Traits::compare and \p Traits::less # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename traits::item_counter item_counter; ///< Item counting policy typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option typedef typename traits::random_level_generator random_level_generator ; ///< random level generator typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::stat stat; ///< internal statistics type typedef typename traits::disposer disposer; ///< disposer /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) /** The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound but it should be no more than 32 (\p skip_list::c_nHeightLimit). 
*/ static unsigned int const c_nMaxHeight = std::conditional< (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, std::integral_constant< unsigned int, skip_list::c_nHeightLimit > >::type::value; //@cond static unsigned int const c_nMinHeight = 3; //@endcond protected: typedef typename node_type::atomic_ptr atomic_node_ptr; ///< Atomic node pointer protected: //@cond typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; typedef typename std::conditional< std::is_same< typename traits::internal_node_builder, cds::opt::none >::value ,intrusive_node_builder ,typename traits::internal_node_builder >::type node_builder; typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; struct position { node_type * pPrev[ c_nMaxHeight ]; node_type * pSucc[ c_nMaxHeight ]; node_type * pCur; }; class head_node: public node_type { typename node_type::atomic_ptr m_Tower[c_nMaxHeight]; public: head_node( unsigned int nHeight ) { for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) m_Tower[i].store( nullptr, atomics::memory_order_relaxed ); node_type::make_tower( nHeight, m_Tower ); } node_type * head() const { return const_cast( static_cast(this)); } void clear() { for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) m_Tower[i].store( nullptr, atomics::memory_order_relaxed ); node_type::m_pNext.store( nullptr, atomics::memory_order_relaxed ); } }; //@endcond protected: head_node m_Head; ///< head tower (max height) random_level_generator m_RandomLevelGen; ///< random level generator instance atomics::atomic m_nHeight; ///< estimated high level item_counter m_ItemCounter; ///< item counter mutable stat m_Stat; ///< internal statistics protected: //@cond unsigned int random_level() { // Random generator produces a number from range [0..31] // We need a number from range [1..32] return m_RandomLevelGen() + 1; } template node_type * build_node( Q v ) { return node_builder::make_tower( v, m_RandomLevelGen ); } static void dispose_node( node_type * pNode ) { assert( pNode != nullptr ); typename node_builder::node_disposer()( pNode ); disposer()( node_traits::to_value_ptr( pNode )); } template bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound, bool bStrictSearch ) const { node_type * pPred; node_type * pSucc; node_type * pCur = nullptr; int nCmp = 1; unsigned int nHeight = c_nMaxHeight; retry: if ( !bStrictSearch ) nHeight = m_nHeight.load( memory_model::memory_order_relaxed ); pPred = m_Head.head(); for ( int nLevel = (int) nHeight - 1; nLevel >= 0; --nLevel ) { while ( true ) { pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); if ( !pCur ) { // end of the list at level nLevel - goto next level break; } pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ) != pCur || pCur->next( nLevel ).load( memory_model::memory_order_acquire ) != pSucc ) { goto retry; } nCmp = cmp( *node_traits::to_value_ptr( pCur ), val ); if ( nCmp < 0 ) pPred = pCur; else if ( nCmp == 0 && bStopIfFound ) goto found; else break; } pos.pPrev[ nLevel ] = pPred; pos.pSucc[ nLevel ] = pCur; } if ( nCmp != 0 ) return false; found: pos.pCur = pCur; return pCur && nCmp == 0; } template bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) { unsigned 
int nHeight = pNode->height(); for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) pNode->next( nLevel ).store( nullptr, memory_model::memory_order_relaxed ); { node_type * p = pos.pSucc[0]; pNode->next( 0 ).store( pos.pSucc[ 0 ], memory_model::memory_order_release ); if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, pNode, memory_model::memory_order_release, memory_model::memory_order_relaxed )) { return false; } f( val ); } for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { node_type * p = nullptr; while ( true ) { node_type * q = pos.pSucc[ nLevel ]; if ( pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, memory_model::memory_order_relaxed )) { p = q; if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, pNode, memory_model::memory_order_release, memory_model::memory_order_relaxed )) break; } // Renew insert position find_position( val, pos, key_comparator(), false, true ); } } return true; } template <typename Q, typename Compare, typename Func> node_type * find_with_( Q& val, Compare cmp, Func f ) const { position pos; if ( find_position( val, pos, cmp, true, false )) { assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); f( *node_traits::to_value_ptr( pos.pCur ), val ); m_Stat.onFindFastSuccess(); return pos.pCur; } else { m_Stat.onFindFastFailed(); return nullptr; } } void increase_height( unsigned int nHeight ) { unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, atomics::memory_order_relaxed )); } //@endcond public: /// Default constructor /** The constructor checks whether the count of guards is enough for the skip-list and may raise an exception if not. */ SkipListSet() : m_Head( c_nMaxHeight ) , m_nHeight( c_nMinHeight ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); // Barrier for head node atomics::atomic_thread_fence( memory_model::memory_order_release ); } /// Clears and destructs the skip-list ~SkipListSet() { clear(); } public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for the skip-list has some features: - it has no post-increment operator - the iteration is ordered */ typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; /// Const iterator type typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; /// Returns a forward iterator addressing the first element in a set iterator begin() { return iterator( *m_Head.head()); } /// Returns a forward const iterator addressing the first element in a set const_iterator begin() const { return const_iterator( *m_Head.head()); } /// Returns a forward const iterator addressing the first element in a set const_iterator cbegin() const { return const_iterator( *m_Head.head()); } /// Returns a forward iterator that addresses the location succeeding the last element in a set. iterator end() { return iterator(); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set. const_iterator end() const { return const_iterator(); } /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
const_iterator cend() const { return const_iterator(); } //@} protected: //@cond iterator nonconst_end() const { return iterator(); } //@endcond public: /// Inserts a new node /** The function inserts \p val in the set if the set does not contain an item with a key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; position pos; while ( true ) { bool bFound = find_position( val, pos, key_comparator(), true, true ); if ( bFound ) { // scoped_node_ptr deletes the node tower if we create it if ( !bTowerMade ) scp.release(); m_Stat.onInsertFailed(); return false; } if ( !bTowerOk ) { build_node( pNode ); nHeight = pNode->height(); bTowerMade = bTowerOk = true; } if ( !insert_at_position( val, pNode, pos, []( value_type& ) {} )) { m_Stat.onInsertRetry(); continue; } increase_height( nHeight ); ++m_ItemCounter; m_Stat.onAddNode( nHeight ); m_Stat.onInsertSuccess(); scp.release(); return true; } } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If a new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor can change non-key fields of the \p item; however, \p func must guarantee that no other modifications are made to this item by concurrent threads while it is changing the item. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key is already in the set.
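\par Example
An illustrative sketch (\p set is a hypothetical set of \p my_data items declared as in the examples above):
\code
my_data d;
std::pair<bool, bool> res = set.update( d,
    []( bool bNew, my_data& item, my_data& /*val*/ ) {
        if ( !bNew ) {
            // item is the element already stored in the set;
            // update its non-key fields here
        }
    });
// res.first  - true if the operation succeeded
// res.second - true if d was inserted, false if an equal item already existed
\endcode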
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template <typename Func> std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true ) { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; position pos; while ( true ) { bool bFound = find_position( val, pos, key_comparator(), true, true ); if ( bFound ) { // scoped_node_ptr deletes the node tower if we created it before if ( !bTowerMade ) scp.release(); func( false, *node_traits::to_value_ptr(pos.pCur), val ); m_Stat.onUpdateExist(); return std::make_pair( true, false ); } if ( !bInsert ) { scp.release(); return std::make_pair( false, false ); } if ( !bTowerOk ) { build_node( pNode ); nHeight = pNode->height(); bTowerMade = bTowerOk = true; } if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { m_Stat.onInsertRetry(); continue; } increase_height( nHeight ); ++m_ItemCounter; scp.release(); m_Stat.onAddNode( nHeight ); m_Stat.onUpdateNew(); return std::make_pair( true, true ); } } //@cond template <typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Finds \p key /** \anchor cds_intrusive_SkipListSet_nogc_find_func The function searches the item with a key equal to \p key and calls the functor \p f for the item found. The interface of the \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found and \p key is the find function argument. The functor can change non-key fields of \p item. Note that the functor only guarantees that \p item cannot be disposed while the functor is executing. The functor does not serialize simultaneous access to the \p item. If such access is possible, you must provide your own synchronization schema at the item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as the \p f functor destination, i.e. the functor can modify both arguments. Note the compare functor specified in the \p Traits template parameter should accept a parameter of type \p Q that may differ from \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q& key, Func f ) const { return find_with_( key, key_comparator(), f ) != nullptr; } //@cond template <typename Q, typename Func> bool find( Q const& key, Func f ) const { return find_with_( key, key_comparator(), f ) != nullptr; } //@endcond /// Finds the key \p key using the \p pred predicate for comparing /** The function is an analog of \ref cds_intrusive_SkipListSet_nogc_find_func "find(Q&, Func)" but \p pred is used for key comparison. \p Less has an interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool find_with( Q& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_with_( key, cds::opt::details::make_comparator_from_less<Less>(), f ) != nullptr; } //@cond template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) const { CDS_UNUSED( pred ); return find_with_( key, cds::opt::details::make_comparator_from_less<Less>(), f ) != nullptr; } //@endcond /// Checks whether the set contains \p key /** The function searches the item with a key equal to \p key and returns a pointer to the item found or \p nullptr.
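\par Example
An illustrative sketch (\p set is a hypothetical set of \p my_data items keyed by \p strKey, as in the examples above):
\code
if ( my_data * p = set.contains( std::string("eleven"))) {
    // p points to the stored item; in this append-only specialization
    // the item is never disposed, so p stays valid
}
\endcode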
*/ template value_type * contains( Q const& key ) const { node_type * pNode = find_with_( key, key_comparator(), [](value_type& , Q const& ) {} ); if ( pNode ) return node_traits::to_value_ptr( pNode ); return nullptr; } //@cond template CDS_DEPRECATED("deprecated, use contains()") value_type * find( Q const& key ) const { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template value_type * contains( Q const& key, Less pred ) const { CDS_UNUSED( pred ); node_type * pNode = find_with_( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); if ( pNode ) return node_traits::to_value_ptr( pNode ); return nullptr; } //@cond template CDS_DEPRECATED("deprecated, use contains()") value_type * find_with( Q const& key, Less pred ) const { return contains( key, pred ); } //@endcond /// Gets minimum key from the set /** If the set is empty the function returns \p nullptr */ value_type * get_min() const { return node_traits::to_value_ptr( m_Head.head()->next( 0 )); } /// Gets maximum key from the set /** The function returns \p nullptr if the set is empty */ value_type * get_max() const { node_type * pPred; unsigned int nHeight = m_nHeight.load( memory_model::memory_order_relaxed ); pPred = m_Head.head(); for ( int nLevel = (int) nHeight - 1; nLevel >= 0; --nLevel ) { while ( true ) { node_type * pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); if ( !pCur ) { // end of the list at level nLevel - goto next level break; } pPred = pCur; } } return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : nullptr; } /// Clears the set (non-atomic) /** The function is not atomic. Finding and/or inserting is prohibited while clearing. Otherwise an unpredictable result may be encountered. Thus, \p clear() may be used only for debugging purposes. */ void clear() { node_type * pNode = m_Head.head()->next(0).load( memory_model::memory_order_relaxed ); m_Head.clear(); m_ItemCounter.reset(); m_nHeight.store( c_nMinHeight, memory_model::memory_order_release ); while ( pNode ) { node_type * pNext = pNode->next(0).load( memory_model::memory_order_relaxed ); dispose_node( pNode ); pNode = pNext; } } /// Returns item count in the set /** The value returned depends on item counter type provided by \p Traits template parameter. For \p atomicity::empty_item_counter the function always returns 0. The function is not suitable for checking the set emptiness, use \p empty(). */ size_t size() const { return m_ItemCounter; } /// Checks if the set is empty bool empty() const { return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; } /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. static constexpr unsigned int max_height() noexcept { return c_nMaxHeight; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_IMPL_H libcds-2.3.3/cds/intrusive/skip_list_rcu.h000066400000000000000000002263671341244201700206210ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H #include #include #include #include #include #include #include #include #include namespace cds { namespace intrusive { //@cond namespace skip_list { template class node< cds::urcu::gc< RCU >, Tag > { public: typedef cds::urcu::gc< RCU > gc; ///< Garbage collector typedef Tag tag ; ///< tag // Mark bits: // bit 0 - the item is logically deleted // bit 1 - the item is extracted (only for level 0) typedef cds::details::marked_ptr marked_ptr; ///< marked pointer typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer typedef atomic_marked_ptr tower_item_type; protected: atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) public: node * m_pDelChain; ///< Deleted node chain (local for a thread) protected: unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr atomics::atomic m_nUnlink; ///< Unlink helper public: /// Constructs a node of height 1 (a bottom-list node) node() : m_pNext( nullptr ) , m_pDelChain( nullptr ) , m_nHeight(1) , m_arrNext( nullptr ) { m_nUnlink.store( 1, atomics::memory_order_release ); } /// Constructs a node of height \p nHeight void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { assert( nHeight > 0 ); assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; m_nHeight = nHeight; m_nUnlink.store( nHeight, atomics::memory_order_release ); } atomic_marked_ptr * release_tower() { atomic_marked_ptr * pTower = m_arrNext; m_arrNext = nullptr; m_nHeight = 1; return pTower; } atomic_marked_ptr * get_tower() const { return m_arrNext; } void clear_tower() { for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel ) next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed ); } /// Access to element of next pointer array atomic_marked_ptr& next( unsigned int nLevel ) { assert( nLevel < height()); assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } /// Access to element of next pointer array (const version) atomic_marked_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height()); assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; } /// Access to element of next pointer array (same as \ref next function) atomic_marked_ptr& operator[]( unsigned int nLevel ) { return next( nLevel ); } /// Access to element of next pointer array (same as \ref next function) atomic_marked_ptr const& operator[]( unsigned int nLevel ) const { return next( nLevel ); } /// Height of the node unsigned int height() const { return m_nHeight; } /// Clears internal links void clear() { assert( m_arrNext == nullptr ); m_pNext.store( marked_ptr(), atomics::memory_order_release ); m_pDelChain = nullptr; } bool is_cleared() const { return m_pNext == atomic_marked_ptr() && m_arrNext == nullptr && m_nHeight <= 1; } bool level_unlinked( unsigned nCount = 1 ) { return m_nUnlink.fetch_sub( nCount, std::memory_order_relaxed ) == 1; } bool is_upper_level( unsigned nLevel ) const { return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1; } }; } // namespace skip_list //@endcond //@cond namespace skip_list { namespace details { template class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst > { public: typedef cds::urcu::gc< RCU > gc; typedef NodeTraits node_traits; typedef BackOff back_off; typedef typename node_traits::node_type node_type; typedef typename node_traits::value_type value_type; static bool const c_isConst = IsConst; typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; protected: typedef typename node_type::marked_ptr marked_ptr; typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; node_type * m_pNode; protected: void next() { back_off bkoff; for (;;) { if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { // Current node is marked as deleted. So, its next pointer can point to anything // In this case we interrupt our iteration and returns end() iterator. *this = iterator(); return; } marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed ); node_type * pp = p.ptr(); if ( p.bits()) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; } else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; } m_pNode = pp; break; } } public: // for internal use only!!! 
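// Positions the iterator at the first item of the skip-list: takes the
// head tower's level-0 successor, skipping it while it is marked as
// logically deleted (nodes are marked starting from their highest level)
// and spinning with back-off until a stable first node is found or the
// list turns out to be empty.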
iterator( node_type& refHead ) : m_pNode( nullptr ) { back_off bkoff; for (;;) { marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed ); if ( !p.ptr()) { // empty skip-list break; } node_type * pp = p.ptr(); // Logically deleted node is marked from highest level if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { m_pNode = pp; break; } bkoff(); } } public: iterator() : m_pNode( nullptr ) {} iterator( iterator const& s) : m_pNode( s.m_pNode ) {} value_type * operator ->() const { assert( m_pNode != nullptr ); assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return node_traits::to_value_ptr( m_pNode ); } value_ref operator *() const { assert( m_pNode != nullptr ); assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return *node_traits::to_value_ptr( m_pNode ); } /// Pre-increment iterator& operator ++() { next(); return *this; } iterator& operator = (const iterator& src) { m_pNode = src.m_pNode; return *this; } template bool operator ==(iterator const& i ) const { return m_pNode == i.m_pNode; } template bool operator !=(iterator const& i ) const { return !( *this == i ); } }; }} // namespace skip_list::details //@endcond /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU") /** @ingroup cds_intrusive_map @anchor cds_intrusive_SkipListSet_rcu The implementation of well-known probabilistic data structure called skip-list invented by W.Pugh in his papers: - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees - [1990] W.Pugh A Skip List Cookbook A skip-list is a probabilistic data structure that provides expected logarithmic time search without the need of rebalance. The skip-list is a collection of sorted linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. Each list has a level, ranging from 0 to 32. The bottom-level list contains all the nodes, and each higher-level list is a sublist of the lower-level lists. Each node is created with a random top level (with a random height), and belongs to all lists up to that level. The probability that a node has the height 1 is 1/2. The probability that a node has the height N is 1/2 ** N (more precisely, the distribution depends on an random generator provided, but our generators have this property). The lock-free variant of skip-list is implemented according to book - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", chapter 14.4 "A Lock-Free Concurrent Skiplist". Template arguments: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook) or it must have a member of type \p skip_list::node (for \p skip_list::member_hook). - \p Traits - set traits, default is \p skip_list::traits It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction instead of \p Traits template argument. @note Before including you should include appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. Iterators The class supports a forward iterator (\ref iterator and \ref const_iterator). The iteration is ordered. You may iterate over skip-list set items only under RCU lock. Only in this case the iterator is thread-safe since while RCU is locked any set's item cannot be reclaimed. @note The requirement of RCU lock during iterating means that any type of modification of the skip list (i.e. 
inserting, erasing and so on) is not possible. @warning The iterator object cannot be passed between threads. Example how to use skip-list set iterators: \code // First, you should include the header for RCU type you have chosen #include #include typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; struct Foo { // ... }; // Traits for your skip-list. // At least, you should define cds::opt::less or cds::opt::compare for Foo struct struct my_traits: public cds::intrusive::skip_list::traits { // ... }; typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set; my_skiplist_set theSet; // ... // Begin iteration { // Apply RCU locking manually typename rcu_type::scoped_lock sl; for ( auto it = theList.begin(); it != theList.end(); ++it ) { // ... } // rcu_type::scoped_lock destructor releases RCU lock implicitly } \endcode The iterator class supports the following minimalistic interface: \code struct iterator { // Default ctor iterator(); // Copy ctor iterator( iterator const& s); value_type * operator ->() const; value_type& operator *() const; // Pre-increment iterator& operator ++(); // Copy assignment iterator& operator = (const iterator& src); bool operator ==(iterator const& i ) const; bool operator !=(iterator const& i ) const; }; \endcode Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. How to use You should incorporate skip_list::node into your struct \p T and provide appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you define a struct based on \p skip_list::traits. Example for cds::urcu::general_buffered<> RCU and base hook: \code // First, you should include the header for RCU type you have chosen #include // Include RCU skip-list specialization #include // RCU type typedef typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; // Data stored in skip list struct my_data: public cds::intrusive::skip_list::node< rcu_type > { // key field std::string strKey; // other data // ... 
}; // my_data compare functor struct my_data_cmp { int operator()( const my_data& d1, const my_data& d2 ) { return d1.strKey.compare( d2.strKey ); } int operator()( const my_data& d, const std::string& s ) { return d.strKey.compare(s); } int operator()( const std::string& s, const my_data& d ) { return s.compare( d.strKey ); } }; // Declare traits struct my_traits: public cds::intrusive::skip_list::traits { typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook; typedef my_data_cmp compare; }; // Declare skip-list set type typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set; \endcode Equivalent option-based code: \code #include #include typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; struct my_data { // see above }; struct compare { // see above }; // Declare option-based skip-list set typedef cds::intrusive::SkipListSet< rcu_type ,my_data , typename cds::intrusive::skip_list::make_traits< cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > > ,cds::intrusive::opt::compare< my_data_cmp > >::type > option_based_set; \endcode */ template < class RCU ,typename T #ifdef CDS_DOXYGEN_INVOKED ,typename Traits = skip_list::traits #else ,typename Traits #endif > class SkipListSet< cds::urcu::gc< RCU >, T, Traits > { public: typedef cds::urcu::gc< RCU > gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the skip-list typedef Traits traits; ///< Traits template parameter typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less # else typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; # endif typedef typename traits::disposer disposer; ///< disposer typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits typedef typename traits::item_counter item_counter; ///< Item counting policy used typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option typedef typename traits::random_level_generator random_level_generator; ///< random level generator typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node typedef typename traits::back_off back_off; ///< Back-off strategy typedef typename traits::stat stat; ///< internal statistics type typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) /** The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound but it should be no more than 32 (\ref skip_list::c_nHeightLimit). 
*/ static unsigned int const c_nMaxHeight = std::conditional< (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, std::integral_constant< unsigned int, skip_list::c_nHeightLimit > >::type::value; //@cond static unsigned int const c_nMinHeight = 5; //@endcond protected: typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer protected: //@cond typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; typedef typename std::conditional< std::is_same< typename traits::internal_node_builder, cds::opt::none >::value ,intrusive_node_builder ,typename traits::internal_node_builder >::type node_builder; typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; static void dispose_node( value_type * pVal ) { assert( pVal ); typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal)); disposer()( pVal ); } struct node_disposer { void operator()( value_type * pVal ) { dispose_node( pVal ); } }; static void dispose_chain( node_type * pChain ) { if ( pChain ) { assert( !gc::is_locked()); auto f = [&pChain]() -> cds::urcu::retired_ptr { node_type * p = pChain; if ( p ) { pChain = p->m_pDelChain; return cds::urcu::make_retired_ptr( node_traits::to_value_ptr( p )); } return cds::urcu::make_retired_ptr( static_cast(nullptr)); }; gc::batch_retire(std::ref(f)); } } struct position { node_type * pPrev[ c_nMaxHeight ]; node_type * pSucc[ c_nMaxHeight ]; node_type * pNext[ c_nMaxHeight ]; node_type * pCur; node_type * pDelChain; position() : pDelChain( nullptr ) {} ~position() { dispose_chain( pDelChain ); } void dispose( node_type * p ) { assert( p != nullptr ); assert( p->m_pDelChain == nullptr ); p->m_pDelChain = pDelChain; pDelChain = p; } }; typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; //@endcond protected: skip_list::details::head_node< node_type > m_Head; ///< head tower (max height) random_level_generator m_RandomLevelGen; ///< random level generator instance atomics::atomic m_nHeight; ///< estimated high level atomics::atomic m_pDeferredDelChain ; ///< Deferred deleted node chain item_counter m_ItemCounter; ///< item counter mutable stat m_Stat; ///< internal statistics protected: //@cond unsigned int random_level() { // Random generator produces a number from range [0..31] // We need a number from range [1..32] return m_RandomLevelGen() + 1; } template node_type * build_node( Q v ) { return node_builder::make_tower( v, m_RandomLevelGen ); } //@endcond public: using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node private: //@cond struct chain_disposer { void operator()( node_type * pChain ) const { dispose_chain( pChain ); } }; typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer; //@endcond public: /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr; public: /// Default constructor SkipListSet() : m_Head( c_nMaxHeight ) , m_nHeight( c_nMinHeight ) , m_pDeferredDelChain( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); // Barrier for head node 
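// (the release fence makes the fully constructed head tower visible to
// other threads before any reference to this set can be published)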
atomics::atomic_thread_fence( memory_model::memory_order_release );
    }

    /// Clears and destructs the skip-list
    ~SkipListSet()
    {
        destroy();
    }

public:
    ///@name Forward iterators (thread-safe under RCU lock)
    //@{
    /// Forward iterator
    /**
        The forward iterator has the following features:
        - it has no post-increment operator
        - it depends on the iterator of the underlying \p OrderedList

        You may safely use iterators in a multi-threaded environment only under RCU lock.
        Otherwise, a crash is possible if another thread deletes the element the iterator points to.
    */
    typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;

    /// Const iterator type
    typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;

    /// Returns a forward iterator addressing the first element in a set
    iterator begin()
    {
        return iterator( *m_Head.head());
    }

    /// Returns a forward const iterator addressing the first element in a set
    const_iterator begin() const
    {
        return const_iterator( *m_Head.head());
    }
    /// Returns a forward const iterator addressing the first element in a set
    const_iterator cbegin() const
    {
        return const_iterator( *m_Head.head());
    }

    /// Returns a forward iterator that addresses the location succeeding the last element in a set.
    iterator end()
    {
        return iterator();
    }

    /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
    const_iterator end() const
    {
        return const_iterator();
    }
    /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
    const_iterator cend() const
    {
        return const_iterator();
    }
    //@}

public:
    /// Inserts new node
    /**
        The function inserts \p val in the set if it does not contain
        an item with key equal to \p val.

        The function applies RCU lock internally.

        Returns \p true if \p val is placed into the set, \p false otherwise.
    */
    bool insert( value_type& val )
    {
        return insert( val, []( value_type& ) {} );
    }

    /// Inserts new node
    /**
        This function is intended for derived non-intrusive containers.

        The function splits the creation of a new item into two parts:
        - create the item with its key only
        - insert the new item into the set; if the insertion succeeds,
          the \p f functor is called to initialize the value fields of \p val.

        The functor signature is:
        \code
            void func( value_type& val );
        \endcode
        where \p val is the item inserted. The user-defined functor \p f must guarantee
        that no other changes are made to this set's item by concurrent threads while
        \p val is being changed. The user-defined functor is called only if the
        insertion succeeds.

        RCU \p synchronize method can be called. RCU should not be locked.
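        For example (an illustrative sketch: \p theSet, \p Foo and its fields
        are ours, not part of the library):
        \code
            Foo* p = new Foo;
            p->key = 42;        // the key must be initialized before insertion
            bool bOk = theSet.insert( *p, []( Foo& f ) {
                f.payload = 100;   // called only after *p is linked into the set
            });
        \endcode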
*/ template bool insert( value_type& val, Func f ) { check_deadlock_policy::check(); position pos; bool bRet; { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; rcu_lock rcuLock; while ( true ) { bool bFound = find_position( val, pos, key_comparator(), true ); if ( bFound ) { // scoped_node_ptr deletes the node tower if we create it if ( !bTowerMade ) scp.release(); m_Stat.onInsertFailed(); bRet = false; break; } if ( !bTowerOk ) { build_node( pNode ); nHeight = pNode->height(); bTowerMade = bTowerOk = true; } if ( !insert_at_position( val, pNode, pos, f )) { m_Stat.onInsertRetry(); continue; } increase_height( nHeight ); ++m_ItemCounter; m_Stat.onAddNode( nHeight ); m_Stat.onInsertSuccess(); scp.release(); bRet = true; break; } } return bRet; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted into the set iff \p bInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refer to the same thing. The functor can change non-key fields of the \p item; however, \p func must guarantee that during changing no any other modifications could be made on this item by concurrent threads. RCU \p synchronize method can be called. RCU should not be locked. Returns std::pair where \p first is \p true if operation is successful, i.e. the node has been inserted or updated, \p second is \p true if new item has been added or \p false if the item with \p key already exists. 
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting" */ template std::pair update( value_type& val, Func func, bool bInsert = true ) { check_deadlock_policy::check(); position pos; std::pair bRet( true, false ); { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; rcu_lock rcuLock; while ( true ) { bool bFound = find_position( val, pos, key_comparator(), true ); if ( bFound ) { // scoped_node_ptr deletes the node tower if we create it before if ( !bTowerMade ) scp.release(); func( false, *node_traits::to_value_ptr(pos.pCur), val ); m_Stat.onUpdateExist(); break; } if ( !bInsert ) { scp.release(); bRet.first = false; break; } if ( !bTowerOk ) { build_node( pNode ); nHeight = pNode->height(); bTowerMade = bTowerOk = true; } if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { m_Stat.onInsertRetry(); continue; } increase_height( nHeight ); ++m_ItemCounter; scp.release(); m_Stat.onAddNode( nHeight ); m_Stat.onUpdateNew(); bRet.second = true; break; } } return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlink it from the set if it is found and is equal to \p val. Difference between \p erase() and \p %unlink() functions: \p erase() finds a key and deletes the item found. \p %unlink() searches an item by key and deletes it only if \p val is an item of that set, i.e. the pointer to item found is equal to &val . RCU \p synchronize method can be called. RCU should not be locked. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC asynchronously. The function returns \p true if success and \p false otherwise. */ bool unlink( value_type& val ) { check_deadlock_policy::check(); position pos; bool bRet; { rcu_lock l; if ( !find_position( val, pos, key_comparator(), false )) { m_Stat.onUnlinkFailed(); bRet = false; } else { node_type * pDel = pos.pCur; assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onUnlinkSuccess(); bRet = true; } else { m_Stat.onUnlinkFailed(); bRet = false; } } } return bRet; } /// Extracts the item from the set with specified \p key /** \anchor cds_intrusive_SkipListSet_rcu_extract The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is called. 
Example: \code typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; skip_list theList; // ... typename skip_list::exempt_ptr ep( theList.extract( 5 )); if ( ep ) { // Deal with ep //... // Dispose returned item. ep.release(); } \endcode */ template exempt_ptr extract( Q const& key ) { return exempt_ptr( do_extract( key )); } /// Extracts the item from the set with comparing functor \p pred /** The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template exempt_ptr extract_with( Q const& key, Less pred ) { return exempt_ptr( do_extract_with( key, pred )); } /// Extracts an item with minimal key from the list /** The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. If the skip-list is empty the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is manually called. Example: \code typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; skip_list theList; // ... typename skip_list::exempt_ptr ep(theList.extract_min()); if ( ep ) { // Deal with ep //... // Dispose returned item. ep.release(); } \endcode @note Due the concurrent nature of the list, the function extracts nearly minimum key. It means that the function gets leftmost item and tries to unlink it. During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. So, the function returns the item with minimum key at the moment of list traversing. */ exempt_ptr extract_min() { return exempt_ptr( do_extract_min()); } /// Extracts an item with maximal key from the list /** The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. If the skip-list is empty the function returns an empty \p exempt_ptr. RCU \p synchronize method can be called. RCU should NOT be locked. The function does not call the disposer for the item found. The disposer will be implicitly invoked when the returned object is destroyed or when its \p release() member function is manually called. Example: \code typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; skip_list theList; // ... typename skip_list::exempt_ptr ep( theList.extract_max()); if ( ep ) { // Deal with ep //... // Dispose returned item. ep.release(); } \endcode @note Due the concurrent nature of the list, the function extracts nearly maximal key. It means that the function gets rightmost item and tries to unlink it. During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. So, the function returns the item with maximum key at the moment of list traversing. */ exempt_ptr extract_max() { return exempt_ptr( do_extract_max()); } /// Deletes the item from the set /** \anchor cds_intrusive_SkipListSet_rcu_erase The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \p true. If the item with key equal to \p key is not found the function return \p false. 
Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU \p synchronize method can be called. RCU should not be locked. */ template bool erase( const Q& key ) { return do_erase( key, key_comparator(), [](value_type const&) {} ); } /// Delete the item from the set with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" but \p pred predicate is used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool erase_with( const Q& key, Less pred ) { CDS_UNUSED( pred ); return do_erase( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); } /// Deletes the item from the set /** \anchor cds_intrusive_SkipListSet_rcu_erase_func The function searches an item with key equal to \p key in the set, call \p f functor with item found, unlinks it from the set, and returns \p true. The \ref disposer specified in \p Traits class template parameter is called by garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p key is not found the function return \p false. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU \p synchronize method can be called. RCU should not be locked. */ template bool erase( Q const& key, Func f ) { return do_erase( key, key_comparator(), f ); } /// Delete the item from the set with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" but \p pred predicate is used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return do_erase( key, cds::opt::details::make_comparator_from_less(), f ); } /// Finds \p key /** @anchor cds_intrusive_SkipListSet_rcu_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return do_find_with( key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return do_find_with( key, key_comparator(), f ); } //@endcond /// Finds the key \p key with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" but \p cmp is used for key comparison. \p Less functor has the interface like \p std::less. 
\p cmp must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. The function applies RCU lock internally. */ template bool contains( Q const& key ) { return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return do_find_with( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); } //@cond template CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds \p key and return the item found /** \anchor cds_intrusive_SkipListSet_rcu_get The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. If \p key is not found it returns empty \p raw_ptr. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; skip_list theList; // ... typename skip_list::raw_ptr pVal; { // Lock RCU skip_list::rcu_lock lock; pVal = theList.get( 5 ); if ( pVal ) { // Deal with pVal //... } } // You can manually release pVal after RCU-locked section pVal.release(); \endcode */ template raw_ptr get( Q const& key ) { assert( gc::is_locked()); position pos; value_type * pFound; if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) return raw_ptr( pFound, raw_ptr_disposer( pos )); return raw_ptr( raw_ptr_disposer( pos )); } /// Finds \p key and return the item found /** The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. 
*/ template raw_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); assert( gc::is_locked()); value_type * pFound = nullptr; position pos; if ( do_find_with( key, cds::opt::details::make_comparator_from_less(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) { return raw_ptr( pFound, raw_ptr_disposer( pos )); } return raw_ptr( raw_ptr_disposer( pos )); } /// Returns item count in the set /** The value returned depends on item counter type provided by \p Traits template parameter. For \p atomicity::empty_item_counter the function always returns 0. Therefore, the function is not suitable for checking the set emptiness, use \p empty() member function for this purpose. */ size_t size() const { return m_ItemCounter; } /// Checks if the set is empty bool empty() const { return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; } /// Clears the set (not atomic) /** The function unlink all items from the set. The function is not atomic, thus, in multi-threaded environment with parallel insertions this sequence \code set.clear(); assert( set.empty()); \endcode the assertion could be raised. For each item the \p disposer will be called automatically after unlinking. */ void clear() { exempt_ptr ep; while ( (ep = extract_min())); } /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. static constexpr unsigned int max_height() noexcept { return c_nMaxHeight; } /// Returns const reference to internal statistics stat const& statistics() const { return m_Stat; } protected: //@cond bool is_extracted( marked_node_ptr const p ) const { return ( p.bits() & 2 ) != 0; } void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos ) { marked_node_ptr p( pCur.ptr()); if ( pCur->is_upper_level( nLevel ) && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( pCur->level_unlinked()) { if ( !is_extracted( pSucc )) { // We cannot free the node at this moment because RCU is locked // Link deleted nodes to a chain to free later pos.dispose( pCur.ptr()); m_Stat.onEraseWhileFind(); } else m_Stat.onExtractWhileFind(); } } } template bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) { assert( gc::is_locked()); node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; int nCmp = 1; retry: pPred = m_Head.head(); for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { while ( true ) { pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pCur.bits()) { // pCur.bits() means that pPred is logically deleted goto retry; } if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted. 
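// Help the removing thread: try to unlink pCur from pPred at this level;
// once the node is unlinked from all its levels it is put into the
// position's deferred-deletion chain (it cannot be freed immediately
// because RCU is locked). Then restart the traversal from the head,
// since the local snapshot is stale.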
help_remove( nLevel, pPred, pCur, pSucc, pos ); goto retry; } else { nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) pPred = pCur.ptr(); else if ( nCmp == 0 && bStopIfFound ) goto found; else break; } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } if ( nCmp != 0 ) return false; found: pos.pCur = pCur.ptr(); return pCur.ptr() && nCmp == 0; } bool find_min_position( position& pos ) { assert( gc::is_locked()); node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; retry: pPred = m_Head.head(); for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); // pCur.bits() means that pPred is logically deleted // head cannot be deleted assert( pCur.bits() == 0 ); if ( pCur.ptr()) { // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted. help_remove( nLevel, pPred, pCur, pSucc, pos ); goto retry; } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } return ( pos.pCur = pCur.ptr()) != nullptr; } bool find_max_position( position& pos ) { assert( gc::is_locked()); node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; retry: pPred = m_Head.head(); for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { while ( true ) { pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pCur.bits()) { // pCur.bits() means that pPred is logically deleted goto retry; } if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted. help_remove( nLevel, pPred, pCur, pSucc, pos ); goto retry; } else { if ( !pSucc.ptr()) break; pPred = pCur.ptr(); } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } return ( pos.pCur = pCur.ptr()) != nullptr; } bool renew_insert_position( value_type& val, node_type * pNode, position& pos ) { assert( gc::is_locked()); node_type * pPred; marked_node_ptr pSucc; marked_node_ptr pCur; key_comparator cmp; int nCmp = 1; retry: pPred = m_Head.head(); for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { while ( true ) { pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pCur.bits()) { // pCur.bits() means that pPred is logically deleted goto retry; } if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } // pSucc contains deletion mark for pCur pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) goto retry; if ( pSucc.bits()) { // pCur is marked, i.e. logically deleted. 
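// The marked node can be pNode itself if a concurrent thread has already
// started removing the very node we are trying to link in: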
if ( pCur.ptr() == pNode ) { // Node is removing while we are inserting it return false; } // try to help deleting pCur help_remove( nLevel, pPred, pCur, pSucc, pos ); goto retry; } else { nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) pPred = pCur.ptr(); else break; } } // Next level pos.pPrev[nLevel] = pPred; pos.pSucc[nLevel] = pCur.ptr(); } return nCmp == 0; } template bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) { assert( gc::is_locked()); unsigned int const nHeight = pNode->height(); pNode->clear_tower(); // Insert at level 0 { marked_node_ptr p( pos.pSucc[0] ); pNode->next( 0 ).store( p, memory_model::memory_order_relaxed ); if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) return false; f( val ); } // Insert at level 1..max for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { marked_node_ptr p; while ( true ) { marked_node_ptr pSucc( pos.pSucc[nLevel] ); // Set pNode->next // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) { // pNode has been marked as removed while we are inserting it // Stop inserting assert( p.bits() != 0 ); // Here pNode is linked at least level 0 so level_unlinked() cannot returns true CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); // pNode is linked up to nLevel - 1 // Remove it via find_position() find_position( val, pos, key_comparator(), false ); m_Stat.onLogicDeleteWhileInsert(); return true; } p = pSucc; // Link pNode into the list at nLevel if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // go to next level break; } // Renew insert position m_Stat.onRenewInsertPosition(); if ( !renew_insert_position( val, pNode, pos )) { // The node has been deleted while we are inserting it // Update current height for concurent removing CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); m_Stat.onRemoveWhileInsert(); // help to removing val find_position( val, pos, key_comparator(), false ); return true; } } } return true; } template bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) { assert( pDel != nullptr ); assert( gc::is_locked()); marked_node_ptr pSucc; back_off bkoff; unsigned const nMask = bExtract ? 
3u : 1u; // logical deletion (marking) for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); if ( pSucc.bits() == 0 ) { bkoff.reset(); while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask, memory_model::memory_order_release, atomics::memory_order_acquire )) { if ( pSucc.bits() == 0 ) { bkoff(); m_Stat.onMarkFailed(); } else if ( pSucc.bits() != nMask ) return false; } } } marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr()); while ( true ) { if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire )) { f( *node_traits::to_value_ptr( pDel )); // physical deletion // try fast erase p = pDel; for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire ); if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) { pDel->level_unlinked(); } else { // Make slow erase # ifdef CDS_DEBUG if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false )) assert( pDel != pos.pCur ); # else find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); # endif if ( bExtract ) m_Stat.onSlowExtract(); else m_Stat.onSlowErase(); return true; } } // Fast erasing success if ( !bExtract ) { // We cannot free the node at this moment since RCU is locked // Link deleted nodes to a chain to free later pos.dispose( pDel ); m_Stat.onFastErase(); } else m_Stat.onFastExtract(); return true; } else if ( p.bits()) { // Another thread is deleting pDel right now m_Stat.onEraseContention(); return false; } m_Stat.onEraseRetry(); bkoff(); } } enum finsd_fastpath_result { find_fastpath_found, find_fastpath_not_found, find_fastpath_abort }; template finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const { node_type * pPred; marked_node_ptr pCur; marked_node_ptr pSucc; marked_node_ptr pNull; back_off bkoff; unsigned attempt = 0; try_again: pPred = m_Head.head(); for ( int nLevel = static_cast( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); while ( pCur != pNull ) { if ( pCur.bits()) { // pPred is being removed if ( ++attempt < 4 ) { bkoff(); goto try_again; } return find_fastpath_abort; } if ( pCur.ptr()) { int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) { pPred = pCur.ptr(); pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); } else if ( nCmp == 0 ) { // found f( *node_traits::to_value_ptr( pCur.ptr()), val ); return find_fastpath_found; } else // pCur > val - go down break; } } } return find_fastpath_not_found; } template bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) { if ( find_position( val, pos, cmp, true )) { assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); f( *node_traits::to_value_ptr( pos.pCur ), val ); return true; } else return false; } template bool do_find_with( Q& val, Compare cmp, Func f ) { position pos; return do_find_with( val, cmp, f, pos ); } template bool do_find_with( Q& val, Compare cmp, Func f, position& pos ) { bool bRet; { rcu_lock l; switch ( find_fastpath( val, cmp, f )) { case find_fastpath_found: m_Stat.onFindFastSuccess(); return 
true; case find_fastpath_not_found: m_Stat.onFindFastFailed(); return false; default: break; } if ( find_slowpath( val, cmp, f, pos )) { m_Stat.onFindSlowSuccess(); bRet = true; } else { m_Stat.onFindSlowFailed(); bRet = false; } } return bRet; } template bool do_erase( Q const& val, Compare cmp, Func f ) { check_deadlock_policy::check(); position pos; bool bRet; { rcu_lock rcuLock; if ( !find_position( val, pos, cmp, false )) { m_Stat.onEraseFailed(); bRet = false; } else { node_type * pDel = pos.pCur; assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); if ( try_remove_at( pDel, pos, f, false )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onEraseSuccess(); bRet = true; } else { m_Stat.onEraseFailed(); bRet = false; } } } return bRet; } template value_type * do_extract_key( Q const& key, Compare cmp, position& pos ) { // RCU should be locked!!! assert( gc::is_locked()); node_type * pDel; if ( !find_position( key, pos, cmp, false )) { m_Stat.onExtractFailed(); pDel = nullptr; } else { pDel = pos.pCur; assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); unsigned int const nHeight = pDel->height(); if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onExtractSuccess(); } else { m_Stat.onExtractFailed(); pDel = nullptr; } } return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; } template value_type * do_extract( Q const& key ) { check_deadlock_policy::check(); value_type * pDel = nullptr; position pos; { rcu_lock l; pDel = do_extract_key( key, key_comparator(), pos ); } return pDel; } template value_type * do_extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); check_deadlock_policy::check(); value_type * pDel = nullptr; position pos; { rcu_lock l; pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less(), pos ); } return pDel; } value_type * do_extract_min() { assert( !gc::is_locked()); position pos; node_type * pDel; { rcu_lock l; if ( !find_min_position( pos )) { m_Stat.onExtractMinFailed(); pDel = nullptr; } else { pDel = pos.pCur; unsigned int const nHeight = pDel->height(); if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onExtractMinSuccess(); } else { m_Stat.onExtractMinFailed(); pDel = nullptr; } } } return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; } value_type * do_extract_max() { assert( !gc::is_locked()); position pos; node_type * pDel; { rcu_lock l; if ( !find_max_position( pos )) { m_Stat.onExtractMaxFailed(); pDel = nullptr; } else { pDel = pos.pCur; unsigned int const nHeight = pDel->height(); if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); m_Stat.onExtractMaxSuccess(); } else { m_Stat.onExtractMaxFailed(); pDel = nullptr; } } } return pDel ? 
node_traits::to_value_ptr( pDel ) : nullptr; } void increase_height( unsigned int nHeight ) { unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); if ( nCur < nHeight ) m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); } void destroy() { node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); while ( p ) { node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); dispose_node( node_traits::to_value_ptr( p )); p = pNext; } } //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H libcds-2.3.3/cds/intrusive/split_list.h000066400000000000000000001627601341244201700201310ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_H #define CDSLIB_INTRUSIVE_SPLIT_LIST_H #include #include #include namespace cds { namespace intrusive { /// Split-ordered list /** @ingroup cds_intrusive_map \anchor cds_intrusive_SplitListSet_hp Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" - [2008] Nir Shavit "The Art of Multiprocessor Programming" The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets without item moving on resizing. \anchor cds_SplitList_algo_desc Short description [from [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"] The algorithm keeps all the items in one lock-free linked list, and gradually assigns the bucket pointers to the places in the list where a sublist of 'correct' items can be found. A bucket is initialized upon first access by assigning it to a new 'dummy' node (dashed contour) in the list, preceding all items that should be in that bucket. A newly created bucket splits an older bucket's chain, reducing the access cost to its items. The table uses a modulo 2**i hash (there are known techniques for 'pre-hashing' before a modulo 2**i hash to overcome possible binary correlations among values). The table starts at size 2 and repeatedly doubles in size. Unlike moving an item, the operation of directing a bucket pointer can be done in a single CAS operation, and since items are not moved, they are never 'lost'. However, to make this approach work, one must be able to keep the items in the list sorted in such a way that any bucket's sublist can be 'split' by directing a new bucket pointer within it. This operation must be recursively repeatable, as every split bucket may be split again and again as the hash table grows. To achieve this goal the authors introduced recursive split-ordering, a new ordering on keys that keeps items in a given bucket adjacent in the list throughout the repeated splitting process. Magically, yet perhaps not surprisingly, recursive split-ordering is achieved by simple binary reversal: reversing the bits of the hash key so that the new key's most significant bits (MSB) are those that were originally its least significant. The split-order keys of regular nodes are exactly the bit-reverse image of the original keys after turning on their MSB. 
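        The following standalone sketch (not part of the library; \p reverse8,
        \p dummy_key, \p regular_key and the 8-bit key width are ours, chosen
        only for illustration) shows how such split-order keys can be computed:
        \code
            #include <cstdint>

            // Reverse the bits of an 8-bit value
            inline std::uint8_t reverse8( std::uint8_t x )
            {
                std::uint8_t r = 0;
                for ( int i = 0; i < 8; ++i, x >>= 1 )
                    r = static_cast<std::uint8_t>(( r << 1 ) | ( x & 1 ));
                return r;
            }

            // Dummy (bucket) node key: plain bit-reversal of the bucket number
            inline std::uint8_t dummy_key( std::uint8_t nBucket )
            {
                return reverse8( nBucket );
            }

            // Regular (item) node key: turn on the MSB, then reverse - every
            // regular key gets its lowest bit set, so it sorts after the dummy
            // node of its bucket
            inline std::uint8_t regular_key( std::uint8_t nHash )
            {
                return reverse8( nHash | 0x80 );
            }
        \endcode
        For instance, \p regular_key(9) == 145 and \p regular_key(13) == 177 both follow
        \p dummy_key(1) == 128; after the table doubles from size 4 to size 8, the new
        \p dummy_key(5) == 160 lands exactly between them, splitting the bucket without
        moving any item.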
For example, items 9 and 13 are in the 1 mod 4 bucket, which can be recursively split in two by inserting a new node between them. To insert (respectively delete or search for) an item in the hash table, hash its key to the appropriate bucket using recursive split-ordering, follow the pointer to the appropriate location in the sorted items list, and traverse the list until the key's proper location in the split-ordering (respectively until the key or a key indicating the item is not in the list is found). Because of the combinatorial structure induced by the split-ordering, this will require traversal of no more than an expected constant number of items. The design is modular: to implement the ordered items list, you can use one of several non-blocking list-based set algorithms: MichaelList, LazyList. Implementation Template parameters are: - \p GC - Garbage collector. Note the \p GC must be the same as the \p GC used for \p OrderedList - \p OrderedList - ordered list implementation used as a bucket for hash set, for example, \p MichaelList, \p LazyList. The intrusive ordered list implementation specifies the type \p T stored in the split-list set, the comparison functor for the type \p T and other features specific for the ordered list. - \p Traits - split-list traits, default is \p split_list::traits. Instead of defining \p Traits struct you can use option-based syntax provided by \p split_list::make_traits metafunction. There are several specialization of the split-list class for different \p GC: - for \ref cds_urcu_gc "RCU type" include - see \ref cds_intrusive_SplitListSet_rcu "RCU-based split-list" - for cds::gc::nogc include - see \ref cds_intrusive_SplitListSet_nogc "persistent SplitListSet". \anchor cds_SplitList_hash_functor Hash functor Some member functions of split-ordered list accept the key parameter of type \p Q which differs from \p value_type. It is expected that type \p Q contains full key of \p value_type, and for equal keys of type \p Q and \p value_type the hash values of these keys must be equal too. The hash functor \p Traits::hash should accept parameters of both type: \code // Our node type struct Foo { std::string key_ ; // key field // ... other fields }; // Hash functor struct fooHash { size_t operator()( const std::string& s ) const { return std::hash( s ); } size_t operator()( const Foo& f ) const { return (*this)( f.key_ ); } }; \endcode How to use Split-list based on \p IterableList differs from split-list based on \p MichaelList or \p LazyList because \p %IterableList stores data "as is" - it cannot use any hook. Suppose, your split-list contains values of type \p Foo. For \p %MichaelList and \p %LazyList, \p Foo declaration should be based on ordered-list node: - \p %MichaelList: \code struct Foo: public cds::intrusive::split_list::node< cds::intrusive::michael_list::node< cds::gc::HP > > { // ... field declarations }; \endcode - \p %LazyList: \code struct Foo: public cds::intrusive::split_list::node< cds::intrusive::lazy_list::node< cds::gc::HP > > { // ... field declarations }; \endcode For \p %IterableList, \p Foo should be based on \p void: \code struct Foo: public cds::intrusive::split_list::node { // ... field declarations }; \endcode Everything else is the same. Consider split-list based on \p MichaelList. 
First, you should choose ordered list type to use in your split-list set: \code // For gc::HP-based MichaelList implementation #include // cds::intrusive::SplitListSet declaration #include // Type of set items // Note you should declare your struct based on cds::intrusive::split_list::node // which is a wrapper for ordered-list node struct. // In our case, the node type for HP-based MichaelList is cds::intrusive::michael_list::node< cds::gc::HP > struct Foo: public cds::intrusive::split_list::node< cds::intrusive::michael_list::node< cds::gc::HP > > { std::string key_ ; // key field unsigned val_ ; // value field // ... other value fields }; // Declare comparator for the item struct FooCmp { int operator()( const Foo& f1, const Foo& f2 ) const { return f1.key_.compare( f2.key_ ); } }; // Declare base ordered-list type for split-list typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, typename cds::intrusive::michael_list::make_traits< // hook option cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > // item comparator option ,cds::opt::compare< FooCmp > >::type > Foo_list; \endcode Second, you should declare split-list set container: \code // Declare hash functor // Note, the hash functor accepts parameter type Foo and std::string struct FooHash { size_t operator()( const Foo& f ) const { return cds::opt::v::hash()( f.key_ ); } size_t operator()( const std::string& s ) const { return cds::opt::v::hash()( s ); } }; // Split-list set typedef typedef cds::intrusive::SplitListSet< cds::gc::HP ,Foo_list ,typename cds::intrusive::split_list::make_traits< cds::opt::hash< FooHash > >::type > Foo_set; \endcode Now, you can use \p Foo_set in your application. \code Foo_set fooSet; Foo * foo = new Foo; foo->key_ = "First"; fooSet.insert( *foo ); // and so on ... \endcode */ template < class GC, class OrderedList, # ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits # else class Traits # endif > class SplitListSet { public: typedef GC gc; ///< Garbage collector typedef Traits traits; ///< Set traits protected: //@cond typedef split_list::details::rebind_list_traits ordered_list_adapter; //@endcond public: # ifdef CDS_DOXYGEN_INVOKED typedef OrderedList ordered_list; ///< type of ordered list used as a base for split-list # else typedef typename ordered_list_adapter::result ordered_list; # endif typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor typedef typename ordered_list::disposer disposer; ///< Node disposer functor /// Hash functor for \p %value_type and all its derivatives you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::back_off back_off; ///< back-off strategy for spinning typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option typedef typename traits::stat stat; ///< Internal statistics, see \p spit_list::stat typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer /// Count of hazard pointer required static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount + 4; // +4 - for iterators protected: //@cond typedef split_list::node node_type; ///< split-list node type typedef typename ordered_list_adapter::node_traits node_traits; /// Bucket table implementation typedef typename split_list::details::bucket_table_selector< traits::dynamic_bucket_table , gc , typename ordered_list_adapter::aux_node , opt::allocator< typename traits::allocator > , opt::memory_model< memory_model > , opt::free_list< typename traits::free_list > >::type bucket_table; typedef typename bucket_table::aux_node_type aux_node_type; ///< auxiliary node type //@endcond protected: //@cond /// Ordered list wrapper to access protected members class ordered_list_wrapper: public ordered_list { typedef ordered_list base_class; typedef typename base_class::auxiliary_head bucket_head_type; public: bool insert_at( aux_node_type* pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val ); } template bool insert_at( aux_node_type * pHead, value_type& val, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val, f ); } template std::pair update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::update_at( h, val, func, bAllowInsert ); } template typename std::enable_if< std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value, std::pair >::type upsert_at( aux_node_type * pHead, Q& val, bool bAllowInsert ) { assert( pHead != nullptr ); bucket_head_type h( pHead ); return base_class::upsert_at( h, val, bAllowInsert ); } bool unlink_at( aux_node_type * pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::unlink_at( h, val ); } template typename std::enable_if< std::is_same< Iterator, typename ordered_list::iterator>::value && is_iterable_list< ordered_list >::value, bool >::type erase_at( Iterator iter ) { return base_class::erase_at( iter ); } template bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp, f ); } template bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp ); } template guarded_ptr extract_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::extract_at( h, val, cmp ); } template bool find_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp, f ); } template bool find_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp ); } template typename std::enable_if< std::is_same::value && is_iterable_list< 
ordered_list >::value, typename base_class::iterator >::type find_iterator_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h( pHead ); return base_class::find_iterator_at( h, val, cmp ); } template guarded_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::get_at( h, val, cmp ); } bool insert_aux_node( aux_node_type * pNode ) { return base_class::insert_aux_node( pNode ); } bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) { bucket_head_type h(pHead); return base_class::insert_aux_node( h, pNode ); } template void destroy( Predicate pred ) { base_class::destroy( pred ); } }; //@endcond protected: //@cond template class iterator_type : public split_list::details::iterator_type { typedef split_list::details::iterator_type iterator_base_class; typedef typename iterator_base_class::list_iterator list_iterator; friend class SplitListSet; public: iterator_type() : iterator_base_class() {} iterator_type( iterator_type const& src ) : iterator_base_class( src ) {} // This ctor should be protected... iterator_type( list_iterator itCur, list_iterator itEnd ) : iterator_base_class( itCur, itEnd ) {} }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator is based on \p OrderedList forward iterator and has some features: - it has no post-increment operator - it iterates items in unordered fashion - iterator cannot be moved across thread boundary because it may contain GC's guard that is thread-private GC data. Iterator thread safety depends on type of \p OrderedList: - for \p MichaelList and \p LazyList: iterator guarantees safety even if you delete the item that iterator points to because that item is guarded by hazard pointer. However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the set. Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. Use this iterator on the concurrent container for debugging purpose only. - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a split-list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( m_List.begin(), m_List.end()); } /// Returns an iterator that addresses the location succeeding the last element in a split-list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the split-list. 
For empty list \code begin() == end() \endcode */ iterator end() { return iterator( m_List.end(), m_List.end()); } /// Returns a forward const iterator addressing the first element in a split-list const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a split-list const_iterator cbegin() const { return const_iterator( m_List.cbegin(), m_List.cend()); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator end() const { return cend(); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator cend() const { return const_iterator( m_List.cend(), m_List.cend()); } //@} public: /// Initialize split-ordered list of default capacity /** The default capacity is defined in bucket table constructor. See \p split_list::expandable_bucket_table, \p split_list::static_bucket_table which selects by \p split_list::dynamic_bucket_table option. */ SplitListSet() : m_nBucketCountLog2(1) , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) { init(); } /// Initialize split-ordered list SplitListSet( size_t nItemCount ///< estimate average of item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. ) : m_Buckets( nItemCount, nLoadFactor ) , m_nBucketCountLog2(1) , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) { init(); } /// Destroys split-list set ~SplitListSet() { // list contains aux node that cannot be retired // all aux nodes will be destroyed by bucket table dtor m_List.destroy( []( node_type * pNode ) -> bool { return !pNode->is_dummy(); } ); gc::force_dispose(); } public: /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); if ( m_List.insert_at( pHead, val )) { inc_item_count(); m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertFailed(); return false; } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows to split creating of new item into two part: - create item with key only - insert new item into the set - if inserting is success, calls \p f functor to initialize value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The user-defined functor is called only if the inserting is success. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. */ template bool insert( value_type& val, Func f ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); if ( m_List.insert_at( pHead, val, f )) { inc_item_count(); m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertFailed(); return false; } /// Updates the node /** The operation performs inserting or changing data with lock-free manner. 
If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature depends of the type of \p OrderedList: for \p MichaelList, \p LazyList \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p %update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments refers to the same thing. The functor may change non-key fields of the \p item. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization. for \p IterableList \code void func( value_type& val, value_type * old ); \endcode where - \p val - argument \p val passed into the \p %update() function - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p val already is in the list. */ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); std::pair bRet = m_List.update_at( pHead, val, func, bAllowInsert ); if ( bRet.first && bRet.second ) { inc_item_count(); m_Stat.onUpdateNew(); } else m_Stat.onUpdateExist(); return bRet; } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Inserts or updates the node (only for \p IterableList) /** The operation performs inserting or changing data with lock-free manner. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the current element is changed to \p val, the old element will be retired later by call \p Traits::disposer. Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if \p val has been added or \p false if the item with that key already in the set. */ #ifdef CDS_DOXYGEN_INVOKED std::pair upsert( value_type& val, bool bAllowInsert = true ) #else template typename std::enable_if< std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value, std::pair >::type upsert( Q& val, bool bAllowInsert = true ) #endif { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); std::pair bRet = m_List.upsert_at( pHead, val, bAllowInsert ); if ( bRet.first && bRet.second ) { inc_item_count(); m_Stat.onUpdateNew(); } else m_Stat.onUpdateExist(); return bRet; } /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlinks it from the set if it is found and is equal to \p val. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. 
\p unlink finds an item by key and deletes it only if \p val is an item of that set, i.e. the pointer to item found is equal to &val . The function returns \p true if success and \p false otherwise. */ bool unlink( value_type& val ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.unlink_at( pHead, val )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } /// Deletes the item from the set /** \anchor cds_intrusive_SplitListSet_hp_erase The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \p true. If the item with key equal to \p key is not found the function return \p false. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. \p unlink finds an item by key and deletes it only if \p key is an item of that set, i.e. the pointer to item found is equal to &key . Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template bool erase( Q const& key ) { return erase_( key, key_comparator()); } /// Deletes the item from the set with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_SplitListSet_hp_erase "erase(Q const&)" but \p pred predicate is used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool erase_with( const Q& key, Less pred ) { CDS_UNUSED( pred ); return erase_( key, typename ordered_list_adapter::template make_compare_from_less()); } /// Deletes the item from the set /** \anchor cds_intrusive_SplitListSet_hp_erase_func The function searches an item with key equal to \p key in the set, call \p f functor with item found, unlinks it from the set, and returns \p true. The \ref disposer specified by \p OrderedList class template parameter is called by garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p key is not found the function return \p false. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template bool erase( Q const& key, Func f ) { return erase_( key, key_comparator(), f ); } /// Deletes the item from the set with comparing functor \p pred /** The function is an analog of \ref cds_intrusive_SplitListSet_hp_erase_func "erase(Q const&, Func)" but \p pred predicate is used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); } /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) /** Returns \p true if the operation is successful, \p false otherwise. The function can return \p false if the node the iterator points to has already been deleted by other thread. The function does not invalidate the iterator, it remains valid and can be used for further traversing. @note \p %erase_at() is supported only for \p %SplitListSet based on \p IterableList. 
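A minimal usage sketch (\p Foo_iterable_set below is a hypothetical \p %SplitListSet typedef built on \p IterableList, with \p Foo from the examples above): \code
Foo_iterable_set s;
// ... the set is filled, possibly concurrently ...
for ( auto it = s.begin(); it != s.end(); ++it ) {
    if ( it->val_ == 0 ) {
        // erase_at() may return false if another thread has already erased
        // the item; the iterator stays valid and the loop continues
        s.erase_at( it );
    }
}
\endcode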
*/ #ifdef CDS_DOXYGEN_INVOKED bool erase_at( iterator const& iter ) #else template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type erase_at( Iterator const& iter ) #endif { assert( iter != end()); if ( m_List.erase_at( iter.underlying_iterator())) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } return false; } /// Extracts the item with specified \p key /** \anchor cds_intrusive_SplitListSet_hp_extract The function searches an item with key equal to \p key, unlinks it from the set, and returns it as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. The \p disposer specified in \p OrderedList class' template parameter is called automatically by garbage collector \p GC when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. Usage: \code typedef cds::intrusive::SplitListSet< your_template_args > splitlist_set; splitlist_set theSet; // ... { splitlist_set::guarded_ptr gp( theSet.extract( 5 )); if ( gp) { // Deal with gp // ... } // Destructor of gp releases internal HP guard } \endcode */ template guarded_ptr extract( Q const& key ) { return extract_( key ); } /// Extracts the item using compare functor \p pred /** The function is an analog of \ref cds_intrusive_SplitListSet_hp_extract "extract(Q const&)" but \p pred predicate is used for key comparing. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr extract_with( Q const& key, Less pred ) { return extract_with_( key, pred ); } /// Finds the key \p key /** \anchor cds_intrusive_SplitListSet_hp_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_( key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_( key, key_comparator(), f ); } //@endcond /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) /** If \p key is not found the function returns \p end(). 
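For example (a sketch reusing the hypothetical \p Foo_iterable_set above): \code
Foo key;
key.key_ = "First";
auto it = s.find( key );
if ( it != s.end()) {
    // inspect *it; for an IterableList-based set the iterator is thread-safe
}
\endcode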
@note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find( Q& key ) { return find_iterator_( key, key_comparator()); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find( Q const& key ) { return find_iterator_( key, key_comparator()); } //@endcond /// Finds the key \p key with \p pred predicate for comparing /** The function is an analog of \ref cds_intrusive_SplitListSet_hp_find_func "find(Q&, Func)" but \p cmp is used for key compare. \p Less has the interface like \p std::less. \p cmp must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); } //@endcond /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) /** The function is an analog of \p find(Q&) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. If \p key is not found the function returns \p end(). @note This function is supported only for the set based on \p IterableList */ template #ifdef CDS_DOXYGEN_INVOKED iterator #else typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type #endif find_with( Q& key, Less pred ) { CDS_UNUSED( pred ); return find_iterator_( key, typename ordered_list_adapter::template make_compare_from_less()); } //@cond template typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type find_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_iterator_( key, typename ordered_list_adapter::template make_compare_from_less()); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. */ template bool contains( Q const& key ) { return find_( key, key_comparator()); } /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less()); } /// Finds the key \p key and return the item found /** \anchor cds_intrusive_SplitListSet_hp_get The function searches the item with key equal to \p key and returns the item found as \p guarded_ptr. If \p key is not found the function returns an empty guarded pointer. 
The \p disposer specified in \p OrderedList class' template parameter is called by garbage collector \p GC automatically when returned \p guarded_ptr object will be destroyed or released. @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. Usage: \code typedef cds::intrusive::SplitListSet< your_template_params > splitlist_set; splitlist_set theSet; // ... { splitlist_set::guarded_ptr gp = theSet.get( 5 ); if ( gp ) { // Deal with gp //... } // Destructor of guarded_ptr releases internal HP guard } \endcode Note the compare functor specified for \p OrderedList template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. */ template guarded_ptr get( Q const& key ) { return get_( key ); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_intrusive_SplitListSet_hp_get "get( Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template guarded_ptr get_with( Q const& key, Less pred ) { return get_with_( key, pred ); } /// Returns item count in the set size_t size() const { return m_ItemCounter; } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of split-list set implementation. */ bool empty() const { return size() == 0; } /// Clears the set (non-atomic) /** The function unlink all items from the set. The function is not atomic. After call the split-list can be non-empty. For each item the \p disposer is called after unlinking. 
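A teardown sketch (\p Foo_set is the typedef from the examples above): \code
Foo_set s;
// ... stop all threads that may access s ...
s.clear();
assert( s.empty()); // guaranteed only in the absence of concurrent inserts
\endcode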
*/ void clear() { iterator it = begin(); while ( it != end()) { iterator i(it); ++i; unlink( *it ); it = i; } } /// Returns internal statistics stat const& statistics() const { return m_Stat; } /// Returns internal statistics for \p OrderedList typename OrderedList::stat const& list_statistics() const { return m_List.statistics(); } protected: //@cond aux_node_type * alloc_aux_node( size_t nHash ) { m_Stat.onHeadNodeAllocated(); aux_node_type* p = m_Buckets.alloc_aux_node(); if ( p ) { CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; // p->m_nHash is read-only data member p->m_nHash = nHash; CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; # ifdef CDS_DEBUG cds_assert( !p->m_busy.load( atomics::memory_order_acquire )); p->m_busy.store( true, atomics::memory_order_release ); # endif } return p; } void free_aux_node( aux_node_type * p ) { # ifdef CDS_DEBUG cds_assert( p->m_busy.load( atomics::memory_order_acquire )); p->m_busy.store( false, atomics::memory_order_release ); # endif m_Buckets.free_aux_node( p ); m_Stat.onHeadNodeFreed(); } /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ); } size_t bucket_no( size_t nHash ) const { return nHash & ((1 << m_nBucketCountLog2.load( memory_model::memory_order_relaxed )) - 1); } static size_t parent_bucket( size_t nBucket ) { assert( nBucket > 0 ); return nBucket & ~(1 << bitop::MSBnz( nBucket )); } aux_node_type * init_bucket( size_t const nBucket ) { assert( nBucket > 0 ); size_t nParent = parent_bucket( nBucket ); aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); m_Stat.onRecursiveInitBucket(); } assert( pParentBucket != nullptr ); // Allocate an aux node for new bucket aux_node_type * pBucket = m_Buckets.bucket( nBucket ); back_off bkoff; for ( ;; pBucket = m_Buckets.bucket( nBucket )) { if ( pBucket ) return pBucket; pBucket = alloc_aux_node( split_list::dummy_hash( nBucket )); if ( pBucket ) { if ( m_List.insert_aux_node( pParentBucket, pBucket )) { m_Buckets.bucket( nBucket, pBucket ); m_Stat.onNewBucket(); return pBucket; } // Another thread set the bucket. Wait while it done free_aux_node( pBucket ); m_Stat.onBucketInitContenton(); break; } // There are no free buckets. It means that the bucket table is full // Wait while another thread set the bucket or a free bucket will be available m_Stat.onBucketsExhausted(); bkoff(); } // Another thread set the bucket. 
Wait while it done for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) { bkoff(); m_Stat.onBusyWaitBucketInit(); } return pBucket; } aux_node_type * get_bucket( size_t nHash ) { size_t nBucket = bucket_no( nHash ); aux_node_type * pHead = m_Buckets.bucket( nBucket ); if ( pHead == nullptr ) pHead = init_bucket( nBucket ); assert( pHead->is_dummy()); return pHead; } void init() { // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); // atomicity::empty_item_counter is not allowed as a item counter static_assert(!std::is_same::value, "cds::atomicity::empty_item_counter is not allowed as a item counter"); // Initialize bucket 0 aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); assert( pNode != nullptr ); // insert_aux_node cannot return false for empty list CDS_VERIFY( m_List.insert_aux_node( pNode )); m_Buckets.bucket( 0, pNode ); } static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) { return nBucketCount * nLoadFactor; } void inc_item_count() { size_t nMaxCount = m_nMaxItemCount.load( memory_model::memory_order_relaxed ); if ( ++m_ItemCounter <= nMaxCount ) return; size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed ); const size_t nBucketCount = static_cast(1) << sz; if ( nBucketCount < m_Buckets.capacity()) { // we may grow the bucket table const size_t nLoadFactor = m_Buckets.load_factor(); if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) return; // someone already have updated m_nBucketCountLog2, so stop here m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); } else m_nMaxItemCount.store( std::numeric_limits::max(), memory_model::memory_order_relaxed ); } template bool find_( Q& val, Compare cmp, Func f ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); return m_Stat.onFind( m_List.find_at( pHead, sv, cmp, [&f]( value_type& item, split_list::details::search_value_type& v ) { f( item, v.val ); } ) ); } template bool find_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); return m_Stat.onFind( m_List.find_at( pHead, sv, cmp )); } template iterator find_iterator_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); return iterator( m_List.find_iterator_at( pHead, sv, cmp ), m_List.end()); } template guarded_ptr get_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); guarded_ptr gp = m_List.get_at( pHead, sv, cmp ); m_Stat.onFind( !gp.empty()); return gp; } template guarded_ptr get_( Q const& key ) { return get_( key, key_comparator()); } template guarded_ptr get_with_( Q const& key, Less ) { return get_( key, 
typename ordered_list_adapter::template make_compare_from_less()); } template bool erase_( Q const& val, Compare cmp, Func f ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp, f )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } template bool erase_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } template guarded_ptr extract_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); guarded_ptr gp = m_List.extract_at( pHead, sv, cmp ); if ( gp ) { --m_ItemCounter; m_Stat.onExtractSuccess(); } else m_Stat.onExtractFailed(); return gp; } template guarded_ptr extract_( Q const& key ) { return extract_( key, key_comparator()); } template guarded_ptr extract_with_( Q const& key, Less ) { return extract_( key, typename ordered_list_adapter::template make_compare_from_less()); } //@endcond protected: //@cond static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value; typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table; padded_bucket_table m_Buckets; ///< bucket table typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list; padded_ordered_list m_List; ///< Ordered list containing split-list items atomics::atomic m_nBucketCountLog2; ///< log2( current bucket count ) atomics::atomic m_nMaxItemCount; ///< number of items container can hold, before we have to resize hash m_HashFunctor; ///< Hash functor item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_H libcds-2.3.3/cds/intrusive/split_list_nogc.h000066400000000000000000000672271341244201700211410ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H #define CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H #include #include #include #include namespace cds { namespace intrusive { /// Split-ordered list (template specialization for gc::nogc) /** @ingroup cds_intrusive_map \anchor cds_intrusive_SplitListSet_nogc This specialization is intended for so-called persistent usage when no item reclamation may be performed. The class does not support deleting of list item. See \ref cds_intrusive_SplitListSet_hp "SplitListSet" for description of template parameters. 
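A minimal sketch of the intended persistent usage is shown below. The typedefs are hypothetical analogs of the HP-based example, assuming \p Foo is hooked to \p michael_list::node< cds::gc::nogc > and \p FooCmp / \p FooHash are defined as before: \code
typedef cds::intrusive::MichaelList< cds::gc::nogc, Foo,
    typename cds::intrusive::michael_list::make_traits<
        cds::intrusive::opt::hook<
            cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::nogc > > >
        ,cds::opt::compare< FooCmp >
    >::type
> Foo_nogc_list;

typedef cds::intrusive::SplitListSet< cds::gc::nogc, Foo_nogc_list,
    typename cds::intrusive::split_list::make_traits<
        cds::opt::hash< FooHash >
    >::type
> Foo_nogc_set;

Foo_nogc_set s;

// Fill phase: items are never deleted individually
Foo* p = new Foo;
p->key_ = "First";
s.insert( *p );

// Lookup phase: contains() returns a pointer to the stored item or nullptr
Foo key;
key.key_ = "First";
if ( Foo* q = s.contains( key ))
    ++q->val_;
\endcode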
The template parameter \p OrderedList should be any gc::nogc-derived ordered list, for example, \ref cds_intrusive_MichaelList_nogc "persistent MichaelList", \ref cds_intrusive_LazyList_nogc "persistent LazyList" */ template < class OrderedList, #ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits #else class Traits #endif > class SplitListSet< cds::gc::nogc, OrderedList, Traits > { public: typedef cds::gc::nogc gc; ///< Garbage collector typedef Traits traits; ///< Traits template parameters /// Hash functor for \p value_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; protected: //@cond typedef split_list::details::rebind_list_traits ordered_list_adapter; //@endcond public: # ifdef CDS_DOXYGEN_INVOKED typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list # else typedef typename ordered_list_adapter::result ordered_list; # endif typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor typedef typename ordered_list::disposer disposer; ///< Node disposer functor typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::back_off back_off; ///< back-off strategy typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::stat stat; ///< Internal statistics, see \p spit_list::stat // GC and OrderedList::gc must be the same static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); // atomicity::empty_item_counter is not allowed as a item counter static_assert(!std::is_same::value, "cds::atomicity::empty_item_counter is not allowed as a item counter"); protected: //@cond typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list typedef split_list::node node_type; ///< split-list node type /// Split-list node traits /** This traits is intended for converting between underlying ordered list node type \ref list_node_type and split-list node type \ref node_type */ typedef typename ordered_list_adapter::node_traits node_traits; /// Bucket table implementation typedef typename split_list::details::bucket_table_selector< traits::dynamic_bucket_table , gc , typename ordered_list_adapter::aux_node , opt::allocator< typename traits::allocator > , opt::memory_model< memory_model > , opt::free_list< typename traits::free_list > >::type bucket_table; typedef typename bucket_table::aux_node_type aux_node_type; ///< dummy node type typedef typename ordered_list::iterator list_iterator; typedef typename ordered_list::const_iterator list_const_iterator; //@endcond protected: //@cond /// Ordered list wrapper to access protected members class ordered_list_wrapper: public ordered_list { typedef ordered_list base_class; typedef typename base_class::auxiliary_head bucket_head_type; public: list_iterator insert_at_( aux_node_type * pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(static_cast(pHead)); return base_class::insert_at_( h, val ); } template std::pair update_at_( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { assert( pHead != nullptr ); bucket_head_type h(static_cast(pHead)); return base_class::update_at_( h, val, func, 
bAllowInsert ); } template <typename Q, typename Compare, typename Func> bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(static_cast<list_node_type*>(pHead)); return base_class::find_at( h, val, cmp, f ); } template <typename Q, typename Compare> list_iterator find_at_( aux_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(static_cast<list_node_type*>(pHead)); return base_class::find_at_( h, val, cmp ); } bool insert_aux_node( aux_node_type * pNode ) { return base_class::insert_aux_node( pNode ); } bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) { bucket_head_type h(static_cast<list_node_type*>(pHead)); return base_class::insert_aux_node( h, pNode ); } template <typename Predicate> void erase_for( Predicate pred ) { return base_class::erase_for( pred ); } }; //@endcond public: /// Initialize split-ordered list of default capacity /** The default capacity is defined in the bucket table constructor. See split_list::expandable_bucket_table, split_list::static_bucket_table which selects by split_list::dynamic_bucket_table option. */ SplitListSet() : m_nBucketCountLog2(1) , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) { init(); } /// Initialize split-ordered list SplitListSet( size_t nItemCount ///< estimate average of item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. ) : m_Buckets( nItemCount, nLoadFactor ) , m_nBucketCountLog2(1) , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) { init(); } /// Destroys split-list ~SplitListSet() { m_List.clear(); } public: /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { return insert_( val ) != end(); } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p update() function If a new item has been inserted (i.e. \p bNew is \p true) then the \p item and \p val arguments refer to the same thing. The functor may change non-key fields of the \p item. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key is already in the list. @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to the inserted item and does not require any node-level synchronization.
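For example (a sketch reusing the hypothetical \p Foo_nogc_set above): \code
Foo* p = new Foo;
p->key_ = "First";
p->val_ = 42;
std::pair<bool, bool> res = s.update( *p,
    []( bool bNew, Foo& stored, Foo& arg ) {
        if ( !bNew )
            stored.val_ = arg.val_; // item already present: refresh non-key field
    });
if ( res.first && !res.second )
    delete p; // p was not linked into the set; the caller still owns it
\endcode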
*/ template std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { std::pair ret = update_( val, func, bAllowInsert ); return std::make_pair( ret.first != end(), ret.second ); } //@cond template CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. */ template value_type * contains( Q const& key ) { iterator it = find_( key ); if ( it == end()) return nullptr; return &*it; } //@cond template CDS_DEPRECATED("deprecated, use contains()") value_type * find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is similar to contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the list. */ template value_type * contains( Q const& key, Less pred ) { iterator it = find_with_( key, pred ); if ( it == end()) return nullptr; return &*it; } //@cond template CDS_DEPRECATED("deprecated, use contains()") value_type * find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key /** \anchor cds_intrusive_SplitListSet_nogc_find_func The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. The function returns \p true if \p key is found, \p false otherwise. */ template bool find( Q& key, Func f ) { return find_( key, key_comparator(), f ); } //@cond template bool find( Q const& key, Func f ) { return find_( key, key_comparator(), f ); } //@endcond /// Finds the key \p key with \p pred predicate for comparing /** The function is an analog of \ref cds_intrusive_SplitListSet_nogc_find_func "find(Q&, Func)" but \p cmp is used for key compare. \p Less has the interface like \p std::less. \p cmp must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); } //@cond template bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); } //@endcond /// Clears the set (non-atomic, not thread-safe) /** The function unlink all items from the set. The function is not atomic. 
It cleans up each bucket and then resets the item counter to zero. If there is a thread performing insertion while \p %clear() is working, the result is undefined in the general case: \p empty() may return \p true but the set may still contain items. Therefore, \p %clear() may be used only for debugging purposes. For each item the \p disposer is called after unlinking. */ void clear() { m_List.erase_for( []( value_type const& val ) -> bool { return !node_traits::to_node_ptr( val )->is_dummy(); } ); m_ItemCounter.reset(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of the split-list implementation. */ bool empty() const { return size() == 0; } /// Returns item count in the set size_t size() const { return m_ItemCounter; } /// Returns internal statistics stat const& statistics() const { return m_Stat; } /// Returns internal statistics for \p OrderedList typename OrderedList::stat const& list_statistics() const { return m_List.statistics(); } protected: //@cond template <bool IsConst> class iterator_type : public split_list::details::iterator_type< node_traits, ordered_list, IsConst > { typedef split_list::details::iterator_type< node_traits, ordered_list, IsConst > iterator_base_class; typedef typename iterator_base_class::list_iterator list_iterator; public: iterator_type() : iterator_base_class() {} iterator_type( iterator_type const& src ) : iterator_base_class( src ) {} // This ctor should be protected... iterator_type( list_iterator itCur, list_iterator itEnd ) : iterator_base_class( itCur, itEnd ) {} }; //@endcond public: ///@name Forward iterators //@{ /// Forward iterator /** The forward iterator for a split-list has some features: - it has no post-increment operator - it depends on the iterator of the underlying \p OrderedList */ typedef iterator_type<false> iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type<true> const_iterator; /// Returns a forward iterator addressing the first element in a split-list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( m_List.begin(), m_List.end()); } /// Returns an iterator that addresses the location succeeding the last element in a split-list /** Do not use the value returned by the \p end() function to access any item. The returned value can be used only to control reaching the end of the split-list.
For empty list \code begin() == end() \endcode */ iterator end() { return iterator( m_List.end(), m_List.end()); } /// Returns a forward const iterator addressing the first element in a split-list const_iterator begin() const { return const_iterator( m_List.begin(), m_List.end()); } /// Returns a forward const iterator addressing the first element in a split-list const_iterator cbegin() const { return const_iterator( m_List.cbegin(), m_List.cend()); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator end() const { return const_iterator( m_List.end(), m_List.end()); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator cend() const { return const_iterator( m_List.cend(), m_List.cend()); } //@} protected: //@cond iterator insert_( value_type& val ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); list_iterator it = m_List.insert_at_( pHead, val ); if ( it != m_List.end()) { inc_item_count(); m_Stat.onInsertSuccess(); return iterator( it, m_List.end()); } m_Stat.onInsertFailed(); return end(); } template std::pair update_( value_type& val, Func func, bool bAllowInsert ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); std::pair ret = m_List.update_at_( pHead, val, func, bAllowInsert ); if ( ret.first != m_List.end()) { if ( ret.second ) { inc_item_count(); m_Stat.onUpdateNew(); } else m_Stat.onUpdateExist(); return std::make_pair( iterator(ret.first, m_List.end()), ret.second ); } return std::make_pair( end(), ret.second ); } template iterator find_with_( Q& val, Less pred ) { CDS_UNUSED( pred ); size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); auto it = m_List.find_at_( pHead, sv, typename ordered_list_adapter::template make_compare_from_less()); m_Stat.onFind( it != m_List.end()); return iterator( it, m_List.end()); } template iterator find_( Q const& val ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); auto it = m_List.find_at_( pHead, sv, key_comparator()); m_Stat.onFind( it != m_List.end()); return iterator( it, m_List.end()); } template bool find_( Q& val, Compare cmp, Func f ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); return m_Stat.onFind( m_List.find_at( pHead, sv, cmp, [&f](value_type& item, split_list::details::search_value_type& v){ f(item, v.val ); })); } aux_node_type * alloc_aux_node( size_t nHash ) { m_Stat.onHeadNodeAllocated(); aux_node_type* p = m_Buckets.alloc_aux_node(); if ( p ) p->m_nHash = nHash; return p; } void free_aux_node( aux_node_type * p ) { m_Buckets.free_aux_node( p ); m_Stat.onHeadNodeFreed(); } /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ); } size_t bucket_no( size_t nHash ) const { return nHash & ((1 << m_nBucketCountLog2.load( 
memory_model::memory_order_relaxed )) - 1); } static size_t parent_bucket( size_t nBucket ) { assert( nBucket > 0 ); return nBucket & ~(1 << bitop::MSBnz( nBucket )); } aux_node_type * init_bucket( size_t const nBucket ) { assert( nBucket > 0 ); size_t nParent = parent_bucket( nBucket ); aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); m_Stat.onRecursiveInitBucket(); } assert( pParentBucket != nullptr ); // Allocate an aux node for the new bucket aux_node_type * pBucket = m_Buckets.bucket( nBucket ); back_off bkoff; for ( ;; pBucket = m_Buckets.bucket( nBucket )) { if ( pBucket ) return pBucket; pBucket = alloc_aux_node( split_list::dummy_hash( nBucket )); if ( pBucket ) { if ( m_List.insert_aux_node( pParentBucket, pBucket )) { m_Buckets.bucket( nBucket, pBucket ); m_Stat.onNewBucket(); return pBucket; } // Another thread has set the bucket. Wait until it is done free_aux_node( pBucket ); m_Stat.onBucketInitContenton(); break; } // There are no free buckets: the bucket table is full // Wait until another thread sets the bucket or a free bucket becomes available m_Stat.onBucketsExhausted(); bkoff(); } // Another thread has set the bucket. Wait until it is done for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) { bkoff(); m_Stat.onBusyWaitBucketInit(); } return pBucket; } aux_node_type * get_bucket( size_t nHash ) { size_t nBucket = bucket_no( nHash ); aux_node_type * pHead = m_Buckets.bucket( nBucket ); if ( pHead == nullptr ) pHead = init_bucket( nBucket ); assert( pHead->is_dummy()); return pHead; } void init() { // Initialize bucket 0 aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); // insert_aux_node cannot return false for empty list CDS_VERIFY( m_List.insert_aux_node( pNode )); m_Buckets.bucket( 0, pNode ); } static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) { return nBucketCount * nLoadFactor; } void inc_item_count() { // The bucket table grows when the item count exceeds load_factor * bucket_count size_t nMaxCount = m_nMaxItemCount.load( memory_model::memory_order_relaxed ); if ( ++m_ItemCounter <= nMaxCount ) return; size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed ); const size_t nBucketCount = static_cast<size_t>(1) << sz; if ( nBucketCount < m_Buckets.capacity()) { // we may grow the bucket table const size_t nLoadFactor = m_Buckets.load_factor(); if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) return; // someone has already updated m_nBucketCountLog2, so stop here m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); } else m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed ); } //@endcond protected: //@cond static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value; typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table; padded_bucket_table m_Buckets; ///< bucket table typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list; padded_ordered_list m_List; ///< Ordered list containing split-list items atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count ) atomics::atomic<size_t> m_nMaxItemCount; ///< number of items the container can hold, before we have to
resize hash m_HashFunctor; ///< Hash functor item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H libcds-2.3.3/cds/intrusive/split_list_rcu.h000066400000000000000000001272711341244201700210000ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H #include #include #include #include namespace cds { namespace intrusive { /// Split-ordered list RCU specialization /** @ingroup cds_intrusive_map \anchor cds_intrusive_SplitListSet_rcu Hash table implementation based on the split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" - [2008] Nir Shavit "The Art of Multiprocessor Programming" The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses the original recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows buckets to be split without moving items on resizing, see \ref cds_SplitList_algo_desc "short algo description". Implementation Template parameters are: - \p RCU - one of \ref cds_urcu_gc "RCU type" - \p OrderedList - ordered list implementation used as a bucket for the hash set, for example, MichaelList, LazyList. The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the comparing functor for the type \p T and other features specific for the ordered list. - \p Traits - set traits, default is \p split_list::traits. Instead of defining \p Traits struct you can use the option-based syntax provided by the \p split_list::make_traits metafunction. @note For the required features of the hash functor see \ref cds_SplitList_hash_functor "SplitList general description". \par How to use Before including you should include the appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and MichaelList-based split-list you should include: \code #include #include #include // Declare Michael's list for type Foo and default traits: typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list; // Declare split-list based on rcu_michael_list typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list; \endcode */ template < class RCU, class OrderedList, # ifdef CDS_DOXYGEN_INVOKED class Traits = split_list::traits # else class Traits # endif > class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits > { public: typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector typedef Traits traits; ///< Traits template parameters /// Hash functor for \ref value_type and all its derivatives that you use typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; protected: //@cond typedef split_list::details::rebind_list_traits ordered_list_adapter; //@endcond public: # ifdef CDS_DOXYGEN_INVOKED typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list # else typedef typename ordered_list_adapter::result ordered_list; # endif typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor typedef typename ordered_list::disposer disposer; ///< Node disposer functor typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::back_off back_off; ///< back-off strategy for spinning typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option typedef typename traits::stat stat; ///< Internal statistics // GC and OrderedList::gc must be the same static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); // atomicity::empty_item_counter is not allowed as a item counter static_assert( !std::is_same::value, "cds::atomicity::empty_item_counter is not allowed as a item counter"); protected: //@cond typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list typedef split_list::node node_type; ///< split-list node type /// Split-list node traits /** This traits is intended for converting between underlying ordered list node type \ref list_node_type and split-list node type \ref node_type */ typedef typename ordered_list_adapter::node_traits node_traits; /// Bucket table implementation typedef typename split_list::details::bucket_table_selector< traits::dynamic_bucket_table , gc , typename ordered_list_adapter::aux_node , opt::allocator< typename traits::allocator > , opt::memory_model< memory_model > , opt::free_list< typename traits::free_list > >::type bucket_table; typedef typename bucket_table::aux_node_type aux_node_type; ///< auxiliary node type //@endcond protected: //@cond /// Ordered list wrapper to access protected members of OrderedList class ordered_list_wrapper: public ordered_list { typedef ordered_list base_class; typedef typename base_class::auxiliary_head bucket_head_type; public: bool insert_at( aux_node_type * pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val ); } template bool insert_at( aux_node_type * pHead, value_type& val, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val, f ); } template std::pair update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::update_at( h, val, func, bAllowInsert ); } bool unlink_at( aux_node_type * pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::unlink_at( h, val ); } template bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp, f ); } template bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp ); } template value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::extract_at( h, val, cmp ); } template bool find_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp, f ); } template bool find_at( aux_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp ); } template raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::get_at( h, val, cmp ); } bool insert_aux_node( aux_node_type * pNode ) { return 
base_class::insert_aux_node( pNode ); } bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) { bucket_head_type h(pHead); return base_class::insert_aux_node( h, pNode ); } }; template <typename Less> struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less> { typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper; template <typename Q1, typename Q2> int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const { return base_wrapper::operator()( v1.val, v2 ); } template <typename Q1, typename Q2> int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const { return base_wrapper::operator()( v1, v2.val ); } }; //@endcond public: /// Initialize split-ordered list of default capacity /** The default capacity is defined in the bucket table constructor. See split_list::expandable_bucket_table, split_list::static_bucket_table which are selected by the split_list::dynamic_bucket_table option. */ SplitListSet() : m_nBucketCountLog2(1) , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) { init(); } /// Initialize split-ordered list SplitListSet( size_t nItemCount ///< estimated average item count , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. ) : m_Buckets( nItemCount, nLoadFactor ) , m_nBucketCountLog2(1) , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) { init(); } /// Destroys split-list ~SplitListSet() { m_List.clear(); gc::force_dispose(); } public: /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. The function applies RCU lock internally. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); if ( m_List.insert_at( pHead, val )) { inc_item_count(); m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertFailed(); return false; } /// Inserts new node /** This function is intended for derived non-intrusive containers. The function allows splitting the creation of a new item into two parts: - create an item with key only - insert the new item into the set - if the insertion succeeds, call the \p f functor to initialize the value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. The function applies RCU lock internally. @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to the inserted item and does not require any node-level synchronization. */ template <typename Func> bool insert( value_type& val, Func f ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); if ( m_List.insert_at( pHead, val, f )) { inc_item_count(); m_Stat.onInsertSuccess(); return true; } m_Stat.onInsertFailed(); return false; } /// Updates the node /** The operation performs inserting or changing data in a lock-free manner. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found.
The functor signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p update() function If a new item has been inserted (i.e. \p bNew is \p true) then the \p item and \p val arguments refer to the same object. The functor may change non-key fields of the \p item. The function applies RCU lock internally. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p key is already in the list. @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to the inserted item and does not require any node-level synchronization. */ template <typename Func> std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert ); if ( bRet.first && bRet.second ) { inc_item_count(); m_Stat.onUpdateNew(); } else m_Stat.onUpdateExist(); return bRet; } //@cond template <typename Func> CDS_DEPRECATED("ensure() is deprecated, use update()") std::pair<bool, bool> ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlinks it from the set if it is found and is equal to \p val. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. \p unlink finds an item by key and deletes it only if \p val is an item of that set, i.e. the pointer to the item found is equal to &val . RCU \p synchronize method can be called, therefore, RCU should not be locked. The function returns \p true on success and \p false otherwise. */ bool unlink( value_type& val ) { size_t nHash = hash_value( val ); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.unlink_at( pHead, val )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } /// Deletes the item from the set /** \anchor cds_intrusive_SplitListSet_rcu_erase The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \p true. If the item with key equal to \p key is not found the function returns \p false. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. \p unlink finds an item by key and deletes it only if \p key is an item of that set, i.e. the pointer to the item found is equal to &key . RCU \p synchronize method can be called, therefore, RCU should not be locked. Note the hash functor should accept a parameter of type \p Q that may not be the same as \p value_type. */ template <typename Q> bool erase( Q const& key ) { return erase_( key, key_comparator()); } /// Deletes the item from the set using \p pred for searching /** The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)" but \p pred is used for key comparison. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set.
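For example (a sketch assuming the items are ordered by an integer field \p nKey; \p my_less is a hypothetical predicate, not a library name):
\code
struct my_less {
    template <typename T>
    bool operator()( T const& v, int key ) const { return v.nKey < key; }
    template <typename T>
    bool operator()( int key, T const& v ) const { return key < v.nKey; }
};
// theSet is a SplitListSet instance whose comparator orders items by nKey
bool bErased = theSet.erase_with( 42, my_less());
\endcode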
*/ template <typename Q, typename Less> bool erase_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return erase_( key, typename ordered_list_adapter::template make_compare_from_less<Less>()); } /// Deletes the item from the set /** \anchor cds_intrusive_SplitListSet_rcu_erase_func The function searches an item with key equal to \p key in the set, calls the \p f functor with the item found, unlinks it from the set, and returns \p true. The \ref disposer specified by the \p OrderedList class template parameter is called by the garbage collector \p GC asynchronously. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p key is not found the function returns \p false. RCU \p synchronize method can be called, therefore, RCU should not be locked. Note the hash functor should accept a parameter of type \p Q that may not be the same as \p value_type. */ template <typename Q, typename Func> bool erase( Q const& key, Func f ) { return erase_( key, key_comparator(), f ); } /// Deletes the item from the set using \p pred for searching /** The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)" but \p pred is used for key comparison. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool erase_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return erase_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f ); } /// Extracts an item from the set /** \anchor cds_intrusive_SplitListSet_rcu_extract The function searches an item with key equal to \p key in the set, unlinks it, and returns an \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. Depending on \p bucket_type, you should or should not lock RCU before calling this function: - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked See the ordered list implementation for details. \code typedef cds::urcu::gc< general_buffered<> > rcu; typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set; rcu_splitlist_set theSet; // ... rcu_splitlist_set::exempt_ptr p; // For MichaelList we should not lock RCU // Now, you can apply extract function // Note that you must not delete the item found inside the RCU lock p = theSet.extract( 10 ); if ( p ) { // do something with p ... } // We may safely release p here // release() passes the pointer to RCU reclamation cycle: // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list. p.release(); \endcode */ template <typename Q> exempt_ptr extract( Q const& key ) { return exempt_ptr(extract_( key, key_comparator())); } /// Extracts an item from the set using \p pred for searching /** The function is an analog of \p extract(Q const&) but \p pred is used for key comparison. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> exempt_ptr extract_with( Q const& key, Less pred ) { return exempt_ptr( extract_with_( key, pred )); } /// Finds the key \p key /** \anchor cds_intrusive_SplitListSet_rcu_find_func The function searches the item with key equal to \p key and calls the functor \p f for the item found.
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& key ); }; \endcode where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note that the only guarantee is that \p item cannot be disposed of while the functor is executing. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization scheme at the item level to exclude unsafe item modifications. The \p key argument is non-const since it can be used as the \p f functor destination, i.e., the functor can modify both arguments. Note the hash functor specified for the \p Traits class template parameter should accept a parameter of type \p Q that may not be the same as \p value_type. The function applies RCU lock internally. The function returns \p true if \p key is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q& key, Func f ) { return find_( key, key_comparator(), f ); } //@cond template <typename Q, typename Func> bool find( Q const& key, Func f ) { return find_( key, key_comparator(), f ); } //@endcond /// Finds the key \p key with \p pred predicate for comparing /** The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)" but \p pred is used for key comparison. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool find_with( Q& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f ); } //@cond template <typename Q, typename Less, typename Func> bool find_with( Q const& key, Less pred, Func f ) { CDS_UNUSED( pred ); return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f ); } //@endcond /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for the \p Traits class template parameter should accept a parameter of type \p Q that may not be the same as \p value_type. Otherwise, you may use the \p contains( Q const&, Less pred ) functions with an explicit predicate for key comparison. */ template <typename Q> bool contains( Q const& key ) { return find_value( key, key_comparator()); } //@cond template <typename Q> CDS_DEPRECATED("deprecated, use contains()") bool find( Q const& key ) { return contains( key ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparison. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool contains( Q const& key, Less pred ) { CDS_UNUSED( pred ); return find_value( key, typename ordered_list_adapter::template make_compare_from_less<Less>()); } //@cond template <typename Q, typename Less> CDS_DEPRECATED("deprecated, use contains()") bool find_with( Q const& key, Less pred ) { return contains( key, pred ); } //@endcond /// Finds the key \p key and returns the item found /** \anchor cds_intrusive_SplitListSet_rcu_get The function searches the item with key equal to \p key and returns a pointer to the item found. If \p key is not found it returns \p nullptr. Note the compare functor should accept a parameter of type \p Q that may not be the same as \p value_type. RCU should be locked before calling this function.
Returned item is valid only while RCU is locked: \code typedef cds::intrusive::SplitListSet< your_template_parameters > set_class; set_class theSet; // ... typename set_class::raw_ptr rp; { // Lock RCU hash_set::rcu_lock lock; rp = theSet.get( 5 ); if ( rp ) { // Deal with rp //... } // Unlock RCU by rcu_lock destructor // rp can be retired by disposer at any time after RCU has been unlocked } \endcode */ template raw_ptr get( Q const& key ) { return get_( key, key_comparator()); } /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q in any order. \p pred must imply the same element order as the comparator used for building the set. */ template raw_ptr get_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); return get_( key, typename ordered_list_adapter::template make_compare_from_less()); } /// Returns item count in the set size_t size() const { return m_ItemCounter; } /// Checks if the set is empty /** Emptiness is checked by item counting: if item count is zero then the set is empty. Thus, the correct item counting feature is an important part of split-list set implementation. */ bool empty() const { return size() == 0; } /// Clears the set (not atomic) void clear() { iterator it = begin(); while ( it != end()) { iterator i(it); ++i; unlink( *it ); it = i; } } /// Returns internal statistics stat const& statistics() const { return m_Stat; } /// Returns internal statistics for \p OrderedList typename OrderedList::stat const& list_statistics() const { return m_List.statistics(); } protected: //@cond template class iterator_type :public split_list::details::iterator_type { typedef split_list::details::iterator_type iterator_base_class; typedef typename iterator_base_class::list_iterator list_iterator; public: iterator_type() : iterator_base_class() {} iterator_type( iterator_type const& src ) : iterator_base_class( src ) {} // This ctor should be protected... iterator_type( list_iterator itCur, list_iterator itEnd ) : iterator_base_class( itCur, itEnd ) {} }; //@endcond public: ///@name Forward iterators (thread-safe under RCU lock) //@{ /// Forward iterator /** The forward iterator for a split-list has some features: - it has no post-increment operator - it depends on iterator of underlying \p OrderedList You may safely use iterators in multi-threaded environment only under RCU lock. Otherwise, a crash is possible if another thread deletes the element the iterator points to. */ typedef iterator_type iterator; /// Const forward iterator /** For iterator's features and requirements see \ref iterator */ typedef iterator_type const_iterator; /// Returns a forward iterator addressing the first element in a split-list /** For empty list \code begin() == end() \endcode */ iterator begin() { return iterator( m_List.begin(), m_List.end()); } /// Returns an iterator that addresses the location succeeding the last element in a split-list /** Do not use the value returned by end function to access any item. The returned value can be used only to control reaching the end of the split-list. 
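Because iterators are valid only under RCU lock, a typical traversal looks like this (a minimal sketch; \p set_class is an assumed typedef of this %SplitListSet specialization, the loop body is an assumption):
\code
set_class theSet;
// ...
{
    // Lock RCU for the whole traversal
    set_class::rcu_lock lock;
    for ( set_class::iterator it = theSet.begin(), itEnd = theSet.end(); it != itEnd; ++it ) {
        // deal with *it ...
    }
} // RCU is unlocked by rcu_lock destructor
\endcode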
For empty list \code begin() == end() \endcode */ iterator end() { return iterator( m_List.end(), m_List.end()); } /// Returns a forward const iterator addressing the first element in a split-list const_iterator begin() const { return cbegin(); } /// Returns a forward const iterator addressing the first element in a split-list const_iterator cbegin() const { return const_iterator( m_List.cbegin(), m_List.cend()); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator end() const { return cend(); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator cend() const { return const_iterator( m_List.cend(), m_List.cend()); } //@} protected: //@cond aux_node_type * alloc_aux_node( size_t nHash ) { m_Stat.onHeadNodeAllocated(); aux_node_type* p = m_Buckets.alloc_aux_node(); if ( p ) p->m_nHash = nHash; return p; } void free_aux_node( aux_node_type * p ) { m_Buckets.free_aux_node( p ); m_Stat.onHeadNodeFreed(); } /// Calculates hash value of \p key template size_t hash_value( Q const& key ) const { return m_HashFunctor( key ); } size_t bucket_no( size_t nHash ) const { return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 ); } static size_t parent_bucket( size_t nBucket ) { assert( nBucket > 0 ); return nBucket & ~( 1 << bitop::MSBnz( nBucket )); } aux_node_type * init_bucket( size_t const nBucket ) { assert( nBucket > 0 ); size_t nParent = parent_bucket( nBucket ); aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); m_Stat.onRecursiveInitBucket(); } assert( pParentBucket != nullptr ); // Allocate an aux node for new bucket aux_node_type * pBucket = m_Buckets.bucket( nBucket ); back_off bkoff; for ( ;; pBucket = m_Buckets.bucket( nBucket )) { if ( pBucket ) return pBucket; pBucket = alloc_aux_node( split_list::dummy_hash( nBucket )); if ( pBucket ) { if ( m_List.insert_aux_node( pParentBucket, pBucket )) { m_Buckets.bucket( nBucket, pBucket ); m_Stat.onNewBucket(); return pBucket; } // Another thread set the bucket. Wait while it done free_aux_node( pBucket ); m_Stat.onBucketInitContenton(); break; } // There are no free buckets. It means that the bucket table is full // Wait while another thread set the bucket or a free bucket will be available m_Stat.onBucketsExhausted(); bkoff(); } // Another thread set the bucket. 
Wait until it is done for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) { bkoff(); m_Stat.onBusyWaitBucketInit(); } return pBucket; } aux_node_type * get_bucket( size_t nHash ) { size_t nBucket = bucket_no( nHash ); aux_node_type * pHead = m_Buckets.bucket( nBucket ); if ( pHead == nullptr ) pHead = init_bucket( nBucket ); assert( pHead->is_dummy()); return pHead; } void init() { // Initialize bucket 0 aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); // insert_aux_node cannot return false for empty list CDS_VERIFY( m_List.insert_aux_node( pNode )); m_Buckets.bucket( 0, pNode ); } static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) { return nBucketCount * nLoadFactor; } void inc_item_count() { size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed); if ( ++m_ItemCounter <= nMaxCount ) return; size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed); const size_t nBucketCount = static_cast<size_t>(1) << sz; if ( nBucketCount < m_Buckets.capacity()) { // we may grow the bucket table const size_t nLoadFactor = m_Buckets.load_factor(); if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) return; // someone has already updated m_nBucketCountLog2, so stop here m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); } else m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed ); } template <typename Q, typename Compare, typename Func> bool find_( Q& val, Compare cmp, Func f ) { size_t nHash = hash_value( val ); split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); return m_Stat.onFind( m_List.find_at( pHead, sv, cmp, [&f](value_type& item, split_list::details::search_value_type<Q>& v){ f(item, v.val ); })); } template <typename Q, typename Compare> bool find_value( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); return m_Stat.onFind( m_List.find_at( pHead, sv, cmp )); } template <typename Q, typename Compare> raw_ptr get_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); raw_ptr p = m_List.get_at( pHead, sv, cmp ); m_Stat.onFind( !!p ); return p; } template <typename Q, typename Compare> value_type * extract_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); value_type * pNode = m_List.extract_at( pHead, sv, cmp ); if ( pNode ) { --m_ItemCounter; m_Stat.onExtractSuccess(); } else m_Stat.onExtractFailed(); return pNode; } template <typename Q, typename Less> value_type * extract_with_( Q const& val, Less pred ) { CDS_UNUSED( pred ); return extract_( val, typename ordered_list_adapter::template make_compare_from_less<Less>()); } template <typename Q, typename Compare> bool erase_( const Q& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr
); if ( m_List.erase_at( pHead, sv, cmp )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } template <typename Q, typename Compare, typename Func> bool erase_( Q const& val, Compare cmp, Func f ) { size_t nHash = hash_value( val ); split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash )); aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp, f )) { --m_ItemCounter; m_Stat.onEraseSuccess(); return true; } m_Stat.onEraseFailed(); return false; } //@endcond protected: //@cond static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value; typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table; padded_bucket_table m_Buckets; ///< bucket table typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list; padded_ordered_list m_List; ///< Ordered list containing split-list items atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count ) atomics::atomic<size_t> m_nMaxItemCount; ///< number of items the container can hold, before we have to resize hash m_HashFunctor; ///< Hash functor item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics accumulator //@endcond }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H libcds-2.3.3/cds/intrusive/striped_set.h000066400000000000000000001147041341244201700202630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_H #define CDSLIB_INTRUSIVE_STRIPED_SET_H #include #include #include namespace cds { namespace intrusive { /// StripedSet related definitions namespace striped_set { /** @defgroup cds_striped_resizing_policy Resizing policy for striped/refinable set/map Resizing policy for \p intrusive::StripedSet, \p container::StripedSet and \p container::StripedMap. */ } // namespace striped_set /// Striped hash set /** @ingroup cds_intrusive_map Source - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" Lock striping is a very simple technique. The set consists of a bucket table and an array of locks. Initially, the capacity of the lock array and of the bucket table is the same. When the set is resized, the bucket table capacity is doubled but the lock array is not. Lock \p i protects each bucket \p j for which i = j mod L , where \p L is the size of the lock array. Template arguments: - \p Container - the container class that is used as a bucket table entry. The \p Container class should support a uniform interface described below. - \p Options - options The \p %StripedSet class does not exactly dictate the type of container that should be used as a \p Container bucket. Instead, the class supports different intrusive container types for the bucket, for example, \p boost::intrusive::list, \p boost::intrusive::set and others. Remember that the \p %StripedSet class algorithm ensures sequential blocking access to each bucket through the mutex type you specify among the \p Options template arguments. The \p Options are: - \p opt::mutex_policy - concurrent access policy. Available policies: \p striped_set::striping, \p striped_set::refinable. Default is \p %striped_set::striping. - \p cds::opt::hash - hash functor. For the default option value see opt::v::hash_selector, which selects the default hash functor for your compiler.
- \p cds::opt::compare - key comparison functor. No default functor is provided. If the option is not specified, \p opt::less is used. - \p cds::opt::less - specifies the binary predicate used for key comparison. Default is \p std::less. - \p cds::opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operations on the counter are performed without locks. Note that item counting is an essential part of the set algorithm, so a dummy counter like \p atomicity::empty_item_counter is not suitable. - \p cds::opt::allocator - the allocator type used for memory allocation of the bucket table and the lock array. Default is \ref CDS_DEFAULT_ALLOCATOR. - \p cds::opt::resizing_policy - the resizing policy - a functor that decides when to resize the hash set. The default option value depends on the bucket container type: for sequential containers like \p boost::intrusive::list the resizing policy is cds::container::striped_set::load_factor_resizing<4> ; for other types of containers like \p boost::intrusive::set the resizing policy is cds::container::striped_set::no_resizing. See \ref cds_striped_resizing_policy "available resizing policies". Note that the choice of resizing policy depends on the \p Container type: for sequential containers like \p boost::intrusive::list the right policy can significantly improve performance. For other, non-sequential types of \p Container (like a \p boost::intrusive::set) the resizing policy is not so important. - \p cds::opt::buffer - an initialized buffer type used only for \p boost::intrusive::unordered_set. Default is cds::opt::v::initialized_static_buffer< cds::any_type, 256 > . The \p opt::compare or \p opt::less options are used in some \p Container classes for ordering. The \p %opt::compare option has the highest priority: if \p %opt::compare is specified, \p %opt::less is not used. You can pass other options that will be passed to the \p adapt metafunction, see below. Internal details The \p %StripedSet class cannot utilize the \p Container specified directly, but only its adapted variant which supports a unified interface. Internally, the adaptation is made via the \p intrusive::striped_set::adapt metafunction that wraps the bucket container and provides the unified bucket interface suitable for \p %StripedSet. Such adaptation is completely transparent for you - you don't need to call the \p adapt metafunction directly, \p %StripedSet class's internal machinery itself invokes the appropriate \p adapt metafunction specialization to adjust your \p Container container class to the \p %StripedSet bucket's internal interface. All you need is to include the right header before striped_set.h. By default, the intrusive::striped_set::adapt metafunction does not apply any wrapping to \p AnyContainer, so the result intrusive::striped_set::adapt::type is the same as \p AnyContainer. However, there are a lot of specializations of \p %intrusive::striped_set::adapt for \p boost::intrusive containers, see the table below. Each of these specializations wraps the corresponding container making it suitable for the set's bucket. Remember, you should include the proper header file for \p adapt before including striped_set.h. \note It is important to specify the boost::intrusive::constant_time_size option for all \p boost::intrusive containers that support this option. The fast item counting feature is an essential part of the \p %StripedSet resizing algorithm. A complete declaration and usage sketch is given after the table below.
Container .h-file for \p adapt Example Notes
\p boost::intrusive::list \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::list >, cds::opt::less< std::less > > striped_set; \endcode The list is ordered. Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the list
\p boost::intrusive::slist \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::slist >, cds::opt::less< std::less > > striped_set; \endcode The list is ordered. Template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare for type \p T stored in the list
\p boost::intrusive::set \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::set > > striped_set; \endcode Note that \p boost::intrusive::compare option using in \p boost::intrusive::set should support \p T type stored in the set and any type \p Q that you can use in \p erase() and \p find() member functions.
\p boost::intrusive::unordered_set \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::unordered_set ,boost::intrusive::hash< user_provided_hash_functor > > > striped_set; \endcode You should provide two different hash function \p h1 and \p h2 - one for \p boost::intrusive::unordered_set and other for \p %StripedSet. For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X The option \p opt::buffer is used for \p boost::intrusive::bucket_traits. Default is cds::opt::v::initialized_static_buffer< cds::any_type, 256 > . The resizing policy should correlate with the buffer capacity. The default resizing policy is cds::container::striped_set::load_factor_resizing<256> what gives load factor 1 for default bucket buffer that is the best for \p boost::intrusive::unordered_set.
\p boost::intrusive::avl_set \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::avl_set > > striped_set; \endcode Note that \p boost::intrusive::compare option using in \p boost::intrusive::avl_set should support \p T type stored in the set and any type \p Q that you can use in \p erase() and \p find() member functions.
\p boost::intrusive::sg_set \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::sg_set > > striped_set; \endcode Note that \p boost::intrusive::compare option using in \p boost::intrusive::sg_set should support \p T type stored in the set and any type \p Q that you can use in \p erase() and \p find() member functions.
\p boost::intrusive::splay_set \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::splay_set > > striped_set; \endcode Note that \p boost::intrusive::compare option using in \p boost::intrusive::splay_set should support \p T type stored in the set and any type \p Q that you can use in \p erase() and \p find() member functions.
\p boost::intrusive::treap_set \code #include #include typedef cds::intrusive::StripedSet< boost::intrusive::treap_set > > striped_set; \endcode Note that \p boost::intrusive::compare option using in \p boost::intrusive::treap_set should support \p T type stored in the set and any type \p Q that you can use in \p erase() and \p find() member functions.
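Putting the pieces together, a complete declaration and basic usage might look as follows. This is a sketch only: \p Foo, \p foo_hash and \p foo_less are illustrative assumptions, and the adapt header name follows the pattern shown in the table rows above:
\code
#include <boost/intrusive/list.hpp>
#include <functional>
// hypothetical adapt header for boost::intrusive::list (include it first):
#include <cds/intrusive/striped_set/boost_list.h>
#include <cds/intrusive/striped_set.h>

struct Foo: public boost::intrusive::list_base_hook<>
{
    int nKey;
    explicit Foo( int key ): nKey( key ) {}
};

struct foo_less {
    bool operator()( Foo const& a, Foo const& b ) const { return a.nKey < b.nKey; }
    bool operator()( Foo const& a, int k ) const { return a.nKey < k; }
    bool operator()( int k, Foo const& b ) const { return k < b.nKey; }
};

struct foo_hash {
    size_t operator()( int k ) const { return std::hash<int>()( k ); }
    size_t operator()( Foo const& f ) const { return operator()( f.nKey ); }
};

typedef cds::intrusive::StripedSet<
    boost::intrusive::list< Foo >,
    cds::opt::hash< foo_hash >,
    cds::opt::less< foo_less >
> striped_set;

striped_set theSet;
Foo* p = new Foo( 10 );
bool bOk = theSet.insert( *p );      // the set stores a reference to *p, not a copy
bool bFound = theSet.contains( 10 ); // lookup by key type int
\endcode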
You can use another intrusive container type as striped set's bucket. Suppose, you have a container class \p MyBestContainer and you want to integrate it with \p StripedSet as bucket type. There are two possibility: - either your \p MyBestContainer class has native support of bucket's interface; in this case, you can use default \p intrusive::striped_set::adapt metafunction; - or your \p MyBestContainer class does not support bucket's interface, which means, that you should create a specialization of cds::intrusive::striped_set::adapt metafunction providing necessary interface. The intrusive::striped_set::adapt< Container, OptionPack > metafunction has two template argument: - \p Container is the class that should be used as the bucket, for example, boost::intrusive::list< T >. - \p OptionPack is the packed options from \p %StripedSet declaration. The \p adapt metafunction can use any option from \p OptionPack for its internal use. For example, a \p compare option can be passed to \p adapt metafunction via \p OptionPack argument of \p %StripedSet declaration. See \p intrusive::striped_set::adapt metafunction for the description of interface that the bucket container must provide to be \p %StripedSet compatible. */ template class StripedSet { public: //@cond struct default_options { typedef striped_set::striping<> mutex_policy; typedef typename cds::opt::v::hash_selector< cds::opt::none >::type hash; typedef cds::atomicity::item_counter item_counter; typedef CDS_DEFAULT_ALLOCATOR allocator; typedef cds::opt::none resizing_policy; typedef cds::opt::none compare; typedef cds::opt::none less; }; typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< default_options, Options... >::type ,Options... >::type options; //@endcond typedef Container underlying_container_type ; ///< original intrusive container type for the bucket typedef typename cds::intrusive::striped_set::adapt< underlying_container_type, Options... >::type bucket_type ; ///< container type adapted for hash set typedef typename bucket_type::value_type value_type ; ///< value type stored in the set typedef typename options::hash hash ; ///< Hash functor typedef typename options::item_counter item_counter ; ///< Item counter typedef typename cds::opt::select_default< typename options::resizing_policy, typename bucket_type::default_resizing_policy >::type resizing_policy ; ///< Resizing policy typedef typename options::allocator allocator_type ; ///< allocator type specified in options. typedef typename options::mutex_policy mutex_policy ; ///< Mutex policy typedef cds::details::Allocator< bucket_type, allocator_type > bucket_allocator; ///< bucket allocator type based on allocator_type protected: bucket_type * m_Buckets; ///< Bucket table atomics::atomic m_nBucketMask; ///< Bucket table size - 1. m_nBucketMask + 1 should be power of two. item_counter m_ItemCounter; ///< Item counter hash m_Hash; ///< Hash functor mutex_policy m_MutexPolicy ; ///< Mutex policy resizing_policy m_ResizingPolicy; ///< Resizing policy static const size_t c_nMinimalCapacity = 16 ; ///< Minimal capacity protected: //@cond typedef typename mutex_policy::scoped_cell_lock scoped_cell_lock; typedef typename mutex_policy::scoped_full_lock scoped_full_lock; typedef typename mutex_policy::scoped_resize_lock scoped_resize_lock; //@endcond protected: //@cond static size_t calc_init_capacity( size_t nCapacity ) { nCapacity = cds::beans::ceil2( nCapacity ); return nCapacity < c_nMinimalCapacity ? 
c_nMinimalCapacity : nCapacity; } void alloc_bucket_table( size_t nSize ) { assert( cds::beans::is_power2( nSize )); m_nBucketMask.store( nSize - 1, atomics::memory_order_release ); m_Buckets = bucket_allocator().NewArray( nSize ); } static void free_bucket_table( bucket_type * pBuckets, size_t nSize ) { bucket_allocator().Delete( pBuckets, nSize ); } template size_t hashing( Q const& v ) const { return m_Hash( v ); } bucket_type * bucket( size_t nHash ) const noexcept { return m_Buckets + (nHash & m_nBucketMask.load( atomics::memory_order_relaxed )); } template bool find_( Q& val, Func f ) { size_t nHash = hashing( val ); scoped_cell_lock sl( m_MutexPolicy, nHash ); return bucket( nHash )->find( val, f ); } template bool find_with_( Q& val, Less pred, Func f ) { size_t nHash = hashing( val ); scoped_cell_lock sl( m_MutexPolicy, nHash ); return bucket( nHash )->find( val, pred, f ); } void internal_resize( size_t nNewCapacity ) { // All locks are already locked! m_MutexPolicy.resize( nNewCapacity ); size_t nOldCapacity = bucket_count(); bucket_type * pOldBuckets = m_Buckets; alloc_bucket_table( nNewCapacity ); typedef typename bucket_type::iterator bucket_iterator; bucket_type * pEnd = pOldBuckets + nOldCapacity; for ( bucket_type * pCur = pOldBuckets; pCur != pEnd; ++pCur ) { bucket_iterator itEnd = pCur->end(); bucket_iterator itNext; for ( bucket_iterator it = pCur->begin(); it != itEnd; it = itNext ) { itNext = it; ++itNext; bucket( m_Hash( *it ))->move_item( *pCur, it ); } pCur->clear(); } free_bucket_table( pOldBuckets, nOldCapacity ); m_ResizingPolicy.reset(); } void resize() { size_t nOldCapacity = bucket_count( atomics::memory_order_acquire ); scoped_resize_lock al( m_MutexPolicy ); if ( al.success()) { if ( nOldCapacity != bucket_count( atomics::memory_order_acquire )) { // someone resized already return; } internal_resize( nOldCapacity * 2 ); } } //@endcond public: /// Default ctor. The initial capacity is 16. StripedSet() : m_Buckets( nullptr ) , m_nBucketMask( c_nMinimalCapacity - 1 ) , m_MutexPolicy( c_nMinimalCapacity ) { alloc_bucket_table( bucket_count()); } /// Ctor with initial capacity specified StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ) : m_Buckets( nullptr ) , m_nBucketMask( calc_init_capacity(nCapacity) - 1 ) , m_MutexPolicy( bucket_count()) { alloc_bucket_table( bucket_count()); } /// Ctor with resizing policy (copy semantics) /** This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter */ StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ,resizing_policy const& resizingPolicy ///< Resizing policy ) : m_Buckets( nullptr ) , m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) , m_MutexPolicy( bucket_count()) , m_ResizingPolicy( resizingPolicy ) { alloc_bucket_table( bucket_count()); } /// Ctor with resizing policy (move semantics) /** This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter Move semantics is used. */ StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ,resizing_policy&& resizingPolicy ///< Resizing policy ) : m_Buckets( nullptr ) , m_nBucketMask( ( nCapacity ? 
calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) , m_MutexPolicy( bucket_count()) , m_ResizingPolicy( std::forward<resizing_policy>( resizingPolicy )) { alloc_bucket_table( bucket_count()); } /// Destructor destroys internal data ~StripedSet() { free_bucket_table( m_Buckets, bucket_count()); } public: /// Inserts new node /** The function inserts \p val in the set if it does not contain an item with key equal to \p val. Returns \p true if \p val is placed into the set, \p false otherwise. */ bool insert( value_type& val ) { return insert( val, []( value_type& ) {} ); } /// Inserts new node /** The function allows splitting the creation of a new item into two parts: - create an item with key only - insert the new item into the set - if the insertion succeeds, call the \p f functor to initialize the value-field of \p val. The functor signature is: \code void func( value_type& val ); \endcode where \p val is the item inserted. */ template <typename Func> bool insert( value_type& val, Func f ) { bool bOk; bool bResize; size_t nHash = hashing( val ); bucket_type * pBucket; { scoped_cell_lock sl( m_MutexPolicy, nHash ); pBucket = bucket( nHash ); bOk = pBucket->insert( val, f ); bResize = bOk && m_ResizingPolicy( ++m_ItemCounter, *this, *pBucket ); } if ( bResize ) resize(); return bOk; } /// Updates the node /** The operation performs inserting or changing data under the bucket lock. If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. The functor signature is: \code void func( bool bNew, value_type& item, value_type& val ); \endcode with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - \p val - argument \p val passed into the \p update() function If a new item has been inserted (i.e. \p bNew is \p true) then the \p item and \p val arguments refer to the same object. The functor may change non-key fields of the \p item. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p val is already in the set. */ template <typename Func> std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true ) { std::pair<bool, bool> result; bool bResize; size_t nHash = hashing( val ); bucket_type * pBucket; { scoped_cell_lock sl( m_MutexPolicy, nHash ); pBucket = bucket( nHash ); result = pBucket->update( val, func, bAllowInsert ); bResize = result.first && result.second && m_ResizingPolicy( ++m_ItemCounter, *this, *pBucket ); } if ( bResize ) resize(); return result; } //@cond template <typename Func> std::pair<bool, bool> ensure( value_type& val, Func func ) { return update( val, func, true ); } //@endcond /// Unlinks the item \p val from the set /** The function searches the item \p val in the set and unlinks it if it is found and is equal to \p val (here, the equality means that \p val belongs to the set: if \p item is the item found then unlink is successful iff &val == &item ) The function returns \p true on success and \p false otherwise. */ bool unlink( value_type& val ) { bool bOk; size_t nHash = hashing( val ); { scoped_cell_lock sl( m_MutexPolicy, nHash ); bOk = bucket( nHash )->unlink( val ); } if ( bOk ) --m_ItemCounter; return bOk; } /// Deletes the item from the set /** \anchor cds_intrusive_StripedSet_erase The function searches an item with key equal to \p val in the set, unlinks it from the set, and returns a pointer to the unlinked item. If the item with key equal to \p val is not found the function returns \p nullptr.
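Since the item is already unlinked from the set when the pointer is returned, the caller becomes responsible for it. For example (a sketch that assumes the items were allocated with \p new):
\code
Foo* p = theSet.erase( 10 );
if ( p )
    delete p; // safe: the item is no longer reachable via the set
\endcode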
Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template value_type * erase( Q const& val ) { return erase( val, [](value_type const&) {} ); } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_StripedSet_erase "erase(Q const&)" but \p pred is used for key comparing \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template value_type * erase_with( Q const& val, Less pred ) { return erase_with( val, pred, [](value_type const&) {} ); } /// Deletes the item from the set /** \anchor cds_intrusive_StripedSet_erase_func The function searches an item with key equal to \p val in the set, call \p f functor with item found, unlinks it from the set, and returns a pointer to unlinked item. The \p Func interface is \code struct functor { void operator()( value_type const& item ); }; \endcode If the item with key equal to \p val is not found the function return \p false. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template value_type * erase( Q const& val, Func f ) { size_t nHash = hashing( val ); value_type * pVal; { scoped_cell_lock sl( m_MutexPolicy, nHash ); pVal = bucket( nHash )->erase( val, f ); } if ( pVal ) --m_ItemCounter; return pVal; } /// Deletes the item from the set using \p pred predicate for searching /** The function is an analog of \ref cds_intrusive_StripedSet_erase_func "erase(Q const&, Func)" but \p pred is used for key comparing \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template value_type * erase_with( Q const& val, Less pred, Func f ) { size_t nHash = hashing( val ); value_type * pVal; { scoped_cell_lock sl( m_MutexPolicy, nHash ); pVal = bucket( nHash )->erase( val, pred, f ); } if ( pVal ) --m_ItemCounter; return pVal; } /// Find the key \p val /** \anchor cds_intrusive_StripedSet_find_func The function searches the item with key equal to \p val and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor may change non-key fields of \p item. The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor may modify both arguments. Note the hash functor specified for class \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. The function returns \p true if \p val is found, \p false otherwise. */ template bool find( Q& val, Func f ) { return find_( val, f ); } /// Find the key \p val using \p pred predicate /** The function is an analog of \ref cds_intrusive_StripedSet_find_func "find(Q&, Func)" but \p pred is used for key comparing \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template bool find_with( Q& val, Less pred, Func f ) { return find_with_( val, pred, f ); } /// Find the key \p val /** \anchor cds_intrusive_StripedSet_find_cfunc The function searches the item with key equal to \p val and calls the functor \p f for item found. 
The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q const& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor may change non-key fields of \p item. The \p val argument is non-const since it can be used as \p f functor destination, i.e., the functor may modify both arguments. The function returns \p true if \p val is found, \p false otherwise. */ template <typename Q, typename Func> bool find( Q const& val, Func f ) { return find_( val, f ); } /// Find the key \p val using \p pred predicate /** The function is an analog of \ref cds_intrusive_StripedSet_find_cfunc "find(Q const&, Func)" but \p pred is used for key comparing. \p Less has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less, typename Func> bool find_with( Q const& val, Less pred, Func f ) { return find_with_( val, pred, f ); } /// Checks whether the set contains \p key /** The function searches the item with key equal to \p key and returns \p true if it is found, and \p false otherwise. Note the hash functor specified for the \p Traits template parameter should accept a parameter of type \p Q that can be not the same as \p value_type. Otherwise, you may use the \p contains( Q const&, Less pred ) functions with an explicit predicate for key comparing. */ template <typename Q> bool contains( Q const& key ) { return find( key, [](value_type&, Q const& ) {} ); } //@cond template <typename Q> CDS_DEPRECATED("use contains()") bool find( Q const& val ) { return contains( val ); } //@endcond /// Checks whether the set contains \p key using \p pred predicate for searching /** The function is an analog of contains( key ) but \p pred is used for key comparing. \p Less functor has the interface like \p std::less. \p Less must imply the same element order as the comparator used for building the set. */ template <typename Q, typename Less> bool contains( Q const& key, Less pred ) { return find_with( key, pred, [](value_type& , Q const& ) {} ); } //@cond template <typename Q, typename Less> CDS_DEPRECATED("use contains()") bool find_with( Q const& val, Less pred ) { return contains( val, pred ); } //@endcond /// Clears the set /** The function unlinks all items from the set. */ void clear() { // locks entire array scoped_full_lock sl( m_MutexPolicy ); size_t nBucketCount = bucket_count(); bucket_type * pBucket = m_Buckets; for ( size_t i = 0; i < nBucketCount; ++i, ++pBucket ) pBucket->clear(); m_ItemCounter.reset(); } /// Clears the set and calls \p disposer for each item /** The function unlinks all items from the set, calling \p disposer for each item. The \p Disposer functor interface is: \code struct Disposer { void operator()( value_type * p ); }; \endcode */ template <typename Disposer> void clear_and_dispose( Disposer disposer ) { // locks entire array scoped_full_lock sl( m_MutexPolicy ); size_t nBucketCount = bucket_count(); bucket_type * pBucket = m_Buckets; for ( size_t i = 0; i < nBucketCount; ++i, ++pBucket ) pBucket->clear( disposer ); m_ItemCounter.reset(); } /// Checks if the set is empty /** Emptiness is checked by item counting: if the item count is zero then the set is empty. */ bool empty() const { return size() == 0; } /// Returns item count in the set size_t size() const { return m_ItemCounter; } /// Returns the size of hash table /** The hash table size is non-constant and can be increased via resizing.
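For example, with the \p load_factor_resizing<4> policy (a sketch; \p s stands for a striped set object): \code size_t n = s.bucket_count(); // resizing is triggered when s.size() exceeds n * 4; // after resizing, bucket_count() returns the increased size \endcode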
*/ size_t bucket_count() const { return m_nBucketMask.load( atomics::memory_order_relaxed ) + 1; } //@cond size_t bucket_count( atomics::memory_order load_mo ) const { return m_nBucketMask.load( load_mo ) + 1; } //@endcond /// Returns lock array size size_t lock_count() const { return m_MutexPolicy.lock_count(); } /// Returns resizing policy object resizing_policy& get_resizing_policy() { return m_ResizingPolicy; } /// Returns resizing policy (const version) resizing_policy const& get_resizing_policy() const { return m_ResizingPolicy; } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_H libcds-2.3.3/cds/intrusive/striped_set/000077500000000000000000000000001341244201700201035ustar00rootroot00000000000000libcds-2.3.3/cds/intrusive/striped_set/adapter.h000066400000000000000000000316741341244201700217060ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_ADAPTER_H #include #include #include #include // cds::opt::details::make_comparator - for some adapt specializations namespace cds { namespace intrusive { /// StripedSet related definitions namespace striped_set { /// Default adapter for intrusive striped/refinable hash set /** By default, the metafunction does not make any transformation for container type \p Container. \p Container should provide an interface suitable for the hash set. The \p Options template argument contains an option pack that will be passed to \p cds::intrusive::StripedSet. Bucket interface The result of the metafunction is a container (a bucket) that should support the following interface: Public typedefs that the bucket should provide: - \p value_type - the type of the item in the bucket - \p iterator - bucket's item iterator - \p const_iterator - bucket's item constant iterator - \p default_resizing_policy - default resizing policy preferable for the container. By default, the library defines cds::container::striped_set::load_factor_resizing<4> for sequential containers like boost::intrusive::list, and cds::container::striped_set::no_resizing for ordered containers like boost::intrusive::set. Insert value \p val of type \p Q \code template <typename Func> bool insert( value_type& val, Func f ); \endcode Inserts \p val into the container and, if the insertion is successful, calls functor \p f with \p val. The functor signature is: \code struct functor { void operator()( value_type& item ); }; \endcode where \p item is the item inserted. The user-defined functor \p f is called only if the insertion succeeds.
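For illustration, a conforming \p insert for a sorted sequential bucket might look like the following sketch (assumptions: \p m_List is the underlying container, \p find_key returns the first position whose item is not less than \p val, and \p key_comparator is the key comparator described above): \code template <typename Func> bool insert( value_type& val, Func f ) { iterator it = find_key( val ); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { m_List.insert( it, val ); // link the item into the bucket f( val ); // initialize the inserted item return true; } return false; // an item with the same key already exists } \endcode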
Updates the item in the container \code template <typename Func> std::pair<bool, bool> update( value_type& val, Func f, bool bAllowInsert = true ) \endcode The operation performs inserting or changing data. If the \p val key is not found in the container, then \p val is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p f is called with the item found. The \p Func functor has the following interface: \code void func( bool bNew, value_type& item, value_type& val ); \endcode or like a functor: \code struct functor { void operator()( bool bNew, value_type& item, value_type& val ); }; \endcode where arguments are: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - container's item - \p val - argument \p val passed into the \p update() function If \p val has been inserted (i.e. bNew == true) then \p item and \p val are the same element: &item == &val. Otherwise, they are different. The functor can change non-key fields of the \p item. Returns std::pair<bool, bool> where \p first is \p true if the operation is successful, \p second is \p true if a new item has been added or \p false if the item with \p val key already exists.
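A conforming \p update might look like this sketch (same assumptions as for the \p insert sketch above): \code template <typename Func> std::pair<bool, bool> update( value_type& val, Func f, bool bAllowInsert ) { iterator it = find_key( val ); if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { if ( !bAllowInsert ) return std::make_pair( false, false ); m_List.insert( it, val ); f( true, val, val ); // bNew == true: item and val are the same return std::make_pair( true, true ); } f( false, *it, val ); // bNew == false: update the existing item return std::make_pair( true, false ); } \endcode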
Unlink an item \code bool unlink( value_type& val ) \endcode Unlinks \p val from the container if \p val belongs to it.
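Note that \p unlink must check identity, not just key equality; a sketch (assuming \p m_Set is an underlying ordered container): \code bool unlink( value_type& val ) { iterator it = m_Set.find( val, key_comparator()); if ( it == m_Set.end() || &(*it) != &val ) return false; // an equal item exists but it is not val itself m_Set.erase( it ); return true; } \endcode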
Erase \p key \code template <typename Q, typename Func> value_type * erase( Q const& key, Func f ) \endcode The function searches an item with key \p key, calls the functor \p f and erases the item. If \p key is not found, the functor is not called. The functor \p Func interface is: \code struct functor { void operator()(value_type& val); }; \endcode The type \p Q can differ from \ref value_type of items stored in the container. Therefore, the \p value_type should be comparable with type \p Q. Returns a pointer to the erased item, or \p nullptr if the key is not found.
Find the key \p val \code template <typename Q, typename Func> bool find( Q& val, Func f ) template <typename Q, typename Compare, typename Func> bool find( Q& val, Compare cmp, Func f ) \endcode The function searches the item with key equal to \p val and calls the functor \p f for the item found. The interface of \p Func functor is: \code struct functor { void operator()( value_type& item, Q& val ); }; \endcode where \p item is the item found, \p val is the find function argument. The functor can change non-key fields of \p item. The \p val argument may be non-const since it can be used as \p f functor destination, i.e., the functor can modify both arguments. The type \p Q can differ from \ref value_type of items stored in the container. Therefore, the \p value_type should be comparable with type \p Q. The first form uses the default compare function for key ordering. The second form allows specifying a \p Compare functor \p cmp that can compare \p value_type and \p Q types. The interface of \p Compare is the same as \p std::less. The function returns \p true if \p val is found, \p false otherwise.
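A conforming \p find with an explicit comparator might look like this sketch (assuming \p m_Set is an underlying ordered container whose \p find accepts a comparator): \code template <typename Q, typename Compare, typename Func> bool find( Q& val, Compare cmp, Func f ) { iterator it = m_Set.find( val, cmp ); if ( it == m_Set.end()) return false; f( *it, val ); // the functor may update non-key fields of the item return true; } \endcode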
Clears the container \code void clear() template <typename Disposer> void clear( Disposer disposer ) \endcode The second form calls \p disposer for each item in the container before clearing.
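For example, the second form can be used to free the items (a sketch; it assumes the items were allocated with \p new): \code bucket.clear( []( value_type* p ) { delete p; } ); \endcode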
Get size of bucket \code size_t size() const \endcode This function may be required by some resizing policies.
Iterators \code iterator begin(); const_iterator begin() const; iterator end(); const_iterator end() const; \endcode
Move item when resizing \code void move_item( adapted_container& from, iterator it ) \endcode This helper function is used during set resizing, when the item pointed to by the \p it iterator is moved from the old bucket \p from to the new bucket pointed to by \p this.
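For illustration, the adapters in this file implement it essentially as follows (a sketch): \code void move_item( adapted_container& from, iterator itWhat ) { value_type& val = *itWhat; from.base_container().erase( itWhat ); // unlink from the old bucket insert( val, []( value_type& ) {} ); // relink into this bucket } \endcode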
*/ template < typename Container, typename... Options > class adapt { public: typedef Container type ; ///< adapted container type typedef typename type::value_type value_type ; ///< value type stored in the container }; //@cond struct adapted_sequential_container { typedef striped_set::load_factor_resizing<4> default_resizing_policy; }; struct adapted_container { typedef striped_set::no_resizing default_resizing_policy; }; //@endcond //@cond namespace details { template class boost_intrusive_set_adapter: public cds::intrusive::striped_set::adapted_container { public: typedef Set container_type; typedef typename container_type::value_type value_type ; ///< value type stored in the container typedef typename container_type::iterator iterator ; ///< container iterator typedef typename container_type::const_iterator const_iterator ; ///< container const iterator typedef typename container_type::key_compare key_comparator; private: container_type m_Set; public: boost_intrusive_set_adapter() {} container_type& base_container() { return m_Set; } template bool insert( value_type& val, Func f ) { std::pair res = m_Set.insert( val ); if ( res.second ) f( val ); return res.second; } template std::pair update( value_type& val, Func f, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Set.insert( val ); f( res.second, *res.first, val ); return std::make_pair( true, res.second ); } else { auto it = m_Set.find( val, key_comparator()); if ( it == m_Set.end()) return std::make_pair( false, false ); f( false, *it, val ); return std::make_pair( true, false ); } } bool unlink( value_type& val ) { iterator it = m_Set.find( val, key_comparator()); if ( it == m_Set.end() || &(*it) != &val ) return false; m_Set.erase( it ); return true; } template value_type * erase( Q const& key, Func f ) { iterator it = m_Set.find( key, key_comparator()); if (it == m_Set.end()) return nullptr; value_type& val = *it; f( val ); m_Set.erase( it ); return &val; } template value_type * erase( Q const& key, Less pred, Func f ) { iterator it = m_Set.find( key, pred ); if (it == m_Set.end()) return nullptr; value_type& val = *it; f( val ); m_Set.erase( it ); return &val; } template bool find( Q const& key, Func f ) { return find( key, key_comparator(), f ); } template bool find( Q const& key, Compare cmp, Func f ) { iterator it = m_Set.find( key, cmp ); if ( it == m_Set.end()) return false; f( *it, key ); return true; } void clear() { m_Set.clear(); } template void clear( Disposer disposer ) { m_Set.clear_and_dispose( disposer ); } iterator begin() { return m_Set.begin(); } const_iterator begin() const { return m_Set.begin(); } iterator end() { return m_Set.end(); } const_iterator end() const { return m_Set.end(); } size_t size() const { return (size_t) m_Set.size(); } void move_item( boost_intrusive_set_adapter& from, iterator itWhat ) { value_type& val = *itWhat; from.base_container().erase( itWhat ); insert( val, []( value_type& ) {} ); } }; } // namespace details //@endcond } // namespace striped_set }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_avl_set.h000066400000000000000000000030431341244201700231170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::avl_set< T, P1, P2, P3, P4, P5 >, Options... > { public: typedef boost::intrusive::avl_set< T, P1, P2, P3, P4, P5 > container_type; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction }; #else template class adapt< boost::intrusive::avl_set< T, BIOptons... >, Options... > { public: typedef boost::intrusive::avl_set< T, BIOptons... > container_type ; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction }; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_list.h000066400000000000000000000175111341244201700224420ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { namespace details { template class adapt_boost_list { public: typedef List container_type; ///< underlying intrusive container type private: /// Adapted intrusive container class adapted_container : public cds::intrusive::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type; ///< value type stored in the container typedef typename container_type::iterator iterator; ///< container iterator typedef typename container_type::const_iterator const_iterator; ///< container const iterator typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... 
>::type key_comparator; private: struct find_predicate { bool operator()( value_type const& i1, value_type const& i2 ) const { return key_comparator()(i1, i2) < 0; } template bool operator()( Q const& i1, value_type const& i2 ) const { return key_comparator()(i1, i2) < 0; } template bool operator()( value_type const& i1, Q const& i2 ) const { return key_comparator()(i1, i2) < 0; } }; template iterator find_key( Q const& key, Pred pred ) { iterator itEnd = m_List.end(); iterator it; for ( it = m_List.begin(); it != itEnd; ++it ) { if ( !pred( *it, key )) break; } return it; } private: container_type m_List; public: adapted_container() {} container_type& base_container() { return m_List; } template bool insert( value_type& val, Func f ) { iterator it = find_key( val, find_predicate()); if ( it == m_List.end() || key_comparator()(val, *it) != 0 ) { m_List.insert( it, val ); f( val ); return true; } // key already exists return false; } template std::pair update( value_type& val, Func f, bool bAllowInsert ) { iterator it = find_key( val, find_predicate()); if ( it == m_List.end() || key_comparator()(val, *it) != 0 ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); m_List.insert( it, val ); f( true, val, val ); return std::make_pair( true, true ); } else { // already exists f( false, *it, val ); return std::make_pair( true, false ); } } bool unlink( value_type& val ) { iterator it = find_key( val, find_predicate()); if ( it == m_List.end() || &(*it) != &val ) return false; m_List.erase( it ); return true; } template value_type * erase( Q const& key, Func f ) { iterator it = find_key( key, find_predicate()); if ( it == m_List.end() || key_comparator()(key, *it) != 0 ) return nullptr; // key exists value_type& val = *it; f( val ); m_List.erase( it ); return &val; } template value_type * erase( Q const& key, Less pred, Func f ) { iterator it = find_key( key, pred ); if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) return nullptr; // key exists value_type& val = *it; f( val ); m_List.erase( it ); return &val; } template bool find( Q& key, Func f ) { return find( key, find_predicate(), f ); } template bool find( Q& key, Less pred, Func f ) { iterator it = find_key( key, pred ); if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) return false; // key exists f( *it, key ); return true; } void clear() { m_List.clear(); } template void clear( Disposer disposer ) { m_List.clear_and_dispose( disposer ); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } size_t size() const { return (size_t)m_List.size(); } void move_item( adapted_container& from, iterator itWhat ) { value_type& val = *itWhat; from.base_container().erase( itWhat ); insert( val, []( value_type& ) {} ); } }; public: typedef adapted_container type; ///< Result of the metafunction }; } // namespace details #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::list< T, P1, P2, P3, P4 >, Options... > : public details::adapt_boost_list< boost::intrusive::list< T, P1, P2, P3, P4 >, Options... > {}; #else template class adapt< boost::intrusive::list< T, BIOptions... >, Options... > : public details::adapt_boost_list< boost::intrusive::list< T, BIOptions... >, Options... 
> {}; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_set.h000066400000000000000000000027561341244201700222670ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::set< T, O1, O2, O3, O4 >, Options... > { public: typedef boost::intrusive::set< T, O1, O2, O3, O4 > container_type; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction }; #else template class adapt< boost::intrusive::set< T, BIOptons... >, Options... > { public: typedef boost::intrusive::set< T, BIOptons... > container_type ; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction }; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_sg_set.h000066400000000000000000000030051341244201700227440ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::sg_set< T, O1, O2, O3, O4 >, Options... > { public: typedef boost::intrusive::sg_set< T, O1, O2, O3, O4 > container_type; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction }; #else template class adapt< boost::intrusive::sg_set< T, BIOptons... >, Options... > { public: typedef boost::intrusive::sg_set< T, BIOptons... > container_type ; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction }; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_slist.h000066400000000000000000000212641341244201700226250ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { namespace details { template class adapt_boost_slist { public: typedef List container_type; ///< underlying intrusive container type private: /// Adapted intrusive container class adapted_container : public cds::intrusive::striped_set::adapted_sequential_container { public: typedef typename container_type::value_type value_type; ///< value type stored in the container typedef typename container_type::iterator iterator; ///< container iterator typedef typename container_type::const_iterator const_iterator; ///< container const iterator typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; private: template std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) { iterator itPrev = m_List.before_begin(); iterator itEnd = m_List.end(); for ( iterator it = m_List.begin(); it != itEnd; ++it ) { if ( pred( key, *it )) itPrev = it; else if ( pred( *it, key )) break; else return std::make_pair( itPrev, true ); } return std::make_pair( itPrev, false ); } template std::pair< iterator, bool > find_prev_item( Q const& key ) { return find_prev_item_cmp( key, key_comparator()); } template std::pair< iterator, bool > find_prev_item_cmp( Q const& key, Compare cmp ) { iterator itPrev = m_List.before_begin(); iterator itEnd = m_List.end(); for ( iterator it = m_List.begin(); it != itEnd; ++it ) { int nCmp = cmp( key, *it ); if ( nCmp < 0 ) itPrev = it; else if ( nCmp > 0 ) break; else return std::make_pair( itPrev, true ); } return std::make_pair( itPrev, false ); } template value_type * erase_( Q const& key, Compare cmp, Func f ) { std::pair< iterator, bool > pos = find_prev_item_cmp( key, cmp ); if ( !pos.second ) return nullptr; // key exists iterator it = pos.first; value_type& val = *(++it); f( val ); m_List.erase_after( pos.first ); return &val; } private: container_type m_List; public: adapted_container() {} container_type& base_container() { return m_List; } template bool insert( value_type& val, Func f ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) { m_List.insert_after( pos.first, val ); f( val ); return true; } // key already exists return false; } template std::pair update( value_type& val, Func f, bool bAllowInsert ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) { // insert new if ( !bAllowInsert ) return std::make_pair( false, false ); m_List.insert_after( pos.first, val ); f( true, val, val ); return std::make_pair( true, true ); } else { // already exists f( false, *(++pos.first), val ); return std::make_pair( true, false ); } } bool unlink( value_type& val ) { std::pair< iterator, bool > pos = find_prev_item( val ); if ( !pos.second ) return false; ++pos.first; if ( &(*pos.first) != &val ) return false; m_List.erase( pos.first ); return true; } template value_type * erase( Q const& key, Func f ) { return erase_( key, key_comparator(), f ); } template value_type * erase( Q const& key, Less /*pred*/, Func f ) { return erase_( key, cds::opt::details::make_comparator_from_less(), f ); } template bool find( Q& key, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key ); if ( !pos.second ) return false; // key exists f( *(++pos.first), key ); return true; } 
template bool find( Q& key, Less pred, Func f ) { std::pair< iterator, bool > pos = find_prev_item( key, pred ); if ( !pos.second ) return false; // key exists f( *(++pos.first), key ); return true; } void clear() { m_List.clear(); } template void clear( Disposer disposer ) { m_List.clear_and_dispose( disposer ); } iterator begin() { return m_List.begin(); } const_iterator begin() const { return m_List.begin(); } iterator end() { return m_List.end(); } const_iterator end() const { return m_List.end(); } size_t size() const { return (size_t)m_List.size(); } void move_item( adapted_container& from, iterator itWhat ) { value_type& val = *itWhat; from.base_container().erase( itWhat ); insert( val, []( value_type& ) {} ); } }; public: typedef adapted_container type; ///< Result of the metafunction }; } // namespace details #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::slist< T, P1, P2, P3, P4, P5 >, Options... > : public details::adapt_boost_slist< boost::intrusive::slist< T, P1, P2, P3, P4, P5 >, Options... > {}; #else template class adapt< boost::intrusive::slist< T, BIOptions... >, Options... > : public details::adapt_boost_slist< boost::intrusive::slist< T, BIOptions... >, Options... > {}; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_splay_set.h000066400000000000000000000030341341244201700234650ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::splay_set< T, O1, O2, O3, O4 >, Options... > { public: typedef boost::intrusive::splay_set< T, O1, O2, O3, O4 > container_type; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction }; #else template class adapt< boost::intrusive::splay_set< T, BIOptons... >, Options... > { public: typedef boost::intrusive::splay_set< T, BIOptons... > container_type ; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction }; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_treap_set.h000066400000000000000000000030351341244201700234510ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt< boost::intrusive::treap_set< T, O1, O2, O3, O4 >, Options... 
> { public: typedef boost::intrusive::treap_set< T, O1, O2, O3, O4 > container_type ; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction }; #else template class adapt< boost::intrusive::treap_set< T, BIOptons... >, Options... > { public: typedef boost::intrusive::treap_set< T, BIOptons... > container_type; ///< underlying intrusive container type public: typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction }; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/boost_unordered_set.h000066400000000000000000000202471341244201700243310ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H #define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H #include #include #include //@cond namespace cds { namespace intrusive { namespace striped_set { namespace details { template class adapt_boost_unordered_set { public: typedef Set container_type; ///< underlying intrusive container type private: class adapted_container { public: typedef typename container_type::value_type value_type; ///< value type stored in the container typedef typename container_type::iterator iterator; ///< container iterator typedef typename container_type::const_iterator const_iterator; ///< container const iterator typedef typename opt::value < typename opt::find_option < opt::buffer< opt::v::initialized_static_buffer< cds::any_type, 256 > >, Options... > ::type > ::buffer initial_buffer_type; typedef typename initial_buffer_type::template rebind< typename container_type::bucket_type >::other buffer_type; typedef cds::intrusive::striped_set::load_factor_resizing<256> default_resizing_policy; private: template struct equal_from_compare { Compare& m_cmp; equal_from_compare( Compare& cmp ) : m_cmp( cmp ) {} equal_from_compare( equal_from_compare const& src ) : m_cmp( src.m_cmp ) {} template bool operator()( A& a, B& b ) const { return !m_cmp( a, b ) && !m_cmp( b, a ); } template bool operator()( A& a, B& b ) { return !m_cmp( a, b ) && !m_cmp( b, a ); } }; buffer_type m_Buckets; // buffer should be declared first since it is used in m_Set ctor. 
container_type m_Set; public: adapted_container() : m_Set( typename container_type::bucket_traits( m_Buckets.buffer(), m_Buckets.capacity())) {} container_type& base_container() { return m_Set; } template bool insert( value_type& val, Func f ) { std::pair res = m_Set.insert( val ); if ( res.second ) f( val ); return res.second; } template std::pair update( value_type& val, Func f, bool bAllowInsert ) { if ( bAllowInsert ) { std::pair res = m_Set.insert( val ); f( res.second, *res.first, val ); return std::make_pair( true, res.second ); } else { auto it = m_Set.find( val ); if ( it == m_Set.end()) return std::make_pair( false, false ); f( false, *it, val ); return std::make_pair( true, false ); } } bool unlink( value_type& val ) { iterator it = m_Set.find( value_type( val )); if ( it == m_Set.end() || &(*it) != &val ) return false; m_Set.erase( it ); return true; } template value_type * erase( Q const& key, Func f ) { iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal()); if ( it == m_Set.end()) return nullptr; value_type& val = *it; f( val ); m_Set.erase( it ); return &val; } template value_type * erase( Q const& key, Less pred, Func f ) { iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare( pred )); if ( it == m_Set.end()) return nullptr; value_type& val = *it; f( val ); m_Set.erase( it ); return &val; } template bool find( Q& key, Func f ) { iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal()); if ( it == m_Set.end()) return false; f( *it, key ); return true; } template bool find( Q& key, Less pred, Func f ) { iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare( pred )); if ( it == m_Set.end()) return false; f( *it, key ); return true; } void clear() { m_Set.clear(); } template void clear( Disposer disposer ) { m_Set.clear_and_dispose( disposer ); } iterator begin() { return m_Set.begin(); } const_iterator begin() const { return m_Set.begin(); } iterator end() { return m_Set.end(); } const_iterator end() const { return m_Set.end(); } size_t size() const { return (size_t)m_Set.size(); } void move_item( adapted_container& from, iterator itWhat ) { value_type& val = *itWhat; from.base_container().erase( itWhat ); insert( val, []( value_type& ) {} ); } }; public: typedef adapted_container type; ///< Result of the metafunction }; } // namespace details #if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 template class adapt < boost::intrusive::unordered_set< T, O1, O2, O3, O4, O5, O6, O7, O8, O9, O10 >, Options... > : public details::adapt_boost_unordered_set < boost::intrusive::unordered_set< T, O1, O2, O3, O4, O5, O6, O7, O8, O9, O10 >, Options... > {}; #else template class adapt < boost::intrusive::unordered_set< T, BIOptons... >, Options... > : public details::adapt_boost_unordered_set < boost::intrusive::unordered_set< T, BIOptons... >, Options... > {}; #endif }}} // namespace cds::intrusive::striped_set //@endcond #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H libcds-2.3.3/cds/intrusive/striped_set/resizing_policy.h000066400000000000000000000224131341244201700234670ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H #define CDSLIB_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H #include namespace cds { namespace intrusive { namespace striped_set { /// Load factor based resizing policy /** @ingroup cds_striped_resizing_policy When total item count in a container exceeds container.bucket_count() * LoadFactor then resizing is needed. This policy is stateless. The reset() function is called after the resizing is done. The function is intended for resetting internal state of the policy. */ template struct load_factor_resizing { /// Main policy operator returns \p true when resizing is needed template bool operator ()( size_t nSize, ///< Current item count of \p container Container const& container, ///< Container Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) ) const { return nSize > container.bucket_count() * LoadFactor; } /// Resets internal state of the policy (does nothing) void reset() {} }; /// Load factor based resizing policy, stateful specialization /** @ingroup cds_striped_resizing_policy This specialization allows to specify a load factor at runtime. */ template <> struct load_factor_resizing<0> { ///@cond const size_t m_nLoadFactor; //@endcond public: /// Default ctor, load factor is 4 load_factor_resizing() : m_nLoadFactor(4) {} /// Ctor with explicitly defined \p nLoadFactor explicit load_factor_resizing( size_t nLoadFactor ) : m_nLoadFactor( nLoadFactor ) {} /// Copy ctor load_factor_resizing( load_factor_resizing const& src ) : m_nLoadFactor( src.m_nLoadFactor ) {} /// Move ctor load_factor_resizing( load_factor_resizing&& src ) : m_nLoadFactor( src.m_nLoadFactor ) {} /// Main policy operator returns \p true when resizing is needed template bool operator ()( size_t nSize, ///< Current item count of \p container Container const& container, ///< Container Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) ) { return nSize > container.bucket_count() * m_nLoadFactor; } /// Resets internal state of the policy (does nothing) void reset() {} }; /// Rational load factor resizing policy /** @ingroup cds_striped_resizing_policy When total item count in a container exceeds container.bucket_count() * Numerator / Denominator then resizing is needed. This policy is stateless: \p Numerator and \p Denominator specifies in compile time as template arguments */ template struct rational_load_factor_resizing { static_assert( Denominator != 0, "Denominator must not be zero" ); /// Main policy operator returns \p true when resizing is needed template bool operator ()( size_t nSize, ///< Current item count of \p container Container const& container, ///< Container Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) ) const { return nSize * Denominator > container.bucket_count() * Numerator; } /// Resets internal state of the policy (does nothing) void reset() {} }; /// Rational load factor resizing policy /** @ingroup cds_striped_resizing_policy When total item count in a container exceeds container.bucket_count() * Numerator / Denominator then resizing is needed. This policy is stateful: \p Numerator and \p Denominator specifies in construction time. 
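For example (a sketch showing both variants): \code // compile-time ratio: resize when size() * 3 > bucket_count() * 2 typedef cds::intrusive::striped_set::rational_load_factor_resizing<2, 3> fixed_policy; // runtime ratio: the specialization with Numerator == 0 takes the // numerator and denominator as constructor arguments cds::intrusive::striped_set::rational_load_factor_resizing<0, 1> runtime_policy( 2, 3 ); \endcode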
*/ template struct rational_load_factor_resizing<0, Denominator> { ///@cond const size_t m_nNumerator; const size_t m_nDenominator; //@endcond public: /// Default ctor, load factor is 1/2 rational_load_factor_resizing() : m_nNumerator(1), m_nDenominator(2) {} /// Ctor with explicitly defined \p nLoadFactor rational_load_factor_resizing( size_t nNumerator, size_t nDenominator ) : m_nNumerator( nNumerator ), m_nDenominator( nDenominator ) {} /// Copy ctor rational_load_factor_resizing( rational_load_factor_resizing const& src ) : m_nNumerator( src.m_nNumerator ), m_nDenominator( src.m_nDenominator ) {} /// Move ctor rational_load_factor_resizing( rational_load_factor_resizing&& src ) : m_nNumerator( src.m_nNumerator ), m_nDenominator( src.m_nDenominator ) {} /// Main policy operator returns \p true when resizing is needed template bool operator ()( size_t nSize, ///< Current item count of \p container Container const& container, ///< Container Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) ) { return nSize * m_nDenominator > container.bucket_count() * m_nNumerator; } /// Resets internal state of the policy (does nothing) void reset() {} }; /// Single bucket threshold resizing policy /** @ingroup cds_striped_resizing_policy If any single bucket size exceeds the global \p Threshold then resizing is needed. This policy is stateless. */ template struct single_bucket_size_threshold { /// Main policy operator returns \p true when resizing is needed template bool operator ()( size_t /*nSize*/, ///< Current item count of \p container (not used) Container const& /*container*/, ///< Container (not used) Bucket const& bucket ///< reference to a container's bucket ) const { return bucket.size() > Threshold; } /// Resets internal state of the policy (does nothing) void reset() {} }; /// Single bucket threshold resizing policy, stateful specialization /** @ingroup cds_striped_resizing_policy This specialization allows to specify and modify a threshold at runtime. */ template <> struct single_bucket_size_threshold<0> { size_t m_nThreshold ; ///< The bucket size threshold /// Default ctor, the threshold is 4 single_bucket_size_threshold() : m_nThreshold(4) {} /// Ctor with explicitly defined \p nThreshold explicit single_bucket_size_threshold( size_t nThreshold ) : m_nThreshold( nThreshold ) {} /// Copy ctor single_bucket_size_threshold( single_bucket_size_threshold const& src ) : m_nThreshold( src.m_nThreshold ) {} /// Move ctor single_bucket_size_threshold( single_bucket_size_threshold&& src ) : m_nThreshold( src.m_nThreshold ) {} /// Main policy operator returns \p true when resizing is needed template bool operator ()( size_t /*nSize*/, ///< Current item count of \p container (not used) Container const& /*container*/, ///< Container (not used) Bucket const& bucket ///< reference to a container's bucket ) const { return bucket.size() > m_nThreshold; } /// Resets internal state of the policy (does nothing) void reset() {} }; /// Dummy resizing policy /** @ingroup cds_striped_resizing_policy This policy is dummy and always returns \p false that means no resizing is needed. This policy is stateless. 
*/ struct no_resizing { /// Main policy operator always returns \p false template <typename Container, typename Bucket> bool operator ()( size_t /*nSize*/, ///< Current item count of \p container (not used) Container const& /*container*/, ///< Container (not used) Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) ) const { return false; } /// Resets internal state of the policy (does nothing) void reset() {} }; }}} // namespace cds::intrusive::striped_set #endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H libcds-2.3.3/cds/intrusive/striped_set/striping_policy.h000066400000000000000000000301041341244201700234700ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H #define CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H #include #include #include #include #include namespace cds { namespace intrusive { namespace striped_set { /// Lock striping concurrent access policy /** This is one of the available opt::mutex_policy option types for StripedSet. Lock striping is a very simple technique. The set consists of the bucket table and the array of locks. Initially, the capacity of the lock array and the bucket table is the same. When the set is resized, the bucket table capacity is doubled but the lock array is not. Lock \p i protects each bucket \p j, where j = i mod L , where \p L is the size of the lock array. The policy contains an internal array of \p Lock locks. Template arguments: - \p Lock - the type of mutex. The default is \p std::mutex. The mutex type should be default-constructible. Note that a spin-lock is poorly suited to lock striping, for performance reasons. - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. */ template <class Lock = std::mutex, class Alloc = CDS_DEFAULT_ALLOCATOR> class striping { public: typedef Lock lock_type ; ///< lock type typedef Alloc allocator_type ; ///< allocator type typedef cds::sync::lock_array< lock_type, cds::sync::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type protected: //@cond lock_array_type m_Locks; //@endcond public: //@cond class scoped_cell_lock { std::unique_lock< lock_array_type > m_guard; public: scoped_cell_lock( striping& policy, size_t nHash ) : m_guard( policy.m_Locks, nHash ) {} }; class scoped_full_lock { std::unique_lock< lock_array_type > m_guard; public: scoped_full_lock( striping& policy ) : m_guard( policy.m_Locks ) {} }; class scoped_resize_lock: public scoped_full_lock { public: scoped_resize_lock( striping& policy ) : scoped_full_lock( policy ) {} bool success() const { return true; } }; //@endcond public: /// Constructor striping( size_t nLockCount ///< The size of lock array. Must be power of two. ) : m_Locks( nLockCount, cds::sync::pow2_select_policy( nLockCount )) {} /// Returns lock array size /** Lock array size is unchanged during the \p striping object lifetime */ size_t lock_count() const { return m_Locks.size(); } //@cond void resize( size_t /*nNewCapacity*/ ) {} //@endcond }; /// Refinable concurrent access policy /** This is one of the available opt::mutex_policy option types for StripedSet. Refining is like the striping technique (see striped_set::striping) but it allows the lock array to grow when the hash table is resized, so the sizes of the hash table and the lock array are equal. Template arguments: - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required.
The default is \p std::recursive_mutex. The mutex type should be default-constructible. - \p BackOff - back-off strategy. Default is cds::backoff::yield - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. */ template < class RecursiveLock = std::recursive_mutex, typename BackOff = cds::backoff::yield, class Alloc = CDS_DEFAULT_ALLOCATOR> class refinable { public: typedef RecursiveLock lock_type ; ///< lock type typedef BackOff back_off ; ///< back-off strategy used typedef Alloc allocator_type; ///< allocator type protected: //@cond typedef cds::sync::trivial_select_policy lock_selection_policy; class lock_array_type : public cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > , public std::enable_shared_from_this< lock_array_type > { typedef cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > lock_array_base; public: lock_array_type( size_t nCapacity ) : lock_array_base( nCapacity ) {} }; typedef std::shared_ptr< lock_array_type > lock_array_ptr; typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator; typedef unsigned long long owner_t; typedef cds::OS::ThreadId threadId_t; typedef cds::sync::spin spinlock_type; typedef std::unique_lock< spinlock_type > scoped_spinlock; //@endcond protected: //@cond static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor. atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) atomics::atomic m_nCapacity ; ///< Lock array capacity spinlock_type m_access ; ///< access to m_arrLocks //@endcond protected: //@cond struct lock_array_disposer { void operator()( lock_array_type * pArr ) { // Seems, there is a false positive in std::shared_ptr deallocation in uninstrumented libc++ // see, for example, https://groups.google.com/forum/#!topic/thread-sanitizer/eHu4dE_z7Cc // https://reviews.llvm.org/D21609 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; lock_array_allocator().Delete( pArr ); CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; } }; lock_array_ptr create_lock_array( size_t nCapacity ) { m_nCapacity.store( nCapacity, atomics::memory_order_relaxed ); return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer()); } lock_type& acquire( size_t nHash ) { owner_t me = (owner_t) cds::OS::get_current_thread_id(); owner_t who; back_off bkoff; while ( true ) { // wait while resizing while ( true ) { who = m_Owner.load( atomics::memory_order_acquire ); if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask)) break; bkoff(); } lock_array_ptr pLocks; { scoped_spinlock sl(m_access); pLocks = m_arrLocks; } lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1)); lock.lock(); who = m_Owner.load( atomics::memory_order_acquire ); if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks == pLocks ) return lock; lock.unlock(); } } lock_array_ptr acquire_all() { owner_t me = (owner_t) cds::OS::get_current_thread_id(); owner_t who; back_off bkoff; while ( true ) { // wait while resizing while ( true ) { who = m_Owner.load( atomics::memory_order_acquire ); if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask)) break; bkoff(); } lock_array_ptr pLocks; { scoped_spinlock sl(m_access); pLocks = m_arrLocks; } pLocks->lock_all(); who = m_Owner.load( atomics::memory_order_acquire ); if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks == pLocks ) return pLocks; 
pLocks->unlock_all(); } } void release_all( lock_array_ptr p ) { p->unlock_all(); } bool acquire_resize() { owner_t me = (owner_t) cds::OS::get_current_thread_id(); back_off bkoff; for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) { owner_t ownNull = 0; if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) { lock_array_ptr pOldLocks = m_arrLocks; size_t const nLockCount = pOldLocks->size(); for ( size_t i = 0; i < nLockCount; ++i ) { typename lock_array_type::lock_type& lock = pOldLocks->at(i); bkoff.reset(); while ( !lock.try_lock()) bkoff(); lock.unlock(); } return true; } else bkoff(); } return false; } void release_resize() { m_Owner.store( 0, atomics::memory_order_release ); } //@endcond public: //@cond class scoped_cell_lock { std::unique_lock< lock_type > m_guard; public: scoped_cell_lock( refinable& policy, size_t nHash ) : m_guard( policy.acquire( nHash ), std::adopt_lock_t()) {} }; class scoped_full_lock { refinable& m_Policy; lock_array_ptr m_Locks; public: scoped_full_lock( refinable& policy ) : m_Policy( policy ) { m_Locks = policy.acquire_all(); } ~scoped_full_lock() { m_Policy.release_all( m_Locks ); } }; class scoped_resize_lock { refinable& m_Policy; bool m_bSucceess; public: scoped_resize_lock( refinable& policy ) : m_Policy( policy ) { m_bSucceess = policy.acquire_resize(); } ~scoped_resize_lock() { if ( m_bSucceess ) m_Policy.release_resize(); } bool success() const { return m_bSucceess; } }; //@endcond public: /// Constructor refinable( size_t nLockCount ///< Initial size of lock array. Must be power of two. ) : m_Owner(0) , m_nCapacity( nLockCount ) { assert( cds::beans::is_power2( nLockCount )); m_arrLocks = create_lock_array( nLockCount ); } /// Returns lock array size /** Lock array size is not a constant for \p refinable policy and can be changed when the set is resized. */ size_t lock_count() const { return m_nCapacity.load( atomics::memory_order_relaxed ); } /// Resize for new capacity void resize( size_t nNewCapacity ) { // Expect the access is locked by scoped_resize_lock!!! lock_array_ptr pNewArr = create_lock_array( nNewCapacity ); scoped_spinlock sl(m_access); m_arrLocks.swap( pNewArr ); } }; }}} // namespace cds::intrusive::striped_set #endif libcds-2.3.3/cds/intrusive/treiber_stack.h000066400000000000000000001032241341244201700205520ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_TREIBER_STACK_H #define CDSLIB_INTRUSIVE_TREIBER_STACK_H #include #include // unique_lock #include #include #include #include #include namespace cds { namespace intrusive { /// TreiberStack related definitions /** @ingroup cds_intrusive_helper */ namespace treiber_stack { /// Stack node /** Template parameters: - GC - garbage collector used - Tag - a \ref cds_intrusive_hook_tag "tag" */ template using node = cds::intrusive::single_link::node< GC, Tag >; /// Base hook /** \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < typename... Options > using base_hook = cds::intrusive::single_link::base_hook< Options...>; /// Member hook /** \p MemberOffset specifies offset in bytes of \ref node member into your structure. Use \p offsetof macro to define \p MemberOffset \p Options are: - opt::gc - garbage collector used. 
- opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template < size_t MemberOffset, typename... Options > using member_hook = cds::intrusive::single_link::member_hook< MemberOffset, Options... >; /// Traits hook /** \p NodeTraits defines type traits for node. See \ref node_traits for \p NodeTraits interface description \p Options are: - opt::gc - garbage collector used. - opt::tag - a \ref cds_intrusive_hook_tag "tag" */ template using traits_hook = cds::intrusive::single_link::traits_hook< NodeTraits, Options... >; //@cond /// Operation id for the \ref cds_elimination_description "elimination back-off" enum operation_id { op_push, ///< push op id op_pop ///< pop op id }; /// Operation descriptor for the \ref cds_elimination_description "elimination back-off" template struct operation: public cds::algo::elimination::operation_desc { operation_id idOp; ///< Op id T * pVal; ///< for push: pointer to argument; for pop: accepts a return value atomics::atomic nStatus; ///< Internal elimination status operation() : pVal( nullptr ) , nStatus( 0 /*op_free*/ ) {} }; //@endcond /// Stack internal statistics. May be useful for debugging or profiling /** Template argument \p Counter defines type of counter. Default is cds::atomicity::event_counter. You may use stronger type of counter like as cds::atomicity::item_counter, or even an integral type, for example, \p int */ template struct stat { typedef Counter counter_type ; ///< Counter type counter_type m_PushCount ; ///< Push call count counter_type m_PopCount ; ///< Pop call count counter_type m_PushRace ; ///< Count of push race conditions encountered counter_type m_PopRace ; ///< Count of pop race conditions encountered counter_type m_ActivePushCollision ; ///< Count of active push collision for elimination back-off counter_type m_ActivePopCollision ; ///< Count of active pop collision for elimination back-off counter_type m_PassivePushCollision ; ///< Count of passive push collision for elimination back-off counter_type m_PassivePopCollision ; ///< Count of passive pop collision for elimination back-off counter_type m_EliminationFailed ; ///< Count of unsuccessful elimination back-off //@cond void onPush() { ++m_PushCount; } void onPop() { ++m_PopCount; } void onPushRace() { ++m_PushRace; } void onPopRace() { ++m_PopRace; } void onActiveCollision( operation_id opId ) { if ( opId == treiber_stack::op_push ) ++m_ActivePushCollision; else ++m_ActivePopCollision; } void onPassiveCollision( operation_id opId ) { if ( opId == treiber_stack::op_push ) ++m_PassivePushCollision; else ++m_PassivePopCollision; } void onEliminationFailed() { ++m_EliminationFailed; } //@endcond }; /// Empty (no overhead) stack statistics. Support interface like treiber_stack::stat struct empty_stat { //@cond void onPush() {} void onPop() {} void onPushRace() {} void onPopRace() {} void onActiveCollision( operation_id ) {} void onPassiveCollision( operation_id ) {} void onEliminationFailed() {} //@endcond }; /// TreiberStack default type traits struct traits { /// Back-off strategy typedef cds::backoff::Default back_off; /// Hook, possible types are \p treiber_stack::base_hook, \p treiber_stack::member_hook, \p treiber_stack::traits_hook typedef treiber_stack::base_hook<> hook; /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used only in \p TreiberStack::clear() function typedef opt::v::empty_disposer disposer; /// Item counting feature; by default, disabled. 
Use \p cds::atomicity::item_counter to enable item counting typedef cds::atomicity::empty_item_counter item_counter; /// C++ memory ordering model /** Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). */ typedef opt::v::relaxed_ordering memory_model; /// Internal statistics (by default, disabled) /** Possible option value are: \p treiber_stack::stat, \p treiber_stack::empty_stat (the default), user-provided class that supports \p %treiber_stack::stat interface. */ typedef treiber_stack::empty_stat stat; /// Link checking, see \p cds::opt::link_checker static constexpr const opt::link_check_type link_checker = opt::debug_check_link; /** @name Elimination back-off traits The following traits is used only if elimination enabled */ ///@{ /// Enable elimination back-off; by default, it is disabled static constexpr const bool enable_elimination = false; /// Back-off strategy to wait for elimination, default is \p cds::backoff::delay<> typedef cds::backoff::delay<> elimination_backoff; /// Buffer type for elimination array /** Possible types are \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. The size should be selected empirically for your application and hardware, there are no common rules for that. Default is %opt::v::initialized_static_buffer< any_type, 4 > . */ typedef opt::v::initialized_static_buffer< int, 4 > buffer; /// Random engine to generate a random position in elimination array typedef opt::v::c_rand random_engine; /// Lock type used in elimination, default is cds::sync::spin typedef cds::sync::spin lock_type; ///@} }; /// Metafunction converting option list to \p treiber_stack::traits /** Supported \p Options are: - \p opt::hook - hook used. Possible hooks are: \p treiber_stack::base_hook, \p treiber_stack::member_hook, \p treiber_stack::traits_hook. If the option is not specified, \p %treiber_stack::base_hook<> is used. - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used only in \p TreiberStack::clear function. - \p opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link. - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consisnent memory model). - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter, i.e. no item counting. Use \p cds::atomicity::item_counter to enable item counting. - \p opt::stat - the type to gather internal statistics. Possible option value are: \p treiber_stack::stat, \p treiber_stack::empty_stat (the default), user-provided class that supports \p treiber_stack::stat interface. - \p opt::enable_elimination - enable elimination back-off for the stack. Default value is \p false. If elimination back-off is enabled, additional options can be specified: - \p opt::buffer - a buffer type for elimination array, see \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. 
The size should be selected empirically for your application and hardware, there are no common rules for that. Default is %opt::v::initialized_static_buffer< any_type, 4 > . - \p opt::random_engine - a random engine to generate a random position in elimination array. Default is \p opt::v::c_rand. - \p opt::elimination_backoff - back-off strategy to wait for elimination, default is \p cds::backoff::delay<> - \p opt::lock_type - a lock type used in elimination back-off, default is \p cds::sync::spin Example: declare \p %TreiberStack with elimination enabled and internal statistics \code typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, typename cds::intrusive::treiber_stack::make_traits< cds::opt::enable_elimination< true >, cds::opt::stat< cds::intrusive::treiber_stack::stat<> > >::type > myStack; \endcode */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; //@cond namespace details { template class elimination_backoff; template class elimination_backoff { typedef typename Traits::back_off back_off; struct wrapper { back_off m_bkoff; void reset() { m_bkoff.reset(); } template bool backoff( treiber_stack::operation< T >&, Stat& ) { m_bkoff(); return false; } }; public: elimination_backoff() {} elimination_backoff( size_t ) {} typedef wrapper type; type init() { return wrapper(); } }; template class elimination_backoff { typedef typename Traits::back_off back_off; /// Back-off for elimination (usually delay) typedef typename Traits::elimination_backoff elimination_backoff_type; /// Lock type used in elimination back-off typedef typename Traits::lock_type elimination_lock_type; /// Random engine used in elimination back-off typedef typename Traits::random_engine elimination_random_engine; /// Per-thread elimination record typedef cds::algo::elimination::record elimination_rec; /// Collision array record struct collision_array_record { elimination_rec * pRec; elimination_lock_type lock; }; /// Collision array used in elimination-backoff; each item is optimized for cache-line size typedef typename Traits::buffer::template rebind< typename cds::details::type_padding::type >::other collision_array; /// Operation descriptor used in elimination back-off typedef treiber_stack::operation< T > operation_desc; /// Elimination back-off data struct elimination_data { mutable elimination_random_engine randEngine; ///< random engine collision_array collisions; ///< collision array elimination_data() { //TODO: check Traits::buffer must be static! 
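// Note (assumption based on the TODO above): the default ctor relies on the collision
// array being default-constructible, which holds for a statically-sized buffer;
// a dynamically allocated buffer must be created with the capacity-taking ctor below.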
} elimination_data( size_t nCollisionCapacity ) : collisions( nCollisionCapacity ) {} }; elimination_data m_Elimination; enum operation_status { op_free = 0, op_waiting = 1, op_collided = 2 }; typedef std::unique_lock< elimination_lock_type > slot_scoped_lock; template typename std::enable_if< Exp2, size_t >::type slot_index() const { return m_Elimination.randEngine() & (m_Elimination.collisions.capacity() - 1); } template typename std::enable_if< !Exp2, size_t >::type slot_index() const { return m_Elimination.randEngine() % m_Elimination.collisions.capacity(); } public: elimination_backoff() { m_Elimination.collisions.zeroize(); } elimination_backoff( size_t nCollisionCapacity ) : m_Elimination( nCollisionCapacity ) { m_Elimination.collisions.zeroize(); } typedef elimination_backoff& type; type init() { return *this; } void reset() {} template bool backoff( operation_desc& op, Stat& stat ) { elimination_backoff_type bkoff; op.nStatus.store( op_waiting, atomics::memory_order_relaxed ); elimination_rec * myRec = cds::algo::elimination::init_record( op ); collision_array_record& slot = m_Elimination.collisions[ slot_index() ]; { slot.lock.lock(); elimination_rec * himRec = slot.pRec; if ( himRec ) { operation_desc * himOp = static_cast( himRec->pOp ); assert( himOp ); if ( himOp->idOp != op.idOp ) { if ( op.idOp == treiber_stack::op_push ) himOp->pVal = op.pVal; else op.pVal = himOp->pVal; slot.pRec = nullptr; himOp->nStatus.store( op_collided, atomics::memory_order_release ); slot.lock.unlock(); cds::algo::elimination::clear_record(); stat.onActiveCollision( op.idOp ); return true; } //himOp->nStatus.store( op_free, atomics::memory_order_release ); } slot.pRec = myRec; slot.lock.unlock(); } // Wait for colliding operation bkoff( [&op]() noexcept -> bool { return op.nStatus.load( atomics::memory_order_acquire ) != op_waiting; } ); { slot_scoped_lock l( slot.lock ); if ( slot.pRec == myRec ) slot.pRec = nullptr; } bool bCollided = op.nStatus.load( atomics::memory_order_relaxed ) == op_collided; if ( !bCollided ) stat.onEliminationFailed(); else stat.onPassiveCollision( op.idOp ); cds::algo::elimination::clear_record(); return bCollided; } }; } // namespace details //@endcond } // namespace treiber_stack /// Treiber intrusive stack /** @ingroup cds_intrusive_stack Intrusive implementation of the well-known Treiber stack algorithm: - R. K. Treiber. Systems programming: Coping with parallelism. Technical Report RJ 5118, IBM Almaden Research Center, April 1986. \ref cds_elimination_description "Elimination back-off technique" can be used optionally. The idea of the elimination algorithm is taken from: - [2004] Danny Hendler, Nir Shavit, Lena Yerushalmi "A Scalable Lock-free Stack Algorithm" The elimination algorithm uses a single elimination array as a back-off schema on a shared lock-free stack. If the threads fail on the stack, they attempt to eliminate on the array, and if they fail in eliminating, they attempt to access the stack again and so on. @note The paper by Hendler et al. describes a lock-free implementation of elimination back-off which is quite complex. The main difficulty is managing the lifetime of the elimination record. This implementation uses a simplified lock-based (spin-based) approach that allows allocating the elimination record on the thread's stack. This approach demonstrates sufficient performance under high load. Template arguments: - \p GC - garbage collector type: \p gc::HP, \p gc::DHP. Garbage collecting schema must be the same as \p treiber_stack::node GC.
- \p T - the type the stack contains. A value of type \p T must be derived from \p treiber_stack::node for \p treiber_stack::base_hook, or it should have a member of type \p %treiber_stack::node for \p treiber_stack::member_hook, or it should be convertible to \p %treiber_stack::node for \p treiber_stack::traits_hook. - \p Traits - stack traits, default is \p treiber_stack::traits. You can use \p treiber_stack::make_traits metafunction to make your traits or just derive your traits from \p %treiber_stack::traits: \code struct myTraits: public cds::intrusive::treiber_stack::traits { typedef cds::intrusive::treiber_stack::stat<> stat; }; typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, myTraits > myStack; // Equivalent make_traits example: typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, typename cds::intrusive::treiber_stack::make_traits< cds::opt::stat< cds::intrusive::treiber_stack::stat<> > >::type > myStack; \endcode @note Be careful when you want to destroy a popped item; see \ref cds_intrusive_item_destroying "Destroying items of intrusive containers". @anchor cds_intrusive_TreiberStack_examples \par Examples Example of how to use \p treiber_stack::base_hook. The class whose objects will be pushed onto \p %TreiberStack should be derived from the \p treiber_stack::node class \code #include #include namespace ci = cds::intrusive; typedef cds::gc::HP gc; struct myData: public ci::treiber_stack::node< gc > { // ... }; // Stack type typedef ci::TreiberStack< gc, myData, typename cds::intrusive::treiber_stack::make_traits< ci::opt::hook< ci::treiber_stack::base_hook< gc > > >::type > stack_t; // Stack with elimination back-off enabled typedef ci::TreiberStack< gc, myData, typename ci::treiber_stack::make_traits< ci::opt::hook< ci::treiber_stack::base_hook< gc > >, cds::opt::enable_elimination< true > >::type > elimination_stack_t; \endcode Example of how to use \p treiber_stack::base_hook with different tags. \code #include #include namespace ci = cds::intrusive; typedef cds::gc::HP gc; // It is not necessary to declare a complete type for tags struct tag1; struct tag2; struct myData : public ci::treiber_stack::node< gc, tag1 > , public ci::treiber_stack::node< gc, tag2 > { // ... }; typedef ci::TreiberStack< gc, myData, typename ci::treiber_stack::make_traits< ci::opt::hook< ci::treiber_stack::base_hook< gc, tag1 > > >::type > stack1_t; typedef ci::TreiberStack< gc, myData, typename ci::treiber_stack::make_traits< ci::opt::hook< ci::treiber_stack::base_hook< gc, tag2 > > >::type > stack2_t; // You may add myData objects into stack1_t and stack2_t independently void foo() { stack1_t s1; stack2_t s2; myData i1, i2; s1.push( i1 ); s2.push( i2 ); s2.push( i1 ) ; // i1 is now contained in s1 and s2. myData * p; p = s1.pop() ; // pop i1 from s1 p = s1.pop() ; // p == nullptr, s1 is empty p = s2.pop() ; // pop i1 from s2 p = s2.pop() ; // pop i2 from s2 p = s2.pop() ; // p == nullptr, s2 is empty } \endcode Example of how to use \p treiber_stack::member_hook. Your class should have a member of type \p treiber_stack::node \code #include // offsetof macro #include #include namespace ci = cds::intrusive; typedef cds::gc::HP gc; struct myData { // ... ci::treiber_stack::node< gc > member_hook_; // ...
}; typedef ci::TreiberStack< gc, myData, typename ci::treiber_stack::make_traits< ci::opt::hook< ci::treiber_stack::member_hook< offsetof(myData, member_hook_), gc >> >::type > stack_t; \endcode */ template < typename GC, typename T, typename Traits = treiber_stack::traits > class TreiberStack { public: /// Rebind template arguments template struct rebind { typedef TreiberStack< GC2, T2, Traits2 > other ; ///< Rebinding result }; public: typedef GC gc; ///< Garbage collector typedef T value_type; ///< type of value stored in the stack typedef Traits traits; ///< Stack traits typedef typename traits::hook hook; ///< hook type typedef typename hook::node_type node_type; ///< node type typedef typename traits::disposer disposer; ///< disposer used typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits typedef typename single_link::get_link_checker< node_type, traits::link_checker >::type link_checker ; ///< link checker typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option typedef typename traits::item_counter item_counter; ///< Item counter class typedef typename traits::stat stat; ///< Internal statistics typedef typename traits::back_off back_off; ///< back-off strategy /// How many Hazard pointers are required for Treiber's stack implementation static constexpr size_t const c_nHazardPtrCount = 1; public: // related to elimination back-off /// Elimination back-off is enabled or not static constexpr const bool enable_elimination = traits::enable_elimination; /// back-off strategy used to wait for elimination typedef typename traits::elimination_backoff elimination_backoff_type; /// Lock type used in elimination back-off typedef typename traits::lock_type elimination_lock_type; /// Random engine used in elimination back-off typedef typename traits::random_engine elimination_random_engine; protected: typename node_type::atomic_node_ptr m_Top; ///< Top of the stack item_counter m_ItemCounter; ///< Item counter stat m_stat; ///< Internal statistics //@cond typedef treiber_stack::details::elimination_backoff elimination_backoff; elimination_backoff m_Backoff; typedef treiber_stack::operation< value_type > operation_desc; // GC and node_type::gc must be the same static_assert( std::is_same::value, "GC and node_type::gc must be the same"); static_assert( !enable_elimination || std::is_same::value, "Random engine result type must be unsigned int"); //@endcond protected: //@cond void clear_links( node_type * pNode ) noexcept { pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); } template struct elimination_backoff_impl; //@endcond public: /// Constructs an empty stack TreiberStack() : m_Top( nullptr ) {} /// Constructs an empty stack and initializes elimination back-off data /** This form should be used if you use elimination back-off with a dynamically allocated collision array, i.e. \p Traits contains typedef cds::opt::v::initialized_dynamic_buffer buffer. The \p nCollisionCapacity parameter specifies the capacity of the collision array. */ TreiberStack( size_t nCollisionCapacity ) : m_Top( nullptr ) , m_Backoff( nCollisionCapacity ) {} /// \p %TreiberStack is not copy-constructible TreiberStack( TreiberStack const& ) = delete; /// Destructor calls \ref cds_intrusive_TreiberStack_clear "clear" member function ~TreiberStack() { clear(); } /// Push the item \p val on the stack /** No copying is made since this is an intrusive stack.
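A minimal usage sketch, assuming the \p stack_t and \p myData declarations from the base_hook example above; the item must stay alive while it is on the stack:
\code
stack_t stack;
myData item;
stack.push( item );       // the stack stores a pointer to item, no copy is made
myData * p = stack.pop(); // p == &item
\endcode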
*/ bool push( value_type& val ) { node_type * pNew = node_traits::to_node_ptr( val ); link_checker::is_empty( pNew ); typename elimination_backoff::type bkoff = m_Backoff.init(); operation_desc op; constexpr_if( enable_elimination ) { op.idOp = treiber_stack::op_push; op.pVal = &val; } node_type * t = m_Top.load( memory_model::memory_order_relaxed ); while ( true ) { pNew->m_pNext.store( t, memory_model::memory_order_relaxed ); if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { ++m_ItemCounter; m_stat.onPush(); return true; } m_stat.onPushRace(); if ( bkoff.backoff( op, m_stat )) return true; } } /// Pop an item from the stack /** If the stack is empty, returns \p nullptr. The disposer is not called for the popped item. See \ref cds_intrusive_item_destroying "Destroying items of intrusive containers". */ value_type * pop() { typename elimination_backoff::type bkoff = m_Backoff.init(); typename gc::Guard guard; operation_desc op; constexpr_if( enable_elimination ) { op.idOp = treiber_stack::op_pop; } while ( true ) { node_type * t = guard.protect( m_Top, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p ); }); if ( t == nullptr ) return nullptr; // stack is empty node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed); if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { clear_links( t ); --m_ItemCounter; m_stat.onPop(); return node_traits::to_value_ptr( *t ); } m_stat.onPopRace(); if ( bkoff.backoff( op, m_stat )) { // may return nullptr if stack is empty return op.pVal; } } } /// Check if stack is empty bool empty() const { return m_Top.load( memory_model::memory_order_relaxed ) == nullptr; } /// Clear the stack /** @anchor cds_intrusive_TreiberStack_clear For each removed item the disposer is called. @note It is possible that \p empty() returns \p false after \p clear() if some other thread pushes an item into the stack while \p clear() is running */ void clear() { back_off bkoff; node_type * pTop; while ( true ) { pTop = m_Top.load( memory_model::memory_order_relaxed ); if ( pTop == nullptr ) return; if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { m_ItemCounter.reset(); break; } bkoff(); } while( pTop ) { node_type * p = pTop; pTop = p->m_pNext.load(memory_model::memory_order_relaxed); clear_links( p ); gc::template retire( node_traits::to_value_ptr( *p )); } } /// Returns stack's item count /** The value returned depends on the opt::item_counter option. For atomicity::empty_item_counter, this function always returns 0. @warning Even if you use a real item counter and it returns 0, this does not mean that the stack is empty. To check emptiness, use the \ref empty() method. */ size_t size() const { return m_ItemCounter.value(); } /// Returns reference to internal statistics stat const& statistics() const { return m_stat; } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_TREIBER_STACK_H libcds-2.3.3/cds/intrusive/vyukov_mpmc_cycle_queue.h000066400000000000000000000213731341244201700226770ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0.
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H #define CDSLIB_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H #include #include namespace cds { namespace intrusive { /// VyukovMPMCCycleQueue related definitions /** @ingroup cds_intrusive_helper */ namespace vyukov_queue { /// VyukovMPMCCycleQueue traits struct traits : public cds::container::vyukov_queue::traits { /// The functor used to dispose of removed items. Default is \p opt::v::empty_disposer. This option is used only in \p clear() typedef opt::v::empty_disposer disposer; }; /// Metafunction converting option list to \p vyukov_queue::traits /** Supported \p Options are: - \p opt::buffer - an uninitialized buffer type for internal cyclic array. Possible types are: \p opt::v::uninitialized_dynamic_buffer (the default), \p opt::v::uninitialized_static_buffer. The type of element in the buffer is not important: it will be changed via the \p rebind metafunction. - \p opt::disposer - the functor used to dispose of removed items. Default is \p opt::v::empty_disposer. This option is used only in the \p clear() member function. - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled). To enable item counting, use \p cds::atomicity::item_counter - \p opt::back_off - back-off strategy used. If the option is not specified, \p cds::backoff::Default is used. - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). Example: declare \p %VyukovMPMCCycleQueue with item counting and static internal buffer of size 1024: \code typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, typename cds::intrusive::vyukov_queue::make_traits< cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 > >, cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode */ template struct make_traits { # ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type; ///< Metafunction result # else typedef typename cds::opt::make_options< typename cds::opt::find_type_traits< traits, Options... >::type , Options... >::type type; # endif }; } // namespace vyukov_queue /// Vyukov's MPMC bounded queue /** @ingroup cds_intrusive_queue This algorithm was developed by Dmitry Vyukov (see http://www.1024cores.net) The implementation of the intrusive version is based on container::VyukovMPMCCycleQueue. Template parameters: - \p T - the type stored in the queue. - \p Traits - queue traits, default is \p vyukov_queue::traits. You can use \p vyukov_queue::make_traits metafunction to make your traits or just derive your traits from \p %vyukov_queue::traits: \code struct myTraits: public cds::intrusive::vyukov_queue::traits { typedef cds::atomicity::item_counter item_counter; }; typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, myTraits > myQueue; // Equivalent make_traits example: typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, typename cds::intrusive::vyukov_queue::make_traits< cds::opt::item_counter< cds::atomicity::item_counter > >::type > myQueue; \endcode Instead of saving a copy of the enqueued data, the intrusive implementation stores a pointer to the passed data. \par Examples: \code #include struct Foo { ...
}; // Queue of Foo pointers, capacity is 1024, statically allocated buffer: typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, typename cds::intrusive::vyukov_queue::make_traits< cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< Foo, 1024 > > >::type > static_queue; static_queue stQueue; // Queue of Foo pointers, capacity is 1024, dynamically allocated buffer: struct queue_traits: public cds::intrusive::vyukov_queue::traits { typedef cds::opt::v::uninitialized_dynamic_buffer< Foo > buffer; }; typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, queue_traits > dynamic_queue; dynamic_queue dynQueue( 1024 ); \endcode */ template class VyukovMPMCCycleQueue : private container::VyukovMPMCCycleQueue< T*, Traits > { //@cond typedef container::VyukovMPMCCycleQueue< T*, Traits > base_class; //@endcond public: typedef T value_type; ///< type of data to be stored in the queue typedef Traits traits; ///< Queue traits typedef typename traits::item_counter item_counter; ///< Item counter type typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option typedef typename traits::disposer disposer; ///< Item disposer typedef typename traits::back_off back_off; ///< back-off strategy public: /// Rebind template arguments template struct rebind { typedef VyukovMPMCCycleQueue< T2, Traits2> other ; ///< Rebinding result }; public: /// Constructs the queue of capacity \p nCapacity /** For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. */ VyukovMPMCCycleQueue( size_t nCapacity = 0 ) : base_class( nCapacity ) {} /// Enqueues \p data to queue /** @note The intrusive queue stores pointer to \p data passed, not the copy of \p data. */ bool enqueue( value_type& data ) { return base_class::enqueue( &data ); } /// Dequeues an item from queue /** \p Traits::disposer is not called. You may manually delete the returned pointer. If queue is empty, returns \p nullptr. */ value_type * dequeue() { value_type * p = nullptr; return base_class::dequeue( p ) ? p : nullptr; } /// Synonym for \p enqueue() bool push( value_type& data ) { return enqueue( data ); } /// Synonym for \p dequeue() value_type * pop() { return dequeue(); } /// Clears queue in lock-free manner. /** \p f parameter is a functor to dispose removed items. The interface of \p Disposer is: \code struct myDisposer { void operator ()( T * val ); }; \endcode The disposer will be called immediately for each item. */ template void clear( Disposer f ) { value_type * pv; while ( (pv = pop()) != nullptr ) { f( pv ); } } /// Clears the queue /** This function uses the disposer that is specified in \p Traits. */ void clear() { clear( disposer()); } /// Checks if the queue is empty bool empty() const { return base_class::empty(); } /// Returns queue's item count /** The value returned depends on \p vyukov_queue::traits::item_counter option. For \p atomicity::empty_item_counter, this function always returns 0. */ size_t size() const { return base_class::size(); } /// Returns capacity of the queue size_t capacity() const { return base_class::capacity(); } }; }} // namespace cds::intrusive #endif // #ifndef CDSLIB_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H libcds-2.3.3/cds/lock/000077500000000000000000000000001341244201700144565ustar00rootroot00000000000000libcds-2.3.3/cds/lock/array.h000066400000000000000000000016671341244201700157570ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_LOCK_ARRAY_H #define CDSLIB_LOCK_ARRAY_H #if CDS_COMPILER == CDS_COMPILER_MSVC # pragma message("cds/lock/array.h is deprecated, use cds/sync/lock_array.h instead") #else # warning "cds/lock/array.h is deprecated, use cds/sync/lock_array.h instead" #endif #include //@cond namespace cds { namespace lock { using cds::sync::trivial_select_policy; using cds::sync::mod_select_policy; using cds::sync::pow2_select_policy; template using array = cds::sync::lock_array< Lock, SelectPolicy, Alloc >; }} // namespace cds::lock //@endcond #endif // #ifndef CDSLIB_LOCK_ARRAY_H libcds-2.3.3/cds/lock/spinlock.h000066400000000000000000000037601341244201700164570ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_LOCK_SPINLOCK_H #define CDSLIB_LOCK_SPINLOCK_H #if CDS_COMPILER == CDS_COMPILER_MSVC # pragma message("cds/lock/spinlock.h is deprecated, use cds/sync/spinlock.h instead") #else # warning "cds/lock/spinlock.h is deprecated, use cds/sync/spinlock.h instead" #endif #include //@cond namespace cds { /// Synchronization primitives (deprecated namespace, use \p cds::sync namespace instead) namespace lock { /// Alias for \p cds::sync::spin_lock for backward compatibility template using Spinlock = cds::sync::spin_lock< Backoff >; /// Spin-lock implementation default for the current platform typedef cds::sync::spin_lock< backoff::LockDefault> Spin; /// Alias for \p cds::sync::reentrant_spin_lock for backward compatibility template using ReentrantSpinT = cds::sync::reentrant_spin_lock< Integral, Backoff >; /// Recursive 32bit spin-lock typedef cds::sync::reentrant_spin32 ReentrantSpin32; /// Recursive 64bit spin-lock typedef cds::sync::reentrant_spin64 ReentrantSpin64; /// Default recursive spin-lock type typedef ReentrantSpin32 ReentrantSpin; } // namespace lock /// Standard (best for the current platform) spin-lock implementation typedef lock::Spin SpinLock; /// Standard (best for the current platform) recursive spin-lock implementation typedef lock::ReentrantSpin RecursiveSpinLock; /// 32bit recursive spin-lock shortcut typedef lock::ReentrantSpin32 RecursiveSpinLock32; /// 64bit recursive spin-lock shortcut typedef lock::ReentrantSpin64 RecursiveSpinLock64; } // namespace cds //@endcond #endif // #ifndef CDSLIB_LOCK_SPINLOCK_H libcds-2.3.3/cds/memory/000077500000000000000000000000001341244201700150365ustar00rootroot00000000000000libcds-2.3.3/cds/memory/pool_allocator.h000066400000000000000000000074551341244201700202330ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_MEMORY_POOL_ALLOCATOR_H #define CDSLIB_MEMORY_POOL_ALLOCATOR_H #include #include namespace cds { namespace memory { ///@defgroup cds_memory_pool Simple memory pool /// Pool allocator adapter /** This class is an adapter for an object pool. It gives \p std::allocator interface for the @ref cds_memory_pool "pool". Template arguments: - \p T - value type - \p Accessor - a functor to access to the pool object. 
The pool has the following interface: \code template class pool { typedef T value_type ; // Object type maintained by pool T * allocate( size_t n ) ; // Allocate an array of objects of type T void deallocate( T * p, size_t n ) ; // Deallocate the array p of size n }; \endcode Usage Suppose we have a pool with the interface above. Usually, the pool is a static object: \code static pool thePool; \endcode The \p %pool_allocator provides the \p std::allocator interface for the pool. You need to declare an accessor functor to access \p thePool: \code struct pool_accessor { typedef typename pool::value_type value_type; pool& operator()() const { return thePool; } }; \endcode Now, cds::memory::pool_allocator< T, pool_accessor > can be used instead of \p std::allocator. */ template class pool_allocator { //@cond public: typedef Accessor accessor_type; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; template struct rebind { typedef pool_allocator other; }; public: pool_allocator() noexcept {} pool_allocator(const pool_allocator&) noexcept {} template pool_allocator(const pool_allocator&) noexcept {} ~pool_allocator() {} pointer address(reference x) const noexcept { return &x; } const_pointer address(const_reference x) const noexcept { return &x; } pointer allocate( size_type n, void const * /*hint*/ = 0) { static_assert( sizeof(value_type) <= sizeof(typename accessor_type::value_type), "Incompatible type" ); return reinterpret_cast( accessor_type()().allocate( n )); } void deallocate(pointer p, size_type n) noexcept { accessor_type()().deallocate( reinterpret_cast( p ), n ); } size_type max_size() const noexcept { return size_t(-1) / sizeof(value_type); } template void construct(U* p, Args&&... args) { new((void *)p) U( std::forward(args)...); } template void destroy(U* p) { p->~U(); } //@endcond }; }} // namespace cds::memory #endif // #ifndef CDSLIB_MEMORY_POOL_ALLOCATOR_H libcds-2.3.3/cds/memory/vyukov_queue_pool.h000066400000000000000000000422571341244201700210170ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H #define CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H #include #include #include #include namespace cds { namespace memory { /// \p vyukov_queue_pool traits /** @ingroup cds_memory_pool */ struct vyukov_queue_pool_traits : public cds::intrusive::vyukov_queue::traits { /// Allocator type typedef CDS_DEFAULT_ALLOCATOR allocator; }; /// Free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue /** @ingroup cds_memory_pool Template parameters: - \p T - the type of object maintained by the free-list. \p T must be default-constructible. - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits \b Internals This free-list is very simple. At construction time, the free-list allocates an array of N items and stores them into the queue, where N is the queue capacity. When allocating, the free-list tries to pop an object from the internal queue, i.e. from the preallocated pool. On success, the popped object is returned. Otherwise a new one is allocated. When deallocating, the free-list checks whether the object is from the preallocated pool.
If so, the object is pushed into the queue; otherwise it is deallocated using the allocator provided. The pool can manage more than \p N items but only \p N items are contained in the free-list. \b Usage \p %vyukov_queue_pool should be used together with \ref pool_allocator. You should declare a static object of type \p %vyukov_queue_pool, provide an accessor to that object, and use \p pool_allocator as the allocator: \code #include #include // Pool of Foo objects of size 1024. struct pool_traits: public cds::memory::vyukov_queue_pool_traits { typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer; }; typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type; static pool_type thePool; struct pool_accessor { typedef typename pool_type::value_type value_type; pool_type& operator()() const { return thePool; } }; // Declare pool allocator typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; // Use pool_allocator // Allocate an object Foo * p = pool_allocator().allocate( 1 ); // construct object new(p) Foo; //... // Destruct object p->~Foo(); // Deallocate object pool_allocator().deallocate( p , 1 ); \endcode */ template class vyukov_queue_pool { public: typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type public: typedef T value_type ; ///< Value type typedef Traits traits; ///< Traits type typedef typename std::allocator_traits::template rebind_alloc allocator_type; ///< allocator type typedef typename traits::back_off back_off; ///< back-off strategy protected: //@cond typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; typedef typename cxx_allocator::allocator_type std_allocator; queue_type m_Queue; value_type * m_pFirst; value_type * m_pLast; //@endcond protected: //@cond void preallocate_pool() { m_pFirst = std_allocator().allocate( m_Queue.capacity()); m_pLast = m_pFirst + m_Queue.capacity(); for ( value_type * p = m_pFirst; p < m_pLast; ++p ) { CDS_VERIFY( m_Queue.push( *p )) ; // must be true } } bool from_pool( value_type * p ) const { return m_pFirst <= p && p < m_pLast; } //@endcond public: /// Preallocates the pool of objects /** The \p nCapacity argument is the queue capacity. It should be passed if the queue is based on a dynamically-allocated buffer. See \p cds::intrusive::VyukovMPMCCycleQueue for explanation. */ vyukov_queue_pool( size_t nCapacity = 0 ) : m_Queue( nCapacity ) { preallocate_pool(); } /// Deallocates the pool. ~vyukov_queue_pool() { m_Queue.clear(); std_allocator().deallocate( m_pFirst, m_Queue.capacity()); } /// Allocates an object from the pool /** The pool supports allocation of a single object only (\p n = 1). If \p n > 1 the behavior is undefined. If the queue is not empty, the popped value is returned. Otherwise, a new value is allocated. */ value_type * allocate( size_t n ) { assert( n == 1 ); CDS_UNUSED(n); value_type * p = m_Queue.pop(); if ( p ) { assert( from_pool(p)); return new( p ) value_type; } // The pool is empty - allocate new from the heap return cxx_allocator().New(); } /// Deallocates the object \p p /** The pool supports allocation of a single object only (\p n = 1). If \p n > 1 the behavior is undefined. If \p p is from the preallocated pool, it is pushed into the queue. Otherwise, \p p is deallocated by the allocator provided.
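A direct-use sketch, assuming the \p pool_type and \p thePool objects from the class-level example above:
\code
Foo * p = thePool.allocate( 1 ); // popped from the preallocated array, or heap-allocated
// ... use *p ...
thePool.deallocate( p, 1 );      // returned to the free-list, or freed
\endcode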
*/ void deallocate( value_type * p, size_t n ) { assert( n == 1 ); CDS_UNUSED(n); if ( p ) { if ( from_pool(p)) { p->~value_type(); // The queue can report a false fullness state, // so we push in a loop back_off bkoff; while ( !m_Queue.push( *p )) bkoff(); } else cxx_allocator().Delete( p ); } } }; /// Lazy free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue /** @ingroup cds_memory_pool Template parameters: - \p T - the type of object maintained by the free-list. \p T must be default-constructible - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits \b Internals This free-list is very simple. At construction time the pool is empty. When allocating, the free-list tries to pop an object from the internal queue. On success, the popped object is returned. Otherwise a new one is allocated. When deallocating, the free-list tries to push the object into the pool. If the internal queue is full, the object is deallocated using the allocator provided. The pool can manage more than \p N items but only \p N items are placed in the free-list. \b Usage \p %lazy_vyukov_queue_pool should be used together with \ref pool_allocator. You should declare a static object of type \p %lazy_vyukov_queue_pool, provide an accessor functor to this object and use \p pool_allocator as the allocator: \code #include #include // Pool of Foo objects of size 1024. typedef cds::memory::lazy_vyukov_queue_pool< Foo > pool_type; static pool_type thePool( 1024 ); struct pool_accessor { typedef typename pool_type::value_type value_type; pool_type& operator()() const { return thePool; } }; // Declare pool allocator typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; // Use pool_allocator // Allocate an object Foo * p = pool_allocator().allocate( 1 ); // construct object new(p) Foo; //... // Destruct object p->~Foo(); // Deallocate object pool_allocator().deallocate( p , 1 ); \endcode */ template class lazy_vyukov_queue_pool { public: typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type public: typedef T value_type ; ///< Value type typedef Traits traits; ///< Pool traits typedef typename std::allocator_traits::template rebind_alloc allocator_type; ///< allocator type protected: //@cond typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; typedef typename cxx_allocator::allocator_type std_allocator; queue_type m_Queue; //@endcond public: /// Constructs an empty pool lazy_vyukov_queue_pool( size_t nCapacity = 0 ) : m_Queue( nCapacity ) {} /// Deallocates all objects from the pool ~lazy_vyukov_queue_pool() { std_allocator a; while ( !m_Queue.empty()) a.deallocate( m_Queue.pop(), 1 ); } /// Allocates an object from the pool /** The pool supports allocation of a single object only (\p n = 1). If \p n > 1 the behavior is undefined. If the queue is not empty, the popped value is returned. Otherwise, a new value is allocated. */ value_type * allocate( size_t n ) { assert( n == 1 ); CDS_UNUSED(n); value_type * p = m_Queue.pop(); if ( p ) return new( p ) value_type; return cxx_allocator().New(); } /// Deallocates the object \p p /** The pool supports allocation of a single object only (\p n = 1). If \p n > 1 the behavior is undefined. If the queue is not full, \p p is pushed into the queue. Otherwise, \p p is deallocated by the allocator provided.
*/ void deallocate( value_type * p, size_t n ) { assert( n == 1 ); CDS_UNUSED(n); if ( p ) { p->~value_type(); // Here we ignore false fullness state of the queue if ( !m_Queue.push( *p )) std_allocator().deallocate( p, 1 ); } } }; /// Bounded free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue /** @ingroup cds_memory_pool Template parameters: - \p T - the type of object maintained by the free-list. \p T must be default-constructible - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits \b Internals At construction time, the free-list allocates an array of N items and stores them into the queue, where N is the queue capacity. When allocating, the free-list tries to pop an object from the internal queue, i.e. from the preallocated pool. On success, the popped object is returned. Otherwise, a \p std::bad_alloc exception is raised. So, the pool can contain up to \p N items. When deallocating, the object is pushed into the queue. In debug mode, the \p deallocate() member function asserts that the pointer is from the preallocated pool. \b Usage \p %bounded_vyukov_queue_pool should be used together with \ref pool_allocator. You should declare a static object of type \p %bounded_vyukov_queue_pool, provide an accessor functor to this object and use \p pool_allocator as the allocator: \code #include #include // Pool of Foo objects of size 1024. struct pool_traits: public cds::memory::vyukov_queue_pool_traits { typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer; }; typedef cds::memory::bounded_vyukov_queue_pool< Foo, pool_traits > pool_type; static pool_type thePool; struct pool_accessor { typedef typename pool_type::value_type value_type; pool_type& operator()() const { return thePool; } }; // Declare pool allocator typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; // Use pool_allocator // Allocate an object Foo * p = pool_allocator().allocate( 1 ); // construct object new(p) Foo; //... // Destruct object p->~Foo(); // Deallocate object pool_allocator().deallocate( p , 1 ); \endcode */ template class bounded_vyukov_queue_pool { //@cond struct internal_traits : public Traits { typedef cds::atomicity::item_counter item_counter; }; //@endcond public: typedef cds::intrusive::VyukovMPMCCycleQueue< T, internal_traits > queue_type ; ///< Queue type public: typedef T value_type; ///< Value type typedef Traits traits; ///< Pool traits typedef typename std::allocator_traits::template rebind_alloc allocator_type; ///< allocator type typedef typename traits::back_off back_off; ///< back-off strategy protected: //@cond typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; typedef typename cxx_allocator::allocator_type std_allocator; queue_type m_Queue; value_type * m_pFirst; value_type * m_pLast; //@endcond protected: //@cond void preallocate_pool() { size_t const nCount = m_Queue.capacity(); m_pFirst = std_allocator().allocate( nCount ); m_pLast = m_pFirst + nCount; for ( value_type * p = m_pFirst; p < m_pLast; ++p ) CDS_VERIFY( m_Queue.push( *p )) ; // must be true } bool from_pool( value_type * p ) const { return m_pFirst <= p && p < m_pLast; } //@endcond public: /// Preallocates the pool of objects /** The \p nCapacity argument is the queue capacity. It should be passed if the queue is based on a dynamically-allocated buffer. See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
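For example, a bounded pool over a dynamically allocated buffer might be declared as follows (a sketch; \p Foo is an assumed user type):
\code
struct dyn_pool_traits: public cds::memory::vyukov_queue_pool_traits {
    typedef cds::opt::v::uninitialized_dynamic_buffer< Foo > buffer;
};
typedef cds::memory::bounded_vyukov_queue_pool< Foo, dyn_pool_traits > pool_type;
pool_type thePool( 1024 ); // here nCapacity is meaningful
\endcode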
*/ bounded_vyukov_queue_pool( size_t nCapacity = 0 ) : m_Queue( nCapacity ) { preallocate_pool(); } /// Deallocates the pool. ~bounded_vyukov_queue_pool() { m_Queue.clear(); std_allocator().deallocate( m_pFirst, m_Queue.capacity()); } /// Allocates an object from the pool /** The pool supports allocation of a single object only (\p n = 1). If \p n > 1 the behavior is undefined. If the queue is not empty, the popped value is returned. Otherwise, a \p std::bad_alloc exception is raised. */ value_type * allocate( size_t n ) { assert( n == 1 ); CDS_UNUSED( n ); value_type * p = m_Queue.pop(); if ( !p ) { back_off bkoff; while ( m_Queue.size()) { p = m_Queue.pop(); if ( p ) goto ok; bkoff(); } // The pool is empty CDS_THROW_EXCEPTION( std::bad_alloc()); } ok: assert( from_pool(p)); return p; } /// Deallocates the object \p p /** The pool supports allocation of a single object only (\p n = 1). If \p n > 1 the behavior is undefined. \p p should be from the preallocated pool. */ void deallocate( value_type * p, size_t n ) { assert( n == 1 ); CDS_UNUSED( n ); if ( p ) { assert( from_pool( p )); back_off bkoff; // The queue can report a false fullness state, // so we push in a loop while ( !m_Queue.push(*p)) bkoff(); } } }; }} // namespace cds::memory #endif // #ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H libcds-2.3.3/cds/opt/000077500000000000000000000000001341244201700143305ustar00rootroot00000000000000libcds-2.3.3/cds/opt/buffer.h000066400000000000000000000465421341244201700157630ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_OPT_BUFFER_H #define CDSLIB_OPT_BUFFER_H #include #include #include #include #include #include namespace cds { namespace opt { /// [type-option] Option setter for user-provided plain buffer /** This option is used by some containers as a random-access array for storing the container's items; for example, a bounded queue may use this option to define the underlying buffer implementation. The template parameter \p Type should be rebindable. Implementations: - \p opt::v::initialized_static_buffer - \p opt::v::uninitialized_static_buffer - \p opt::v::initialized_dynamic_buffer - \p opt::v::uninitialized_dynamic_buffer An uninitialized buffer is just an array of uninitialized elements. Each element should be manually constructed, for example with a placement new operator. When the uninitialized buffer is destroyed, the destructors of its elements are not called. An initialized buffer contains default-constructed elements. Element destructors are called automatically when the buffer is destroyed. @note Usually, initialized and uninitialized buffers are not interchangeable. */ template struct buffer { //@cond template struct pack: public Base { typedef Type buffer; }; //@endcond }; namespace v { /// Static uninitialized buffer /** One of the available types for the \p opt::buffer option. This buffer maintains a static array of uninitialized elements. You should manually construct each element when needed. No dynamic memory allocation is performed. \par Template parameters: - \p T - the item type the buffer stores - \p Capacity - the capacity of the buffer. The value must be a power of two if \p Exp2 is \p true - \p Exp2 - a boolean flag. If it is \p true, the buffer capacity must be a power of two. Otherwise it can be any positive number. Usually, the buffer is required to have a size that is a power of two.
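For example (a sketch), a bounded queue may use a static buffer through the \p opt::buffer option; the element type passed to the buffer is not important since the container rebinds it:
\code
typedef cds::intrusive::VyukovMPMCCycleQueue< Foo,
    typename cds::intrusive::vyukov_queue::make_traits<
        cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 > >
    >::type
> bounded_queue_t;
\endcode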
*/ template class uninitialized_static_buffer { public: typedef T value_type; ///< value type static constexpr const size_t c_nCapacity = Capacity; ///< Capacity static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag /// Rebind buffer for other template parameters template struct rebind { typedef uninitialized_static_buffer other; ///< Rebind result type }; // Capacity must be power of 2 static_assert(!c_bExp2 || (c_nCapacity & (c_nCapacity - 1)) == 0, "Capacity must be power of two"); private: //@cond union element { value_type v; char c; element() {} }; element m_buffer[c_nCapacity]; //@endcond public: /// Construct static buffer uninitialized_static_buffer() noexcept {} /// Construct buffer of given capacity /** This ctor ignores the \p nCapacity argument. The capacity of the static buffer is defined by the template argument \p Capacity */ uninitialized_static_buffer( size_t nCapacity ) noexcept { CDS_UNUSED( nCapacity ); } uninitialized_static_buffer( const uninitialized_static_buffer& ) = delete; uninitialized_static_buffer& operator =( const uninitialized_static_buffer& ) = delete; /// Get item \p i value_type& operator []( size_t i ) { assert( i < capacity()); return m_buffer[i].v; } /// Get item \p i, const version const value_type& operator []( size_t i ) const { assert( i < capacity()); return m_buffer[i].v; } /// Returns buffer capacity constexpr size_t capacity() const noexcept { return c_nCapacity; } /// Zeroize the buffer void zeroize() { memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); } /// Returns pointer to buffer array value_type * buffer() noexcept { return &( m_buffer[0].v ); } /// Returns pointer to buffer array value_type * buffer() const noexcept { return &( m_buffer[0].v ); } /// Returns idx % capacity() /** If the buffer size is a power of two, bitwise arithmetic is used instead of modulo arithmetic */ size_t mod( size_t idx ) { constexpr_if ( c_bExp2 ) return idx & ( capacity() - 1 ); else return idx % capacity(); } //@cond template typename std::enable_if< sizeof(I) != sizeof(size_t), size_t >::type mod( I idx ) { constexpr_if ( c_bExp2 ) return static_cast( idx & static_cast( capacity() - 1 )); else return static_cast( idx % capacity()); } //@endcond }; /// Static initialized buffer /** One of the available types for the \p opt::buffer option. This buffer maintains a static array of default-constructed elements. No dynamic memory allocation is performed. \par Template parameters: - \p T - the item type the buffer stores - \p Capacity - the capacity of the buffer. The value must be a power of two if \p Exp2 is \p true - \p Exp2 - a boolean flag. If it is \p true, the buffer capacity must be a power of two. Otherwise it can be any positive number. Usually, the buffer is required to have a size that is a power of two. */ template class initialized_static_buffer { public: typedef T value_type; ///< value type static constexpr const size_t c_nCapacity = Capacity; ///< Capacity static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag /// Rebind buffer for other template parameters template struct rebind { typedef initialized_static_buffer other; ///< Rebind result type }; // Capacity must be power of 2 static_assert(!c_bExp2 || (c_nCapacity & (c_nCapacity - 1)) == 0, "Capacity must be power of two"); private: //@cond value_type m_buffer[c_nCapacity]; //@endcond public: /// Construct static buffer initialized_static_buffer() noexcept {} /// Construct buffer of given capacity /** This ctor ignores the \p nCapacity argument.
The capacity of the static buffer is defined by the template argument \p Capacity */ initialized_static_buffer( size_t nCapacity ) noexcept { CDS_UNUSED( nCapacity ); } initialized_static_buffer( const initialized_static_buffer& ) = delete; initialized_static_buffer& operator =( const initialized_static_buffer& ) = delete; /// Get item \p i value_type& operator []( size_t i ) { assert( i < capacity()); return m_buffer[i]; } /// Get item \p i, const version const value_type& operator []( size_t i ) const { assert( i < capacity()); return m_buffer[i]; } /// Returns buffer capacity constexpr size_t capacity() const noexcept { return c_nCapacity; } /// Zeroize the buffer void zeroize() { memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); } /// Returns pointer to buffer array value_type * buffer() noexcept { return m_buffer; } /// Returns pointer to buffer array value_type * buffer() const noexcept { return m_buffer; } /// Returns idx % capacity() /** If the buffer size is a power of two, bitwise arithmetic is used instead of modulo arithmetic */ size_t mod( size_t idx ) { constexpr_if ( c_bExp2 ) return idx & ( capacity() - 1 ); else return idx % capacity(); } //@cond template typename std::enable_if< sizeof( I ) != sizeof( size_t ), size_t >::type mod( I idx ) { constexpr_if ( c_bExp2 ) return static_cast( idx & static_cast( capacity() - 1 )); else return static_cast( idx % capacity()); } //@endcond }; /// Dynamically allocated uninitialized buffer /** One of the available types for the \p opt::buffer option. This buffer maintains a dynamically allocated array of uninitialized elements. You should manually construct each element when needed. Allocation is performed at construction time. \par Template parameters: - \p T - the item type stored in the buffer - \p Alloc - an allocator used for allocating the internal buffer (\p std::allocator interface) - \p Exp2 - a boolean flag. If it is \p true, the buffer capacity must be a power of two. Otherwise it can be any positive number. Usually, the buffer is required to have a size that is a power of two. */ template class uninitialized_dynamic_buffer { public: typedef T value_type; ///< Value type typedef Alloc allocator; ///< Allocator type; static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag /// Rebind buffer for other template parameters template struct rebind { typedef uninitialized_dynamic_buffer other; ///< Rebinding result type }; //@cond typedef typename std::allocator_traits::template rebind_alloc allocator_type; //@endcond private: //@cond value_type * m_buffer; size_t const m_nCapacity; //@endcond public: /// Allocates dynamic buffer of given \p nCapacity /** If the \p Exp2 class template parameter is \p true, the actual capacity of the allocated buffer is the smallest power of two not less than \p nCapacity. */ uninitialized_dynamic_buffer( size_t nCapacity ) : m_nCapacity( c_bExp2 ?
beans::ceil2(nCapacity) : nCapacity ) { assert( m_nCapacity >= 2 ); // Capacity must be power of 2 assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 ); m_buffer = allocator_type().allocate( m_nCapacity ); } /// Destroys dynamically allocated buffer ~uninitialized_dynamic_buffer() { allocator_type().deallocate( m_buffer, m_nCapacity ); } uninitialized_dynamic_buffer( const uninitialized_dynamic_buffer& ) = delete; uninitialized_dynamic_buffer& operator =( const uninitialized_dynamic_buffer& ) = delete; /// Get item \p i value_type& operator []( size_t i ) { assert( i < capacity()); return m_buffer[i]; } /// Get item \p i, const version const value_type& operator []( size_t i ) const { assert( i < capacity()); return m_buffer[i]; } /// Returns buffer capacity size_t capacity() const noexcept { return m_nCapacity; } /// Zeroize the buffer void zeroize() { memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); } /// Returns pointer to buffer array value_type * buffer() noexcept { return m_buffer; } /// Returns pointer to buffer array value_type * buffer() const noexcept { return m_buffer; } /// Returns idx % capacity() /** If the buffer size is a power of two, bitwise arithmetic is used instead of modulo arithmetic */ size_t mod( size_t idx ) { constexpr_if ( c_bExp2 ) return idx & ( capacity() - 1 ); else return idx % capacity(); } //@cond template typename std::enable_if< sizeof( I ) != sizeof( size_t ), size_t >::type mod( I idx ) { constexpr_if ( c_bExp2 ) return static_cast( idx & static_cast( capacity() - 1 )); else return static_cast( idx % capacity()); } //@endcond }; /// Dynamically allocated initialized buffer /** One of the available types for the \p opt::buffer option. This buffer maintains a dynamically allocated array of initialized (default-constructed) elements. Allocation is performed at construction time. \par Template parameters: - \p T - the item type stored in the buffer - \p Alloc - an allocator used for allocating the internal buffer (\p std::allocator interface) - \p Exp2 - a boolean flag. If it is \p true, the buffer capacity must be a power of two. Otherwise it can be any positive number. Usually, the buffer is required to have a size that is a power of two. */ template class initialized_dynamic_buffer { public: typedef T value_type; ///< Value type typedef Alloc allocator; ///< Allocator type static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag /// Rebind buffer for other template parameters template struct rebind { typedef initialized_dynamic_buffer other; ///< Rebinding result type }; //@cond typedef cds::details::Allocator allocator_type; //@endcond private: //@cond value_type * m_buffer; size_t const m_nCapacity; //@endcond public: /// Allocates dynamic buffer of given \p nCapacity /** If the \p Exp2 class template parameter is \p true, the actual capacity of the allocated buffer is the smallest power of two not less than \p nCapacity. */ initialized_dynamic_buffer( size_t nCapacity ) : m_nCapacity( c_bExp2 ?
beans::ceil2(nCapacity) : nCapacity ) { assert( m_nCapacity >= 2 ); // Capacity must be power of 2 assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 ); allocator_type a; m_buffer = a.NewArray( m_nCapacity ); } /// Destroys dynamically allocated buffer ~initialized_dynamic_buffer() { allocator_type a; a.Delete( m_buffer, m_nCapacity ); } initialized_dynamic_buffer( const initialized_dynamic_buffer& ) = delete; initialized_dynamic_buffer& operator =( const initialized_dynamic_buffer& ) = delete; /// Get item \p i value_type& operator []( size_t i ) { assert( i < capacity()); return m_buffer[i]; } /// Get item \p i, const version const value_type& operator []( size_t i ) const { assert( i < capacity()); return m_buffer[i]; } /// Returns buffer capacity size_t capacity() const noexcept { return m_nCapacity; } /// Zeroize the buffer void zeroize() { memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); } /// Returns pointer to buffer array value_type * buffer() noexcept { return m_buffer; } /// Returns pointer to buffer array value_type * buffer() const noexcept { return m_buffer; } /// Returns idx % capacity() /** If the buffer size is a power of two, bitwise arithmetic is used instead of modulo arithmetic */ size_t mod( size_t idx ) { constexpr_if ( c_bExp2 ) return idx & ( capacity() - 1 ); else return idx % capacity(); } //@cond template typename std::enable_if< sizeof( I ) != sizeof( size_t ), size_t >::type mod( I idx ) { constexpr_if ( c_bExp2 ) return static_cast( idx & static_cast( capacity() - 1 )); else return static_cast( idx % capacity()); } //@endcond }; } // namespace v }} // namespace cds::opt #endif // #ifndef CDSLIB_OPT_BUFFER_H libcds-2.3.3/cds/opt/compare.h000066400000000000000000000244121341244201700161320ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_OPT_COMPARE_H #define CDSLIB_OPT_COMPARE_H /* Editions: 2011.05.05 khizmax Created */ #include #include #include #include namespace cds { namespace opt { /// [type-option] Option setter for key comparing /** The option sets the type of a functor to compare keys. For comparing two keys \p k1 and \p k2 the functor must return: - 1 if k1 > k2 - 0 if k1 == k2 - -1 if k1 < k2 \p Functor is a functor with the following interface: \code template struct Comparator { int operator ()(const T& r1, const T& r2) { // Comparator body } }; \endcode Note that the functor must return \p int, not a \p bool value. There are predefined types for \p Functor: - the functor \p opt::v::less_comparator that implements a comparing functor through the \p std::less predicate. - the specialization of the \p opt::v::less_comparator functor intended for string comparison You may implement your own comparing functor that satisfies the \p Functor interface. For the relation between the \p %opt::less and \p %opt::compare option setters, see the \p opt::less description. */ template struct compare { //@cond template struct pack: public Base { typedef Functor compare; }; //@endcond }; namespace v { /// Comparator based on \p std::less predicate /** This functor is a predefined type for the \p opt::compare option setter. It is based on the \p std::less predicate.
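The three-way result follows from two \p std::less calls, for example:
\code
cds::opt::v::less_comparator< int > cmp;
assert( cmp( 1, 2 ) == -1 ); // v1 < v2
assert( cmp( 2, 1 ) == 1 );  // v2 < v1
assert( cmp( 5, 5 ) == 0 );  // equivalent values
\endcode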
*/ template struct less_comparator { /// Operator that compares two values of type \p T int operator()(T const& v1, T const& v2) { if ( std::less()( v1, v2 )) return -1; if ( std::less()( v2, v1 )) return 1; return 0; } }; /// Comparator specialization for \p std::string /** This functor uses the \p std::string::compare() method instead of the \p std::less predicate. */ template struct less_comparator< std::basic_string > { //@cond typedef std::basic_string string_type; int operator()(string_type const& v1, string_type const& v2) { return v1.compare( v2 ); } //@endcond }; } // namespace v /// [type-option] Option setter for \p less predicate /** The option sets a binary predicate that tests whether a value of a specified type is less than another value of that type. The \p Functor interface is similar to the \p std::less predicate interface. The standard predicate \p std::less can act as \p Functor: \code typedef cds::opt::less< std::less< int > > opt_less \endcode In addition, the option setter may set a non-standard two-type predicate (\p std::binary_function): \code struct foo { int n; }; template struct pred_less { bool operator ()( const T& t, const Q& q ) { return t.n < q ; } bool operator ()( const Q& q, const T& t ) { return q < t.n ; } bool operator ()( const T& t1, const T& t2 ) { return t1.n < t2.n ; } bool operator ()( const Q& q1, const Q& q2 ) { return q1 < q2 ; } }; typedef cds::opt::less< pred_less< foo, int > > opt_less; \endcode Generally, the default type for \p Functor is \p std::less but it depends on the container used. \par Relation between opt::less and opt::compare option setters Unless otherwise specified, the \p opt::compare option setter has higher priority. If \p %opt::compare and \p %opt::less options are specified for a container, the \p %opt::compare option is used: \code // Suppose a hypothetical map_type allows specifying // cds::opt::less and cds::opt::compare options typedef map_type< std::string, int, cds::opt::compare< cds::opt::v::less_comparator< std::string > >, cds::opt::less< std::less< std::string > > > my_map_type; // For my_map_type, the cds::opt::compare comparator will be used, // the cds::opt::less option is ignored without any warnings. \endcode */ template struct less { //@cond template struct pack: public Base { typedef Functor less; }; //@endcond }; //@cond namespace details { template struct make_comparator_from_less { typedef Less less_functor; template int operator ()( T const& t, Q const& q ) const { less_functor f; if ( f( t, q )) return -1; if ( f( q, t )) return 1; return 0; } }; template > > struct make_comparator_from { typedef typename Traits::compare compare; typedef typename Traits::less less; typedef typename std::conditional< std::is_same< compare, opt::none >::value, typename std::conditional< std::is_same< less, opt::none >::value, DefaultCmp, make_comparator_from_less< less > >::type, compare >::type type; }; template using make_comparator = make_comparator_from< T, Traits, typename std::conditional< Forced, make_comparator_from_less< std::less>, opt::none >::type >; template struct make_comparator_from_option_list { struct default_traits { typedef opt::none compare; typedef opt::none less; }; typedef typename make_comparator< T, typename opt::make_options< typename opt::find_type_traits< default_traits, Options... >::type ,Options...
>::type >::type type; }; } // namespace details //@endcond /// [type-option] Option setter for \p opt::equal_to predicate /** The option sets a binary predicate that tests whether a value of a specified type is equal to another value of that type. \p Functor interface is similar to \p std::equal_to predicate interface. The standard predicate \p std::equal_to can act as \p Functor: \code typedef cds::opt::equal_to< std::equal_to< int > > opt_equal_to \endcode In addition, the option setter may sets non-standard 2-type (or even N-type) predicate (\p std::binary_function): \code struct foo { int n; }; template struct pred_equal_to { bool operator ()( const T& t, const Q& q ) { return t.n == q ; } bool operator ()( const Q& q, const T& t ) { return q == t.n ; } bool operator ()( const T& t1, const T& t2 ) { return t1.n == t2.n ; } bool operator ()( const Q& q1, const Q& q2 ) { return q1 == q2 ; } }; typedef cds::opt::equal_to< pred_equal_to< foo, int > > opt_equal_to; \endcode Generally, the default type for \p Functor is \p std::equal_to but it depends on the container used. */ template struct equal_to { //@cond template struct pack: public Base { typedef Functor equal_to; }; //@endcond }; //@cond namespace details { template struct make_equal_to_from_compare { typedef Compare compare_functor; template bool operator()( T const& t, Q const& q ) const { return compare_functor()(t, q) == 0; } }; template struct make_equal_to_from_less { typedef Less less_functor; template bool operator()( T const& t, Q const& q ) const { less_functor less; return !less(t, q) && !less(q, t); } }; template struct make_equal_to { typedef typename Traits::equal_to equal_to; typedef typename Traits::compare compare; typedef typename Traits::less less; typedef typename std::conditional< std::is_same< equal_to, opt::none >::value, typename std::conditional< std::is_same< compare, opt::none >::value, typename std::conditional< std::is_same< less, opt::none >::value, typename std::conditional< Forced, std::equal_to, opt::none >::type, make_equal_to_from_less< less > >::type, make_equal_to_from_compare< compare > >::type, equal_to >::type type; }; } //@endcond }} // namespace cds::opt #endif // #ifndef CDSLIB_OPT_COMPARE_H libcds-2.3.3/cds/opt/hash.h000066400000000000000000000122451341244201700154300ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_OPT_HASH_H #define CDSLIB_OPT_HASH_H #include #include #include namespace cds { namespace opt { /// [type-option] Option setter for a hash function /** This option setter specifies hash functor used in unordered containers. The default value of template argument \p Functor is \p cds::opt::v::hash that is synonym for std::hash implementation of standard library. */ template struct hash { //@cond template struct pack: public Base { typedef Functor hash; }; //@endcond }; namespace v { //@cond using std::hash; /// Metafunction selecting default hash implementation /** The metafunction selects appropriate hash functor implementation. If \p Hash is not equal to opt::none, then result of metafunction is \p Hash. Otherwise, the result is std::hash . Note that default hash function like std::hash is generally not suitable for complex type \p Q and its derivatives. You should manually provide particular hash functor for such types. 
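            For example, a hand-written functor for a composite key
            (a minimal sketch; \p my_key and its \p name member are hypothetical):
            \code
            struct my_key_hash {
                size_t operator()( my_key const& k ) const
                {
                    return std::hash<std::string>()( k.name );
                }
            };
            typedef cds::opt::hash< my_key_hash > hash_option;
            \endcode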
*/ template struct hash_selector { typedef Hash type; ///< resulting implementation of hash functor }; template <> struct hash_selector { struct type { template size_t operator()( Q const& key ) const { return std::hash()( key ); } }; }; //@endcond } // namespace v //@cond namespace details { template struct hash_list; template struct hash_list< std::tuple > { static size_t const size = sizeof...(Functors); typedef size_t values[size]; typedef std::tuple hash_tuple_type; hash_tuple_type hash_tuple; hash_list() {} hash_list( hash_tuple_type const& t) : hash_tuple( t ) {} hash_list( hash_tuple_type&& t) : hash_tuple( std::forward(t)) {} template typename std::enable_if< (I == sizeof...(Functors)) >::type apply( size_t * /*dest*/, T const& /*v*/ ) const {} template typename std::enable_if< (I < sizeof...(Functors)) >::type apply( size_t * dest, T const& v ) const { dest[I] = std::get( hash_tuple )( v ); apply( dest, v ); } template void operator()( size_t * dest, T const& v ) const { apply<0>( dest, v ); } }; } // namespace details //@endcond /// Declare tuple for hash functors \p Functors template using hash_tuple = details::hash_list< std::tuple< Functors... >>; //@cond // At least, two functors must be provided. Single functor is not supported template struct hash< std::tuple >; //@endcond /// Multi-functor hash option setter - specialization for \p std::tuple template struct hash< std::tuple > { //@cond template struct pack: public Base { typedef details::hash_list< std::tuple > hash; }; //@endcond }; //@cond namespace details { template struct hash_list_wrapper { typedef HashList hash_list; typedef WrappedType wrapped_type; typedef Wrapper wrapper_type; typedef typename hash_list::hash_tuple_type hash_tuple_type; static size_t const size = hash_list::size; hash_list m_wrappedList; hash_list_wrapper() {} hash_list_wrapper( hash_tuple_type const& t) : m_wrappedList( t ) {} hash_list_wrapper( hash_tuple_type&& t) : m_wrappedList( std::forward(t)) {} void operator()( size_t * dest, wrapped_type const& what ) const { m_wrappedList( dest, wrapper_type()( what )); } template void operator()( size_t * dest, Q const& what) const { m_wrappedList( dest, what ); } }; } // namespace details //@endcond }} // namespace cds::opt #endif // #ifndef CDSLIB_OPT_HASH_H libcds-2.3.3/cds/opt/options.h000066400000000000000000001146541341244201700162070ustar00rootroot00000000000000// Copyright (c) 2006-2018 Maxim Khizhinsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_OPT_OPTIONS_H #define CDSLIB_OPT_OPTIONS_H /* Framework to define template options Editions: 2011.01.23 khizmax Created */ #include // rand, srand #include #include #include #include namespace cds { /// Framework to define template options /** There are two kind of options: - \p type-option - option that determines a data type. The template argument \p Type of the option is a type. - \p value-option - option that determines a value. The template argument \p Value of the option is a value. 
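        For example, \p opt::gc is a type-option and \p opt::alignment is a value-option:
        \code
        typedef cds::opt::gc< cds::gc::HP >  gc_option;     // Type is cds::gc::HP
        typedef cds::opt::alignment< 64 >    align_option;  // Value is 64
        \endcode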
*/ namespace opt { /// Type indicates that an option is not specified and the default one should be used struct none { //@cond template struct pack: public Base {}; //@endcond }; /// Metafunction for selecting default option value /** Template parameters: - \p Option - option value - \p Default - default option value - \p Value - option value if \p Option is not opt::none If \p Option is opt::none, the metafunction result is \p Default, otherwise the result is \p Value. Examples: \code // default_spin is cds::sync::spin typedef typename cds::opt::select_default< cds::opt::none, cds::sync::spin >::type default_spin; // spin_32bit is cds::sync::reentrant_spin32 typedef typename cds::opt::select_default< cds::opt::none, cds::sync::reentrant_spin32 >::type spin_32bit; \endcode */ template struct select_default { typedef Value type ; ///< metafunction result }; //@cond template struct select_default< none, Default > { typedef Default type; }; //@endcond /// Metafunction to select option value /** This metafunction is intended for extracting the value of the \p Option option. For example, \code #include #include // only for testing purpose (static_assert) struct tag_a; // Define option typedef cds::opt::tag< tag_a > tag_option; // What is the value of the tag_option? // How we can extract tag_a from tag_option? // Here is a solution: typedef cds::opt::value< tag_option >::tag tag_option_value; // tag_option_value is the same as tag_a static_assert( std::is_same< tag_option_value, tag_a >::value, "Error: tag_option_value != tag_a" ); \endcode */ template struct value: public Option::template pack {}; /// [type-option] Option setter specifies a tag /** Suppose, you have a struct \code struct Feature { .... }; \endcode and you want that your class \p X would be derived from several \p Feature: \code class X: public Feature, public Feature { .... }; \endcode How can you distinguish one \p Feature from another? You may use a tag option: \code template struct Feature { .... }; class tag_a; class tag_b; class X: public Feature< tag_a >, public Feature< tag_b > { .... }; \endcode Now you can distinguish one \p Feature from another: \code X x; Feature& fa = static_cast< Feature >( x ); Feature& fb = static_cast< Feature >( x ); \endcode \p tag option setter allows you to do things like this for an option-centric approach: \code template struct Feature { .... }; class tag_a; class tag_b; class X: public Feature< tag >, public Feature< tag > { .... }; \endcode This option setter is widely used in cds::intrusive containers to distinguish between different intrusive part of container's node. An incomplete type can serve as a \p Tag. */ template struct tag { //@cond template struct pack: public Base { typedef Tag tag; }; //@endcond }; /// [type-option] Option setter specifies lock class /** Specification of the \p Type class is: \code struct Lock { void lock(); void unlock(); }; \endcode */ template struct lock_type { //@cond template struct pack: public Base { typedef Type lock_type; }; //@endcond }; /// [type-option] @ref cds_sync_monitor "Monitor" type setter /** This option setter specifyes @ref cds_sync_monitor "synchronization monitor" for blocking container. */ template struct sync_monitor { //@cond template struct pack : public Base { typedef Type sync_monitor; }; //@endcond }; /// [type-option] Back-off strategy option setter /** Back-off strategy used in some algorithm. See cds::backoff namespace for back-off explanation and supported interface. 
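        For example, a minimal sketch assuming the standard \p cds::backoff::yield strategy:
        \code
        typedef cds::opt::back_off< cds::backoff::yield > backoff_option;
        \endcode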
*/ template struct back_off { //@cond template struct pack: public Base { typedef Type back_off; }; //@endcond }; /// [type-option] Option setter for garbage collecting schema used /** Possible values of \p GC template parameter are: - cds::gc::HP - Hazard Pointer garbage collector - cds::gc::DHP - Dynamic Hazard Pointer garbage collector - cds::gc::none::GC - No garbage collector (not supported for some containers) */ template struct gc { //@cond template struct pack: public Base { typedef GC gc; }; //@endcond }; /// [type-option] Option setter for an allocator /** \p Type is allocator with \p std::allocator interface. Default is value of macro CDS_DEFAULT_ALLOCATOR that, in turn, is \p std::allocator. The \p libcds containers actively use rebinding to convert an allocator of one type to another. Thus, you may specify any valid type as std::allocator's template parameter. See also opt::node_allocator */ template struct allocator { //@cond template struct pack: public Base { typedef Type allocator; }; //@endcond }; /// [type-option] Option setter for node allocator /** \p Type is allocator with \p std::allocator interface. Default is value of macro CDS_DEFAULT_ALLOCATOR that, in turn, is \p std::allocator. Many node-base containers require an allocator for maintaining data (container's node) and for internal use. Sometimes, this types of allocator should be different for performance reason. For example, we should like to allocate the node from a pool of preallocated nodes. Such pool can be seen as the node allocator. Usually, if a container supports \p opt::allocator and \p %opt::node_allocator options and \p opt::node_allocator is not specified the \p %opt::allocator option is used for maintaining the nodes. The \p libcds containers actively use rebinding to convert an allocator of one type to another. Thus, you may specify any valid type as std::allocator's template parameter. */ template struct node_allocator { //@cond template struct pack: public Base { typedef Type node_allocator; }; //@endcond }; /// [type-option] Option setter for item counting /** Some data structure (for example, queues) has additional feature for item counting. This option allows to set up appropriate item counting policy for that data structure. Predefined option \p Type: - \p atomicity::empty_item_counter - no item counting performed. It is default policy for many containers - \p atomicity::item_counter - the class that provides atomic item counting - \p atomicity::cache_friendly_item_counter - cache-friendly atomic item counter - \p opt::v::sequential_item_counter - simple non-atomic item counter. This counter is not intended for concurrent containers and may be used only if it is explicitly noted. You may provide other implementation of \p atomicity::item_counter interface for your needs. Note, the item counting in lock-free containers cannot be exact; for example, if item counter for a container returns zero it is not mean that the container is empty. So, the item counter may be used for statistical purposes only. */ template struct item_counter { //@cond template struct pack: public Base { typedef Type item_counter; }; //@endcond }; /// Special alignment constants for \ref cds::opt::alignment option enum special_alignment { no_special_alignment = 0, ///< no special alignment cache_line_alignment = 1 ///< use cache line size defined in cds/user_setup/cache_line.h }; /// [value-option] Alignment option setter /** Alignment for some internal data of containers. May be useful to solve false sharing problem. 
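        For example, a minimal sketch that aligns internal data to the cache line boundary:
        \code
        typedef cds::opt::alignment< cds::opt::cache_line_alignment > align_option;
        \endcode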
\p Value defines desired alignment and it may be power of two integer or predefined values from \ref special_alignment enum. */ template struct alignment { //@cond template struct pack: public Base { enum { alignment = Value }; }; //@endcond }; //@cond namespace details { template struct alignment_setter { typedef typename cds::details::aligned_type< Type, Alignment >::type type; }; template struct alignment_setter { typedef Type type; }; template struct alignment_setter { typedef typename cds::details::aligned_type< Type, c_nCacheLineSize >::type type; }; } // namespace details //@endcond /// Special padding constants for \p cds::opt::padding option enum special_padding { no_special_padding = 0, ///< no special padding cache_line_padding = 1, ///< use cache line size defined in cds/user_setup/cache_line.h /// Apply padding only for tiny data when data size is less than required padding /** The flag means that if your data size is less than the cacheline size, the padding is applyed. Otherwise no padding will be applyed. This flag is applyed for padding value: \code cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >; cds::opt::padding< 256 | cds::opt::padding_tiny_data_only >; \endcode */ padding_tiny_data_only = 0x80000000, //@cond padding_flags = padding_tiny_data_only //@endcond }; /// [value-option] Padding option setter /** The padding for the internal data of some containers. May be useful to solve false sharing problem. \p Value defines desired padding and it may be power of two integer or predefined values from \p special_padding enum. */ template struct padding { //@cond template struct pack: public Base { enum { padding = Value }; }; //@endcond }; //@cond template struct actual_padding { enum { value = Padding & ~padding_flags }; }; template <> struct actual_padding { enum { value = cds::c_nCacheLineSize }; }; template <> struct actual_padding { enum { value = cds::c_nCacheLineSize }; }; //@endcond //@cond namespace details { enum padding_vs_datasize { padding_datasize_less, padding_datasize_equal, padding_datasize_greater }; template < typename T, unsigned int Padding, bool NoPadding, padding_vs_datasize Relation, bool TinyOnly > struct apply_padding_helper; template struct apply_padding_helper < T, 0, true, Relation, TinyOnly > { struct type { T data; }; typedef void padding_type; }; template struct apply_padding_helper < T, Padding, false, padding_datasize_equal, TinyOnly > { struct type { T data; }; typedef void padding_type; }; template struct apply_padding_helper < T, Padding, false, padding_datasize_less, TinyOnly > { typedef uint8_t padding_type[Padding - sizeof( T )]; struct type { T data; padding_type pad_; }; }; template struct apply_padding_helper < T, Padding, false, padding_datasize_greater, false > { typedef uint8_t padding_type[Padding - sizeof( T ) % Padding]; struct type { T data; padding_type pad_; }; }; template struct apply_padding_helper < T, Padding, false, padding_datasize_greater, true > { struct type { T data; }; typedef void padding_type; }; template struct apply_padding { private: enum { padding = Padding & ~padding_flags }; public: static constexpr const size_t c_nPadding = static_cast(padding) == static_cast(cache_line_padding) ? cds::c_nCacheLineSize : static_cast(padding) == static_cast(no_special_padding) ? 0 : padding; static_assert( (c_nPadding & (c_nPadding - 1)) == 0, "Padding must be a power-of-two number" ); typedef apply_padding_helper< T, c_nPadding, c_nPadding == 0, sizeof( T ) < c_nPadding ? 
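// Worked example: for sizeof( T ) == 20 and Padding == 64 the condition above
// selects padding_datasize_less, so apply_padding produces
//   struct type { T data; uint8_t pad_[44]; };  // 64 - 20 = 44 bytes of padding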
                padding_datasize_less : sizeof( T ) == c_nPadding ?
                    padding_datasize_equal : padding_datasize_greater,
                (Padding & padding_tiny_data_only) != 0
            > result;

            typedef typename result::type type;

            typedef typename std::conditional<
                std::is_same< typename result::padding_type, void >::value,
                unsigned int,
                typename result::padding_type
            >::type padding_type;
        };

    } // namespace details
    //@endcond

    /// [type-option] Generic option setter for statistics
    /**
        This option sets a type to gather statistics.
        The option is generic - no predefined type(s) is provided.
        The particular \p Type of statistics depends on the internal structure of the object.
    */
    template <typename Type>
    struct stat {
        //@cond
        template <typename Base> struct pack: public Base
        {
            typedef Type stat;
        };
        //@endcond
    };

    /// [type-option] Option setter for C++ memory model
    /**
        The cds library supports the following memory ordering constraints for atomic operations
        in container implementation:
        - \p v::relaxed_ordering - relaxed C++ memory model. This mode supports the full set of memory ordering
            constraints: \p memory_order_relaxed, \p memory_order_acquire, \p memory_order_release and so on.
        - \p v::sequential_consistent - sequentially consistent C++ memory model (default memory ordering for C++).
            In this mode any memory ordering constraint maps to \p memory_order_seq_cst.

        The \p Type template parameter can be \p v::relaxed_ordering or \p v::sequential_consistent.

        You may mix different memory ordering options for different containers: one declared as sequentially
        consistent, another as relaxed.
        Usually, \p v::relaxed_ordering is the default memory ordering for libcds containers.
    */
    template <typename Type>
    struct memory_model {
        //@cond
        template <typename Base> struct pack: public Base
        {
            typedef Type memory_model;
        };
        //@endcond
    };

    /// [type-option] Base type traits option setter
    /**
        This option setter is intended generally for internal use for type rebinding.
    */
    template <typename Type>
    struct type_traits {
        //@cond
        template <typename Base> struct pack: public Base
        {
            typedef Type type_traits;
        };
        //@endcond
    };

    /// Resizing policy option
    /**
        This option specifies the resizing policy that decides when to resize a container.
        Used in some containers, for example, in container::StripedHashSet, intrusive::StripedHashSet.

        The real resizing policy specified by \p Type strongly depends on the container that supports
        this option; see the container documentation about possible \p Type values.
    */
    template <typename Type>
    struct resizing_policy {
        //@cond
        template <typename Base> struct pack: public Base
        {
            typedef Type resizing_policy;
        };
        //@endcond
    };

    /// Copy policy option
    /**
        The copy policy defines an item copying algorithm which is used, for example, when a container is resized.
        It is a very specific algorithm that depends on the type of the container.
    */
    template <typename Type>
    struct copy_policy {
        //@cond
        template <typename Base> struct pack: public Base
        {
            typedef Type copy_policy;
        };
        //@endcond
    };

    /// Swap policy option
    /**
        The swap policy specifies an algorithm for swapping two objects.
        Usually, the default policy is \p std::swap (see opt::v::default_swap_policy):
        @code
        struct std_swap {
            template <typename T>
            void operator ()( T& v1, T& v2 )
            {
                std::swap( v1, v2 );
            }
        };
        @endcode
    */
    template <typename Type>
    struct swap_policy {
        //@cond
        template <typename Base> struct pack: public Base
        {
            typedef Type swap_policy;
        };
        //@endcond
    };

    /// Move policy option
    /**
        The move policy specifies an algorithm for moving object content.
        In the trivial case, it can be simple assignment.

        The move interface is:
        \code
        template <typename T>
        struct move_policy {
            void operator()( T& dest, T& src );
        };
        \endcode

        Note that in the move algorithm the \p src source argument can be changed too.
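        A minimal policy satisfying this interface (a sketch):
        \code
        struct my_move_policy {
            template <typename T>
            void operator()( T& dest, T& src )
            {
                dest = std::move( src );  // src is left in a moved-from state
            }
        };
        \endcode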
So you can use move semantics. Usually, the default move policy is opt::v::assignment_move_policy */ template struct move_policy { //@cond template struct pack: public Base { typedef Type move_policy; }; //@endcond }; /// [value-option] Enable sorting /** This option enables (Enable = true) or disables (Enable == false) sorting of a container. */ template struct sort { //@cond template struct pack: public Base { static bool const sort = Enable; }; //@endcond }; /// [type-option] Concurrent access policy /** This option specifies synchronization strategy for fine-grained lock-based containers. The option has no predefined \p Policy type. For each container that accepts this option the range of available \p Policy types is unique. */ template struct mutex_policy { //@cond template struct pack: public Base { typedef Policy mutex_policy; }; //@endcond }; /// [type-option] Random number generator /** The option specifies a random number generator. \p Random can be any STL random number generator producing unsigned integer: \p std::linear_congruential_engine, \p std::mersenne_twister_engine, \p std::subtract_with_carry_engine and so on, or \p opt::v::c_rand. */ template struct random_engine { //@cond template struct pack: public Base { typedef Random random_engine; }; //@endcond }; /// [type-option] Free-list implementation /** See \p cds::intrusive::FreeList for free-list interface */ template struct free_list { //@cond template struct pack: public Base { typedef FreeList free_list; }; //@endcond }; //@cond // For internal use template struct key_accessor { template struct pack: public Base { typedef Accessor key_accessor; }; }; template struct replace_key_accessor { typedef typename std::conditional< std::is_same< typename Traits::key_accessor, WhatReplace >::value, typename opt::key_accessor< ReplaceWith >::template pack< Traits >, Traits >::type type; }; //@endcond }} // namespace cds::opt // **************************************************** // Options predefined types and values namespace cds { namespace opt { /// Predefined options value namespace v { /// Sequential non-atomic item counter /** This type of \p opt::item_counter option is not intended for concurrent containers and may be used only if it is explicitly noted. */ class sequential_item_counter { public: typedef size_t counter_type ; ///< Counter type protected: counter_type m_nCounter ; ///< Counter public: sequential_item_counter() : m_nCounter(0) {} /// Returns current value of the counter counter_type value() const { return m_nCounter; } /// Same as \ref value() with relaxed memory ordering operator counter_type() const { return value(); } /// Increments the counter. Semantics: postincrement counter_type inc() { return m_nCounter++; } /// Decrements the counter. 
        /// Semantics: postdecrement
        counter_type dec()
        {
            return m_nCounter--;
        }

        /// Preincrement
        counter_type operator ++()
        {
            return inc() + 1;
        }
        /// Postincrement
        counter_type operator ++(int)
        {
            return inc();
        }

        /// Predecrement
        counter_type operator --()
        {
            return dec() - 1;
        }
        /// Postdecrement
        counter_type operator --(int)
        {
            return dec();
        }

        /// Resets count to 0
        void reset()
        {
            m_nCounter = 0;
        }
    };

    /// Relaxed memory ordering \p opt::memory_model
    /**
        In this ordering the memory constraints are defined according to the C++ Memory Model specification:
        each constraint is mapped to \p std::memory_order constraints one-to-one
    */
    struct relaxed_ordering {
        //@cond
        static const atomics::memory_order memory_order_relaxed    = atomics::memory_order_relaxed;
        static const atomics::memory_order memory_order_consume    = atomics::memory_order_consume;
        static const atomics::memory_order memory_order_acquire    = atomics::memory_order_acquire;
        static const atomics::memory_order memory_order_release    = atomics::memory_order_release;
        static const atomics::memory_order memory_order_acq_rel    = atomics::memory_order_acq_rel;
        static const atomics::memory_order memory_order_seq_cst    = atomics::memory_order_seq_cst;
        //@endcond
    };

    /// Sequential consistent \p opt::memory_model ordering
    /**
        In this memory model any memory constraint is equivalent to \p std::memory_order_seq_cst.
    */
    struct sequential_consistent {
        //@cond
        static const atomics::memory_order memory_order_relaxed    = atomics::memory_order_seq_cst;
        static const atomics::memory_order memory_order_consume    = atomics::memory_order_seq_cst;
        static const atomics::memory_order memory_order_acquire    = atomics::memory_order_seq_cst;
        static const atomics::memory_order memory_order_release    = atomics::memory_order_seq_cst;
        static const atomics::memory_order memory_order_acq_rel    = atomics::memory_order_seq_cst;
        static const atomics::memory_order memory_order_seq_cst    = atomics::memory_order_seq_cst;
        //@endcond
    };

    //@cond
    /// Totally relaxed \p opt::memory_model ordering (do not use!)
    /**
        In this memory model any memory constraint is equivalent to \p std::memory_order_relaxed.
        @warning Do not use this model! It is intended for testing purposes only
        to verify debugging instruments like Thread Sanitizer.
    */
    struct total_relaxed_ordering {
        static const atomics::memory_order memory_order_relaxed    = atomics::memory_order_relaxed;
        static const atomics::memory_order memory_order_consume    = atomics::memory_order_relaxed;
        static const atomics::memory_order memory_order_acquire    = atomics::memory_order_relaxed;
        static const atomics::memory_order memory_order_release    = atomics::memory_order_relaxed;
        static const atomics::memory_order memory_order_acq_rel    = atomics::memory_order_relaxed;
        static const atomics::memory_order memory_order_seq_cst    = atomics::memory_order_relaxed;
    };
    //@endcond

    /// Default swap policy for \p opt::swap_policy option
    /**
        The default swap policy is a wrapper around \p std::swap algorithm.
    */
    struct default_swap_policy {
        /// Performs swapping of \p v1 and \p v2 using \p std::swap algo
        template <typename T>
        void operator()( T& v1, T& v2 ) const
        {
            std::swap( v1, v2 );
        }
    };

    /// \p opt::move_policy based on move-assignment operator
    struct assignment_move_policy {
        /// dest = std::move( src )
        template <typename T>
        void operator()( T& dest, T&& src ) const
        {
            dest = std::move( src );
        }
    };

    /// \p rand()-based random number generator for \p opt::random_engine
    /**
        This generator returns a pseudorandom integer in the range 0 to \p RAND_MAX (at least 32767).
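        For example, a minimal sketch selecting this generator:
        \code
        typedef cds::opt::random_engine< cds::opt::v::c_rand > rng_option;
        \endcode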
*/ struct c_rand { typedef unsigned int result_type; ///< Result type /// Constructor initializes object calling \p std::srand() c_rand() { std::srand(1); } /// Returns next random number calling \p std::rand() result_type operator()() { return (result_type) std::rand(); } }; } // namespace v }} // namespace cds::opt // **************************************************** // Options metafunctions namespace cds { namespace opt { //@cond namespace details { template struct do_pack { // Use "pack" member template to pack options typedef typename Option::template pack type; }; template class typelist; template struct typelist_head; template struct typelist_head< typelist > { typedef Head type; }; template struct typelist_head< typelist > { typedef Head type; }; template struct typelist_tail; template struct typelist_tail< typelist > { typedef typelist type; }; template struct typelist_tail< typelist > { typedef typelist<> type; }; template struct make_options_impl { typedef typename make_options_impl< typename do_pack< OptionList, typename typelist_head< Typelist >::type >::type, typename typelist_tail::type >::type type; }; template struct make_options_impl > { typedef OptionList type; }; } // namespace details //@endcond /// make_options metafunction /** @headerfile cds/opt/options.h The metafunction converts option list \p Options to traits structure. The result of metafunction is \p type. Template parameter \p OptionList is default option set (default traits). \p Options is option list. */ template struct make_options { #ifdef CDS_DOXYGEN_INVOKED typedef implementation_defined type ; ///< Result of the metafunction #else typedef typename details::make_options_impl< OptionList, details::typelist >::type type; #endif }; // ***************************************************************** // find_type_traits metafunction // ***************************************************************** //@cond namespace details { template struct find_type_traits_option; template <> struct find_type_traits_option<> { typedef cds::opt::none type; }; template struct find_type_traits_option< Any > { typedef cds::opt::none type; }; template struct find_type_traits_option< cds::opt::type_traits< Any > > { typedef Any type; }; template struct find_type_traits_option< cds::opt::type_traits< Any >, Options... > { typedef Any type; }; template struct find_type_traits_option< Any, Options... > { typedef typename find_type_traits_option< Options... >::type type; }; } // namespace details //@endcond /// Metafunction to find opt::type_traits option in \p Options list /** @headerfile cds/opt/options.h If \p Options contains \p opt::type_traits option then it is the metafunction result. Otherwise the result is \p DefaultOptons. */ template struct find_type_traits { typedef typename select_default< typename details::find_type_traits_option::type, DefaultOptions>::type type ; ///< Metafunction result }; // ***************************************************************** // find_option metafunction // ***************************************************************** //@cond namespace details { template struct find_option; struct compare_ok; struct compare_fail; template struct compare_option { typedef compare_fail type; }; template
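// A sketch of make_options usage (hypothetical traits and option values;
// the metafunction folds an option list into a traits structure):
//
//   struct default_traits {
//       typedef cds::opt::none gc;
//       typedef cds::opt::none back_off;
//   };
//   typedef cds::opt::make_options< default_traits,
//       cds::opt::gc< cds::gc::HP >,
//       cds::opt::back_off< cds::backoff::yield >
//   >::type traits;
//   // traits::gc is cds::gc::HP; traits::back_off is cds::backoff::yield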