pax_global_header00006660000000000000000000000064137440332260014516gustar00rootroot0000000000000052 comment=34acfc525b36118a9181e375781b892705ec9647 vmemcache-0.8.1/000077500000000000000000000000001374403322600134545ustar00rootroot00000000000000vmemcache-0.8.1/.gitignore000066400000000000000000000002571374403322600154500ustar00rootroot00000000000000.* *.cmake *~ *.so *.3 core Makefile CMakeCache.txt CMakeFiles Testing !.gitignore !.gitattributes !.clang-format !.travis.yml !.mailmap !.cstyleignore !.codecov.yml /build vmemcache-0.8.1/.travis.yml000077700000000000000000000000001374403322600176132travis.ymlustar00rootroot00000000000000vmemcache-0.8.1/CMakeLists.txt000066400000000000000000000131361374403322600162200ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation cmake_minimum_required(VERSION 3.3) project(vmemcache C) set(VERSION_MAJOR 0) set(VERSION_MINOR 8) set(VERSION ${VERSION_MAJOR}.${VERSION_MINOR}) set(CMAKE_DISABLE_IN_SOURCE_BUILD ON) set(CMAKE_POSITION_INDEPENDENT_CODE ON) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}) include(FindThreads) if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "RelWithDebInfo") endif (NOT CMAKE_BUILD_TYPE) include(CheckCCompilerFlag) include(GNUInstallDirs) if(NOT MSVC) find_package(PkgConfig QUIET) if(NOT PKG_CONFIG_FOUND) message(WARNING "Pkg-config not found. Detection of other dependencies may fail.") endif() endif() if(NOT WIN32) if(PKG_CONFIG_FOUND) pkg_check_modules(VALGRIND QUIET valgrind) else() find_package(VALGRIND QUIET) endif() if (NOT VALGRIND_FOUND) message(WARNING "Valgrind not found. Some tests will be skipped.") endif() endif() find_program(PANDOC NAMES pandoc) if(NOT PANDOC) message(WARNING "pandoc not found - documentation will not be generated") endif() set(CMAKE_C_STANDARD 99) include(CheckSymbolExists) CHECK_SYMBOL_EXISTS(getentropy unistd.h HAVE_GETENTROPY) # Checks whether flag is supported by current C compiler and appends # it to the relevant cmake variable. # 1st argument is a flag # 2nd (optional) argument is a build type (debug, release, relwithdebinfo) macro(add_c_flag flag) string(REPLACE - _ flag2 ${flag}) string(REPLACE " " _ flag2 ${flag2}) string(REPLACE = "_" flag2 ${flag2}) set(check_name "C_HAS_${flag2}") check_c_compiler_flag("${flag}" "${check_name}") if (${${check_name}}) if (${ARGC} EQUAL 1) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") else() set(CMAKE_C_FLAGS_${ARGV1} "${CMAKE_C_FLAGS_${ARGV1}} ${flag}") endif() endif() endmacro() if(NOT MSVC) add_c_flag(-fno-common) add_c_flag(-Wall) add_c_flag(-Wconversion) add_c_flag(-Wfloat-equal) add_c_flag(-Wmissing-field-initializers) add_c_flag(-Wmissing-prototypes) add_c_flag(-Wmissing-variable-declarations) add_c_flag(-Wpointer-arith) add_c_flag(-Wsign-compare) add_c_flag(-Wsign-conversion) add_c_flag(-Wswitch-default) add_c_flag(-Wunused-macros) add_c_flag(-Wunreachable-code-return) add_c_flag(-Werror=incompatible-pointer-types) # Place each function or data item into its own section. Will be used to strip unneeded symbols. 
add_c_flag(-fdata-sections) add_c_flag(-ffunction-sections) add_c_flag(-ggdb DEBUG) add_c_flag(-DDEBUG DEBUG) add_c_flag(-ggdb RELWITHDEBINFO) add_c_flag(-fno-omit-frame-pointer RELWITHDEBINFO) check_c_compiler_flag(-Wl,-z,relro LINKER_HAS_RELRO) if(LINKER_HAS_RELRO) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,relro") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,relro") endif() check_c_compiler_flag(-Wl,--warn-common LINKER_HAS_WARN_COMMON) if(LINKER_HAS_WARN_COMMON) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--warn-common") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--warn-common") endif() add_c_flag("-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2" RELEASE) endif() configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY) add_custom_target(uninstall COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) function(add_cstyle name) if(${ARGC} EQUAL 1) add_custom_target(cstyle-${name} COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/cstyle ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h) else() add_custom_target(cstyle-${name} COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/cstyle ${ARGN}) endif() add_dependencies(cstyle cstyle-${name}) endfunction() # Generates check-whitespace-$name target and attaches it as a dependency # of global "check-whitespace" target. This target verifies C files in current # source dir do not have any whitespace errors. # If more arguments are used, then they are used as files to be checked # instead. # ${name} must be unique. function(add_check_whitespace name) if(${ARGC} EQUAL 1) add_custom_target(check-whitespace-${name} COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/check_whitespace ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h) else() add_custom_target(check-whitespace-${name} COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/check_whitespace ${ARGN}) endif() add_dependencies(check-whitespace check-whitespace-${name}) endfunction() add_custom_target(checkers ALL) add_custom_target(cstyle) add_custom_target(check-whitespace) add_custom_target(check-license COMMAND ${CMAKE_SOURCE_DIR}/utils/check_license/check-headers.sh ${CMAKE_SOURCE_DIR} BSD-3-Clause) add_check_whitespace(other ${CMAKE_SOURCE_DIR}/utils/check_license/*.sh ${CMAKE_SOURCE_DIR}/README.md) option(STATS_ENABLED "statistics are enabled" ON) option(DEVELOPER_MODE "enable developer checks" OFF) if(DEVELOPER_MODE) add_c_flag(-Werror) add_dependencies(checkers cstyle) add_dependencies(checkers check-whitespace) endif(DEVELOPER_MODE) option(TRACE_TESTS "more verbose test outputs" OFF) configure_file(libvmemcache.pc.in libvmemcache.pc) install(FILES ${CMAKE_BINARY_DIR}/libvmemcache.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) enable_testing() add_subdirectory(src) add_subdirectory(tests) add_subdirectory(benchmarks) if (PANDOC) add_subdirectory(doc) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/doc/vmemcache.3 DESTINATION ${CMAKE_INSTALL_MANDIR}/man3) endif() if(NOT "${CPACK_GENERATOR}" STREQUAL "") include(${CMAKE_SOURCE_DIR}/packages.cmake) endif() vmemcache-0.8.1/ChangeLog000066400000000000000000000034531374403322600152330ustar00rootroot00000000000000Wed Oct 21 2020 Piotr Balcer * Version 0.8.1 This is a minor patch release that includes a couple minor maintance changes, plus a couple of small performance improvements. * Use fast-hash for sharding. 
* Fix comparison of bytes with 8th bit set in the index radix tree. * Use chattr for the cache's file to enable nocow. * Fix some issues on 32-bit platforms. * Migrate from obsolete __sync_fetch API to __atomic_fetch. * Increase guard size to 4096 bytes. * Various documentation and test improvements. * add vmemcache_exists() to check entry existence without side-effects * Regression fixes for recent compilers Fri Mar 29 2019 Piotr Balcer * Version 0.8 This is the first official release of libvmemcache. It's an embeddable and lightweight in-memory caching solution designed to fully take advantage of large capacity memory, such as Persistent Memory with DAX, through memory mapping in an efficient and scalable way. Among other things, it includes: - Extent-based memory allocator which sidesteps the fragmentation problem that affects most in-memory databases and allows the cache to achieve very high space utilization for most workloads. - Buffered LRU, which combines a traditional LRU doubly-linked list with a non-blocking ring buffer to deliver high degree of scalability on modern multi-core CPUs. - Unique indexing structure, critnib, which delivers high-performance while being very space efficient. The reason this release has version 0.8 is because we are still looking for actual real world feedback before we stabilize the APIs and commit to maintaining backward compatibility. It does not mean that the library is unfinished or unstable. On the contrary, the cache is fully functional and we are confident in its quality. vmemcache-0.8.1/LICENSE000066400000000000000000000033111374403322600144570ustar00rootroot00000000000000Copyright 2018-2019, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Everything in this source tree is covered by the previous license with the following exceptions: * src/fast_hash.c and src/fash_hash.h licensed unded MIT. * utils/cstyle (used only during development) licensed under CDDL. 
vmemcache-0.8.1/README.md000066400000000000000000000042451374403322600147400ustar00rootroot00000000000000libvmemcache: buffer based LRU cache ======================================= [![Build Status](https://travis-ci.org/pmem/vmemcache.svg?branch=master)](https://travis-ci.org/pmem/vmemcache) [![Coverage Status](https://codecov.io/github/pmem/vmemcache/coverage.svg?branch=master)](https://codecov.io/gh/pmem/vmemcache/branch/master) **libvmemcache** is an embeddable and lightweight in-memory caching solution. It's designed to fully take advantage of large capacity memory, such as Persistent Memory with DAX, through memory mapping in an efficient and scalable way. The things that make it unique are: - Extent-based memory allocator which sidesteps the fragmentation problem that affects most in-memory databases and allows the cache to achieve very high space utilization for most workloads. - Buffered LRU, which combines a traditional LRU doubly-linked list with a non-blocking ring buffer to deliver high degree of scalability on modern multi-core CPUs. - Unique indexing structure, critnib, which delivers high-performance while being very space efficient. The cache is tuned to work optimally with relatively large value sizes. The smallest possible size is 256 bytes, but libvmemcache works best if the expected value sizes are above 1 kilobyte. # Building The Source # Requirements: - cmake >= 3.3 Optional: - valgrind (for tests) - pandoc (for documentation) For all systems: ```sh $ git clone https://github.com/pmem/vmemcache.git $ cd vmemcache $ mkdir build $ cd build ``` And then: ### On RPM-based Linux distros (Fedora, openSUSE, RHEL, SLES) ### ```sh $ cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCPACK_GENERATOR=rpm $ make package $ sudo rpm -i libvmemcache*.rpm ``` ### On DEB-based Linux distros (Debian, Ubuntu) ### ```sh $ cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCPACK_GENERATOR=deb $ make package $ sudo dpkg -i libvmemcache*.deb ``` ### On other Linux distros ### ```sh $ cmake .. -DCMAKE_INSTALL_PREFIX=~/libvmemcache-bin $ make $ make install ``` # Statistics # Statistics are enabled by default. They can be disabled at the compile time of the libvmemcache library if the **STATS_ENABLED** CMake option is set to OFF. See the man page for more information about statistics. 
vmemcache-0.8.1/benchmarks/000077500000000000000000000000001374403322600155715ustar00rootroot00000000000000vmemcache-0.8.1/benchmarks/CMakeLists.txt000066400000000000000000000020611374403322600203300ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation add_cstyle(benchmarks ${CMAKE_SOURCE_DIR}/benchmarks/*.c) add_check_whitespace(benchmarks ${CMAKE_SOURCE_DIR}/benchmarks/*.c) set(SOURCES benchmark_time.c rand.c ${CMAKE_SOURCE_DIR}/src/os_posix.c ${CMAKE_SOURCE_DIR}/src/os_thread_posix.c) add_executable(bench_micro bench_micro.c ${SOURCES}) target_include_directories(bench_micro PRIVATE ${CMAKE_SOURCE_DIR}/src ${CMAKE_SOURCE_DIR}/tests) target_link_libraries(bench_micro PRIVATE vmemcache) target_link_libraries(bench_micro PRIVATE ${CMAKE_THREAD_LIBS_INIT}) add_executable(bench_simul bench_simul.c ${SOURCES}) target_include_directories(bench_simul PRIVATE ${CMAKE_SOURCE_DIR}/src ${CMAKE_SOURCE_DIR}/tests) if (HAVE_GETENTROPY) target_compile_definitions(bench_simul PRIVATE HAVE_GETENTROPY) endif() if(STATS_ENABLED) target_compile_definitions(bench_simul PRIVATE STATS_ENABLED=1) endif() target_link_libraries(bench_simul PRIVATE vmemcache) target_link_libraries(bench_simul PRIVATE ${CMAKE_THREAD_LIBS_INIT}) vmemcache-0.8.1/benchmarks/bench_micro.c000066400000000000000000000242071374403322600202120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * bench_micro.c -- multithreaded micro-benchmark for libvmemcache * */ #include #include #include #include #include #include "libvmemcache.h" #include "test_helpers.h" #include "os_thread.h" #include "benchmark_time.h" #define MAX_VALUE_SIZE 256 #define BENCH_PUT (0x01) #define BENCH_GET (0x02) #define BENCH_ALL (BENCH_PUT | BENCH_GET) struct buffers { size_t size; char *buff; }; struct context { unsigned thread_number; VMEMcache *cache; struct buffers *buffs; unsigned nbuffs; unsigned ops_count; double secs; void *(*worker)(void *); }; /* * bench_init -- (internal) initialize benchmark */ static VMEMcache * bench_init(const char *path, size_t size, size_t extent_size, enum vmemcache_repl_p repl_p, unsigned n_threads, struct context *ctx) { VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, size); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, path)) UT_FATAL("vmemcache_add: %s (%s)", vmemcache_errormsg(), path); for (unsigned i = 0; i < n_threads; ++i) { ctx[i].cache = cache; ctx[i].secs = 0.0; } return cache; } /* * bench_fini -- (internal) finalize benchmark */ static void bench_fini(VMEMcache *cache) { vmemcache_delete(cache); } /* * worker_thread_put -- (internal) worker testing vmemcache_put() */ static void * worker_thread_put(void *arg) { struct context *ctx = arg; unsigned long long i; unsigned long long shift = ctx->thread_number * ctx->ops_count; benchmark_time_t t1, t2, tdiff; benchmark_time_get(&t1); for (i = shift; i < (shift + ctx->ops_count); i++) { if (vmemcache_put(ctx->cache, &i, sizeof(i), ctx->buffs[i % ctx->nbuffs].buff, ctx->buffs[i % ctx->nbuffs].size)) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); } benchmark_time_get(&t2); benchmark_time_diff(&tdiff, &t1, &t2); ctx->secs = benchmark_time_get_secs(&tdiff); return NULL; } /* * worker_thread_get -- (internal) worker testing vmemcache_get() */ static void * worker_thread_get(void *arg) { struct context *ctx = arg; unsigned long long i; benchmark_time_t t1, t2, tdiff; char vbuf[MAX_VALUE_SIZE]; /* user-provided 
buffer */ size_t vbufsize = MAX_VALUE_SIZE; /* size of vbuf */ size_t vsize = 0; /* real size of the object */ benchmark_time_get(&t1); for (i = 0; i < ctx->ops_count; i++) { vmemcache_get(ctx->cache, &i, sizeof(i), vbuf, vbufsize, 0, &vsize); } benchmark_time_get(&t2); benchmark_time_diff(&tdiff, &t1, &t2); ctx->secs = benchmark_time_get_secs(&tdiff); return NULL; } /* * run_threads -- (internal) create and join threads */ static void run_threads(unsigned n_threads, os_thread_t *threads, struct context *ctx) { for (unsigned i = 0; i < n_threads; ++i) os_thread_create(&threads[i], NULL, ctx[i].worker, &ctx[i]); for (unsigned i = 0; i < n_threads; ++i) os_thread_join(&threads[i], NULL); } /* * print_bench_results -- (internal) print results of the benchmark */ static void print_bench_results(const char *op_name, unsigned n_threads, unsigned ops_per_thread, struct context *ctx) { double total_time = 0.0; for (unsigned i = 0; i < n_threads; ++i) total_time += ctx[i].secs; double ops = n_threads * ops_per_thread; double avg_thread = total_time / (double)n_threads; double avg_put = total_time / ops; double avg_ops = ops / total_time; printf("Total time of all threads : %e secs\n", total_time); printf("Average time of one thread : %e secs\n\n", avg_thread); printf("Average time of one '%s' operation : %e secs\n", op_name, avg_put); printf("Average number of '%s' operations : %e ops/sec\n\n", op_name, avg_ops); } /* * run_test_put -- (internal) run test for vmemcache_put() */ static void run_bench_put(const char *path, size_t size, size_t extent_size, enum vmemcache_repl_p repl_p, unsigned n_threads, os_thread_t *threads, unsigned ops_count, struct context *ctx) { VMEMcache *cache = bench_init(path, size, extent_size, repl_p, n_threads, ctx); unsigned ops_per_thread = ops_count / n_threads; for (unsigned i = 0; i < n_threads; ++i) { ctx[i].worker = worker_thread_put; ctx[i].ops_count = ops_per_thread; } printf("PUT benchmark:\n"); printf("==============\n"); printf("\n"); run_threads(n_threads, threads, ctx); print_bench_results("put", n_threads, ops_per_thread, ctx); bench_fini(cache); } /* * on_evict_cb -- (internal) 'on evict' callback for run_test_get */ static void on_evict_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { int *cache_is_full = arg; *cache_is_full = 1; } /* * run_bench_get -- (internal) run test for vmemcache_get() */ static void run_bench_get(const char *path, size_t size, size_t extent_size, enum vmemcache_repl_p repl_p, unsigned n_threads, os_thread_t *threads, unsigned ops_count, struct context *ctx) { VMEMcache *cache = bench_init(path, size, extent_size, repl_p, n_threads, ctx); int cache_is_full = 0; vmemcache_callback_on_evict(cache, on_evict_cb, &cache_is_full); unsigned long long i = 0; while (!cache_is_full) { if (vmemcache_put(ctx->cache, &i, sizeof(i), ctx->buffs[i % ctx->nbuffs].buff, ctx->buffs[i % ctx->nbuffs].size)) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); i++; } unsigned ops_per_thread = (unsigned)i; vmemcache_callback_on_evict(cache, NULL, NULL); for (unsigned i = 0; i < n_threads; ++i) { ctx[i].worker = worker_thread_get; ctx[i].ops_count = ops_per_thread; } printf("GET benchmark:\n"); printf("==============\n"); printf("\n"); run_threads(n_threads, threads, ctx); print_bench_results("get", n_threads, ops_per_thread, ctx); bench_fini(cache); } #define USAGE_STRING \ "usage: %s [benchmark] [threads] [ops_count] [cache_size] [cache_extent_size] [nbuffs] [min_size] [max_size] [seed]\n"\ " [benchmark] - can be: all 
(default), put or get\n"\ " Default values of parameters:\n"\ " - benchmark = all (put and get)\n"\ " - threads = %u\n"\ " - ops_count = %u\n"\ " - cache_size = %u\n"\ " - cache_extent_size = %u\n"\ " - nbuffs = %u\n"\ " - min_size = %u\n"\ " - max_size = %u\n"\ " - seed = \n" int main(int argc, char *argv[]) { unsigned seed; int ret = -1; /* default values of parameters */ unsigned benchmark = BENCH_ALL; unsigned n_threads = 10; unsigned ops_count = 100000; unsigned cache_size = VMEMCACHE_MIN_POOL; unsigned cache_extent_size = VMEMCACHE_MIN_EXTENT; unsigned nbuffs = 10; unsigned min_size = 128; unsigned max_size = MAX_VALUE_SIZE; if (argc < 2 || argc > 11) { fprintf(stderr, USAGE_STRING, argv[0], n_threads, ops_count, cache_size, cache_extent_size, nbuffs, min_size, max_size); exit(-1); } const char *dir = argv[1]; if (argc >= 3) { if (strcmp(argv[2], "put") == 0) benchmark = BENCH_PUT; else if (strcmp(argv[2], "get") == 0) benchmark = BENCH_GET; else if (strcmp(argv[2], "all") == 0) benchmark = BENCH_ALL; else { fprintf(stderr, "unknown benchmark: %s\n", argv[2]); exit(-1); } } if (argc >= 4 && (str_to_unsigned(argv[3], &n_threads) || n_threads < 1)) UT_FATAL("incorrect value of n_threads: %s", argv[3]); if (argc >= 5 && (str_to_unsigned(argv[4], &ops_count) || ops_count < 1)) UT_FATAL("incorrect value of ops_count: %s", argv[4]); if (argc >= 6 && (str_to_unsigned(argv[5], &cache_size) || cache_size < VMEMCACHE_MIN_POOL)) UT_FATAL("incorrect value of cache_size: %s", argv[5]); if (argc >= 7 && (str_to_unsigned(argv[6], &cache_extent_size) || cache_extent_size < VMEMCACHE_MIN_EXTENT)) UT_FATAL("incorrect value of cache_extent_size: %s", argv[6]); if (argc >= 8 && (str_to_unsigned(argv[7], &nbuffs) || nbuffs < 2)) UT_FATAL("incorrect value of nbuffs: %s", argv[7]); if (argc >= 9 && (str_to_unsigned(argv[8], &min_size) || min_size < VMEMCACHE_MIN_EXTENT)) UT_FATAL("incorrect value of min_size: %s", argv[8]); if (argc >= 10 && (str_to_unsigned(argv[9], &max_size) || max_size < min_size)) UT_FATAL("incorrect value of max_size: %s", argv[9]); if (argc == 11) { if (str_to_unsigned(argv[10], &seed)) UT_FATAL("incorrect value of seed: %s", argv[10]); } else { seed = (unsigned)time(NULL); } printf("Benchmark parameters:\n"); printf(" directory : %s\n", dir); printf(" n_threads : %u\n", n_threads); printf(" ops_count : %u\n", ops_count); printf(" cache_size : %u\n", cache_size); printf(" cache_extent_size : %u\n", cache_extent_size); printf(" nbuffs : %u\n", nbuffs); printf(" min_size : %u\n", min_size); printf(" max_size : %u\n", max_size); printf(" seed : %u\n\n", seed); srand(seed); struct buffers *buffs = calloc(nbuffs, sizeof(*buffs)); if (buffs == NULL) UT_FATAL("out of memory"); for (unsigned i = 0; i < nbuffs; ++i) { /* generate N random sizes (between A – B bytes) */ buffs[i].size = min_size + (size_t)rand() % (max_size - min_size + 1); /* allocate a buffer and fill it for every generated size */ buffs[i].buff = malloc(buffs[i].size); if (buffs[i].buff == NULL) UT_FATAL("out of memory"); memset(buffs[i].buff, 0xCC, buffs[i].size); } struct context *ctx = calloc(n_threads, sizeof(*ctx)); if (ctx == NULL) UT_FATAL("out of memory"); for (unsigned i = 0; i < n_threads; ++i) { ctx[i].thread_number = i; ctx[i].buffs = buffs; ctx[i].nbuffs = nbuffs; } os_thread_t *threads = calloc(n_threads, sizeof(*threads)); if (threads == NULL) UT_FATAL("out of memory"); if (benchmark & BENCH_PUT) run_bench_put(dir, cache_size, cache_extent_size, VMEMCACHE_REPLACEMENT_LRU, n_threads, threads, ops_count, ctx); 
if (benchmark & BENCH_GET) run_bench_get(dir, cache_size, cache_extent_size, VMEMCACHE_REPLACEMENT_LRU, n_threads, threads, ops_count, ctx); ret = 0; free(threads); free(ctx); for (unsigned i = 0; i < nbuffs; ++i) free(buffs[i].buff); free(buffs); return ret; } vmemcache-0.8.1/benchmarks/bench_simul.c000066400000000000000000000371751374403322600202420ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * bench_simul.c -- benchmark simulating expected workloads * */ #include #include #include #include #include #include #include #include #include #include #include "libvmemcache.h" #include "test_helpers.h" #include "os_thread.h" #include "benchmark_time.h" #include "rand.h" #include "util.h" #define PROG "bench_simul" #define MAX_THREADS 4096 #define SIZE_KB (1024ULL) #define SIZE_MB (1024 * 1024ULL) #define SIZE_GB (1024 * 1024 * 1024ULL) #define SIZE_TB (1024 * 1024 * 1024 * 1024ULL) #define NSECPSEC 1000000000 #define PUT_TAG (1ULL << 63) /* type of statistics */ typedef unsigned long long stat_t; enum simul_type { ST_INDEX, ST_REPL, ST_ALLOC, ST_FULL, }; enum size_dist { SD_LINEAR, SD_A, SD_B, }; static const char *dir; static uint64_t n_threads = 0; static uint64_t ops_count = 100000; static uint64_t warm_up = 100000; static uint64_t min_size = 8; static uint64_t max_size = 8 * SIZE_KB; static uint64_t size_distrib = SD_B; static uint64_t cache_size = VMEMCACHE_MIN_POOL; static uint64_t cache_extent_size = VMEMCACHE_MIN_EXTENT; static uint64_t repl_policy = VMEMCACHE_REPLACEMENT_LRU; static uint64_t get_size = 1; static uint64_t type = ST_FULL; static uint64_t key_diversity = 5; static uint64_t key_size = 16; static uint64_t seed = 0; static uint64_t junk_start = 0; static uint64_t latency_samples = 0; static const char *latency_file = NULL; static VMEMcache *cache; static int cache_is_full = 0; static const void *lotta_zeroes; static uint64_t *latencies = NULL; static uint64_t vsize_seed; /* case insensitive */ static const char *enum_repl[] = { "none", "LRU", 0 }; static const char *enum_type[] = { "index", "repl", "alloc", "full", 0 }; static const char *enum_size_distrib[] = { "linear", "a", "b", 0 }; static struct param_t { const char *name; uint64_t *var; uint64_t min; uint64_t max; const char **enums; } params[] = { { "n_threads", &n_threads, 0 /* n_procs */, MAX_THREADS, NULL }, { "ops_count", &ops_count, 1, -1ULL, NULL }, { "warm_up", &warm_up, 0, -1ULL, NULL }, { "min_size", &min_size, 1, -1ULL, NULL }, { "max_size", &max_size, 1, -1ULL, NULL }, { "size_distrib", &size_distrib, SD_LINEAR, SD_B, enum_size_distrib }, { "cache_size", &cache_size, VMEMCACHE_MIN_POOL, -1ULL, NULL }, { "cache_extent_size", &cache_extent_size, VMEMCACHE_MIN_EXTENT, 4 * SIZE_GB, NULL }, { "repl_policy", &repl_policy, 1, 1, enum_repl }, { "get_size", &get_size, 1, 4 * SIZE_GB, NULL }, { "type", &type, ST_INDEX, ST_FULL, enum_type }, { "key_diversity", &key_diversity, 1, 63, NULL }, { "key_size", &key_size, 1, SIZE_GB, NULL }, { "seed", &seed, 0, -1ULL, NULL }, /* 100% fill the cache with bogus entries at the start */ { "junk_start", &junk_start, 0, 1, NULL }, { "latency_samples", &latency_samples, 0, SIZE_GB, NULL }, { "latency_file", NULL, 0, 0, NULL }, { 0 }, }; static struct { os_cond_t cond; os_mutex_t mutex; uint64_t wanted; } ready; #ifdef STATS_ENABLED /* names of statistics */ static const char *stat_str[VMEMCACHE_STATS_NUM] = { "puts", "gets", "hits", "misses", "evicts", "cache entries", "DRAM size used", "pool size used", "heap 
entries", }; #endif /* STATS_ENABLED */ static void print_stats(VMEMcache *cache); /* * parse_uint_param -- parse an uint, accepting suffixes */ static uint64_t parse_uint_param(const char *val, const char *name) { char *endptr; errno = 0; uint64_t x = strtoull(val, &endptr, 0); if (errno) UT_FATAL("invalid value for %s: \"%s\"", name, val); if (*endptr) { if (strcmp(endptr, "K") == 0 || strcmp(endptr, "KB") == 0) x *= SIZE_KB; else if (strcmp(endptr, "M") == 0 || strcmp(endptr, "MB") == 0) x *= SIZE_MB; else if (strcmp(endptr, "G") == 0 || strcmp(endptr, "GB") == 0) x *= SIZE_GB; else if (strcmp(endptr, "T") == 0 || strcmp(endptr, "TB") == 0) x *= SIZE_TB; else { UT_FATAL("invalid value for %s: \"%s\"", name, val); } } return x; } /* * parse_enum_param -- find an enum by name */ static uint64_t parse_enum_param(const char *val, const char *name, const char **enums) { for (uint64_t x = 0; enums[x]; x++) { if (!strcasecmp(val, enums[x])) return x; } fprintf(stderr, "Unknown value of %s; valid ones:", name); for (uint64_t x = 0; enums[x]; x++) fprintf(stderr, " %s", enums[x]); fprintf(stderr, "\n"); exit(1); } /* * parse_other_param -- params with custom behaviour */ static void parse_other_param(const char *val, const char *name) { if (strcmp(name, "latency_file")) UT_FATAL("unknown other_param"); latency_file = val; } /* * parse_param_arg -- parse a single name=value arg */ static void parse_param_arg(const char *arg) { const char *eq = strchr(arg, '='); if (!eq) UT_FATAL("params need to be var=value, got \"%s\"", arg); if (!eq[1]) UT_FATAL("empty value in \"%s\"", arg); for (struct param_t *p = params; p->name; p++) { if (strncmp(p->name, arg, (size_t)(eq - arg)) || p->name[eq - arg]) { continue; } if (!p->var) { parse_other_param(eq + 1, p->name); return; } uint64_t x = p->enums ? parse_enum_param(eq + 1, p->name, p->enums) : parse_uint_param(eq + 1, p->name); if (x < p->min) { UT_FATAL( "value for %s too small: wanted %lu..%lu, got %lu", p->name, p->min, p->max, x); } if (x > p->max) { UT_FATAL( "value for %s too big: wanted %lu..%lu, got %lu", p->name, p->min, p->max, x); } *p->var = x; return; } fprintf(stderr, "Unknown parameter \"%s\"; valid ones:", arg); for (struct param_t *p = params; p->name; p++) fprintf(stderr, " %s", p->name); fprintf(stderr, "\n"); exit(1); } /* * parse_args -- parse all args */ static void parse_args(const char **argv) { if (! *argv) UT_FATAL("Usage: "PROG" dir [arg=val] [...]"); dir = *argv++; /* * The dir argument is mandatory, but I expect users to forget about * it most of the time. Thus, let's validate it, requiring ./foo * for local paths (almost anyone will use /tmp/ or /path/to/pmem). * And, it's only for benchmarks anyway. */ if (*dir != '.' 
&& !strchr(dir, '/')) UT_FATAL( "implausible dir -- prefix with ./ if you want %s", dir); for (; *argv; argv++) parse_param_arg(*argv); } static void fill_key(char *key, uint64_t r) { rng_t rng; randomize_r(&rng, r); size_t len = key_size; for (; len >= 8; len -= 8, key += 8) *((uint64_t *)key) = rnd64_r(&rng); if (!len) return; uint64_t rest = rnd64_r(&rng); memcpy(key, &rest, len); } /* 64-bit randomness -> float [0..1) */ static double rnddouble(uint64_t x) { return (double)x / (65536.0 * 65536 * 65536 * 65536); } /* linear [0..1) -> exp/etc distributed [0..1) */ static double rndlength(uint64_t id) { switch (size_distrib) { case SD_LINEAR: return rnddouble(id); case SD_A: { /* polynomial (x⁴) */ double x = rnddouble(id); return x * x * x * x; } case SD_B: { /* piecewise-linear exponential */ uint64_t magnitude = id >> (64 - 5); /* 0..31 */ /* Pick a power of two. */ uint64_t y = 1ULL << magnitude; /* Fill lower bits randomly. */ uint64_t x = y | ((y - 1) & id); return (double)x / (65536.0 * 65536); } default: /* someone scribbled over our memory...? */ UT_FATAL("invalid enum for size distrib"); } } static inline uint64_t getticks(void) { struct timespec tv; clock_gettime(CLOCK_MONOTONIC, &tv); return (uint64_t)tv.tv_sec * NSECPSEC + (uint64_t)tv.tv_nsec; } static void run_ops(uint64_t ops, rng_t *rng, uint64_t *lat, void *get_buffer) { uint64_t opt, opt_tag; for (uint64_t count = 0; count < ops; count++) { uint64_t obj = n_lowest_bits(rnd64_r(rng), (int)key_diversity); char key[key_size + 1]; fill_key(key, obj); if (lat) opt = getticks(); if (vmemcache_get(cache, key, key_size, get_buffer, get_size, 0, NULL) < 0) { uint64_t size = min_size + (uint64_t)((double)(max_size - min_size + 1) * rndlength(hash64(obj ^ vsize_seed))); UT_ASSERTin(size, min_size, max_size); if (vmemcache_put(cache, key, key_size, lotta_zeroes, size) && errno != EEXIST) { print_stats(cache); UT_FATAL("vmemcache_put failed: %s", vmemcache_errormsg()); } opt_tag = PUT_TAG; } else { opt_tag = 0; } if (lat) *lat++ = (getticks() - opt) | opt_tag; } } static void __attribute__((noinline)) run_warm_up(rng_t *rng, void *get_buffer) { run_ops(warm_up, rng, NULL, get_buffer); /* Prevent tail call optimizations (to force stack frame, for perf). */ getpid(); } static void *worker(void *arg) { rng_t rng; randomize_r(&rng, seed ? seed + (uintptr_t)arg : 0); void *get_buffer = malloc(get_size); if (!get_buffer) UT_FATAL("couldn't allocate get_buffer"); uint64_t *lat = NULL; if (latencies) lat = latencies + ops_count * (uintptr_t)arg; run_warm_up(&rng, get_buffer); os_mutex_lock(&ready.mutex); if (--ready.wanted) os_cond_wait(&ready.cond, &ready.mutex); else { /* If warm_up disabled memcpy, re-enable it. 
*/ vmemcache_bench_set(cache, VMEMCACHE_BENCH_NO_MEMCPY, type < ST_FULL); printf("Starting measured run...\n"); os_cond_broadcast(&ready.cond); } os_mutex_unlock(&ready.mutex); benchmark_time_t t1, t2; benchmark_time_get(&t1); run_ops(ops_count, &rng, lat, get_buffer); benchmark_time_get(&t2); benchmark_time_diff(&t1, &t1, &t2); free(get_buffer); return (void *)(intptr_t)(t1.tv_sec * NSECPSEC + t1.tv_nsec); } #ifdef STATS_ENABLED /* * get_stat -- (internal) get one statistic */ static void get_stat(VMEMcache *cache, stat_t *stat_vals, enum vmemcache_statistic i_stat) { int ret = vmemcache_get_stat(cache, i_stat, &stat_vals[i_stat], sizeof(*stat_vals)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); } #endif /* STATS_ENABLED */ /* * print_stats -- (internal) print all statistics */ static void print_stats(VMEMcache *cache) { #ifdef STATS_ENABLED stat_t stat_vals[VMEMCACHE_STATS_NUM]; get_stat(cache, stat_vals, VMEMCACHE_STAT_PUT); get_stat(cache, stat_vals, VMEMCACHE_STAT_GET); get_stat(cache, stat_vals, VMEMCACHE_STAT_HIT); get_stat(cache, stat_vals, VMEMCACHE_STAT_MISS); get_stat(cache, stat_vals, VMEMCACHE_STAT_EVICT); get_stat(cache, stat_vals, VMEMCACHE_STAT_ENTRIES); get_stat(cache, stat_vals, VMEMCACHE_STAT_DRAM_SIZE_USED); get_stat(cache, stat_vals, VMEMCACHE_STAT_POOL_SIZE_USED); get_stat(cache, stat_vals, VMEMCACHE_STAT_HEAP_ENTRIES); float pool_used_percent = (100 * (float)stat_vals[VMEMCACHE_STAT_POOL_SIZE_USED]) / (float)cache_size; float hits_percent = (100 * (float)stat_vals[VMEMCACHE_STAT_HIT]) / (float)stat_vals[VMEMCACHE_STAT_GET]; printf("\nStatistics:\n"); for (int i = 0; i < VMEMCACHE_STATS_NUM; i++) printf(" %-20s : %llu\n", stat_str[i], stat_vals[i]); printf(" %-20s : %.2f %%\n", "pool size used [%]", pool_used_percent); printf(" %-20s : %.2f %%\n", "hits [%]", hits_percent); printf("\n"); #endif /* STATS_ENABLED */ } /* * on_evict_cb -- (internal) 'on evict' callback for run_test_get */ static void on_evict_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { cache_is_full = 1; } static int cmp_u64(const void *a, const void *b) { uint64_t l = *(uint64_t *)a; uint64_t r = *(uint64_t *)b; if (l < r) return -1; if (l > r) return 1; return 0; } static void print_ntiles(FILE *f, uint64_t *t, uint64_t n) { if (!n) { fprintf(f, "-\n"); return; } /* special case: if only one value is called for, give median */ if (latency_samples == 1) { fprintf(f, "%llu\n", t[n / 2] & ~PUT_TAG); return; } /* otherwise, give minimum, evenly spaced values, then maximum */ for (uint64_t i = 0; i < latency_samples; i++) { fprintf(f, i ? 
";%llu" : "%llu", t[i * (n - 1) / (latency_samples - 1)] & ~PUT_TAG); } fprintf(f, "\n"); } static void dump_latencies() { FILE *f = stdout; if (latency_file) { f = fopen(latency_file, "w"); if (!f) { UT_FATAL("can't create latency file: %s", strerror(errno)); } } qsort(latencies, n_threads * ops_count, sizeof(uint64_t), cmp_u64); /* sentinel */ latencies[ops_count * n_threads] = -1ULL; uint64_t *latm = latencies; for (; !(*latm & PUT_TAG); latm++) ; uint64_t nhits = (uint64_t)(latm - latencies); uint64_t nmiss = n_threads * ops_count - nhits; print_ntiles(f, latencies, nhits); print_ntiles(f, latm, nmiss); if (latency_file) fclose(f); } static void run_bench() { rng_t rng; randomize_r(&rng, seed); vsize_seed = rnd64_r(&rng); os_cond_init(&ready.cond); os_mutex_init(&ready.mutex); ready.wanted = n_threads; cache = vmemcache_new(); vmemcache_set_size(cache, cache_size); vmemcache_set_extent_size(cache, cache_extent_size); vmemcache_set_eviction_policy(cache, (enum vmemcache_repl_p)repl_policy); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_add: %s (%s)", vmemcache_errormsg(), dir); if (latency_samples) { latencies = malloc((ops_count * n_threads + 1) * sizeof(uint64_t)); if (!latencies) UT_FATAL("can't malloc latency ledger"); /* sentinel */ latencies[ops_count * n_threads] = -1ULL; } if (junk_start) { printf("Pre-filling the cache with junk...\n"); char junk[256]; memset(junk, '!' /* arbitrary */, sizeof(junk)); vmemcache_callback_on_evict(cache, on_evict_cb, NULL); uint64_t ndummies = 0; while (!cache_is_full) { ndummies++; vmemcache_put(cache, &ndummies, sizeof(ndummies), junk, sizeof(junk)); } vmemcache_callback_on_evict(cache, NULL, NULL); } vmemcache_bench_set(cache, VMEMCACHE_BENCH_INDEX_ONLY, type <= ST_INDEX); /* memcpy is enabled after warm_up */ vmemcache_bench_set(cache, VMEMCACHE_BENCH_NO_MEMCPY, 1); /* but if there's any warm_up, touch the space once */ if (warm_up) vmemcache_bench_set(cache, VMEMCACHE_BENCH_PREFAULT, 1); printf("Spawning threads...\n"); os_thread_t th[MAX_THREADS]; for (uint64_t i = 0; i < n_threads; i++) { if (os_thread_create(&th[i], 0, worker, (void *)i)) UT_FATAL("thread creation failed: %s", strerror(errno)); } uint64_t total = 0; for (uint64_t i = 0; i < n_threads; i++) { uintptr_t t; if (os_thread_join(&th[i], (void **)&t)) UT_FATAL("thread join failed: %s", strerror(errno)); total += t; } printf("Done.\n"); print_stats(cache); if (latencies) dump_latencies(); vmemcache_delete(cache); printf("Total time: %lu.%09lu s\n", total / NSECPSEC, total % NSECPSEC); total /= n_threads; total /= ops_count; printf("Avg time per op: %lu.%03lu μs\n", total / 1000, total % 1000); free(latencies); } static void print_units(uint64_t x) { if (x == -1ULL) { printf("∞"); return; } const char *units[] = { "", "K", "M", "G", "T", "P", "E" }; int u = 0; while (x && !(x % 1024)) { u++; x /= 1024; } printf("%"PRIu64"%s", x, units[u]); } int main(int argc, const char **argv) { parse_args(argv + 1); if (!n_threads) { n_threads = (uint32_t)sysconf(_SC_NPROCESSORS_ONLN); if (n_threads > MAX_THREADS) n_threads = MAX_THREADS; if (!n_threads) UT_FATAL("can't obtain number of processor cores"); } if (min_size > max_size) UT_FATAL("min_size > max_size"); printf("Parameters:\n %-20s : %s\n", "dir", dir); for (struct param_t *p = params; p->name; p++) { if (!p->var) continue; printf(" %-20s : ", p->name); if (p->enums) { uint64_t nvalid = 0; for (; p->enums[nvalid]; nvalid++) ; if (*p->var < nvalid) printf("%s", p->enums[*p->var]); else printf("enum out of range: %lu", *p->var); } 
else print_units(*p->var); printf("\n"); } lotta_zeroes = mmap(NULL, max_size, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0); if (!lotta_zeroes) { UT_FATAL("couldn't grab a zero buffer: mmap failed: %s", strerror(errno)); } run_bench(); return 0; } vmemcache-0.8.1/benchmarks/benchmark_time.c000066400000000000000000000046571374403322600207210ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * benchmark_time.c -- benchmark_time module definitions */ #include #include #include #include #include "benchmark_time.h" #include "os.h" #define NSECPSEC 1000000000 /* * benchmark_time_get -- get timestamp from clock source */ void benchmark_time_get(benchmark_time_t *time) { os_clock_gettime(CLOCK_MONOTONIC, time); } /* * benchmark_time_diff -- get time interval */ void benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1, benchmark_time_t *t2) { long long nsecs = ((long long)t2->tv_sec - t1->tv_sec) * NSECPSEC + t2->tv_nsec - t1->tv_nsec; assert(nsecs >= 0); d->tv_sec = nsecs / NSECPSEC; d->tv_nsec = nsecs % NSECPSEC; } /* * benchmark_time_get_secs -- get total number of seconds */ double benchmark_time_get_secs(benchmark_time_t *t) { return (double)t->tv_sec + (double)t->tv_nsec / NSECPSEC; } /* * benchmark_time_get_nsecs -- get total number of nanoseconds */ unsigned long long benchmark_time_get_nsecs(benchmark_time_t *t) { unsigned long long ret = (unsigned long long)t->tv_nsec; ret += (unsigned long long)(t->tv_sec * NSECPSEC); return ret; } /* * benchmark_time_compare -- compare two moments in time */ int benchmark_time_compare(const benchmark_time_t *t1, const benchmark_time_t *t2) { if (t1->tv_sec == t2->tv_sec) return (int)((long long)t1->tv_nsec - (long long)t2->tv_nsec); else return (int)((long long)t1->tv_sec - (long long)t2->tv_sec); } /* * benchmark_time_set -- set time using number of nanoseconds */ void benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs) { time->tv_sec = (long)nsecs / NSECPSEC; time->tv_nsec = (long)nsecs % NSECPSEC; } /* * number of samples used to calculate average time required to get a current * time from the system */ #define N_PROBES_GET_TIME 10000000UL /* * benchmark_get_avg_get_time -- calculates average time required to get the * current time from the system in nanoseconds */ unsigned long long benchmark_get_avg_get_time(void) { benchmark_time_t time; benchmark_time_t start; benchmark_time_t stop; benchmark_time_get(&start); for (size_t i = 0; i < N_PROBES_GET_TIME; i++) { benchmark_time_get(&time); } benchmark_time_get(&stop); benchmark_time_diff(&time, &start, &stop); unsigned long long avg = benchmark_time_get_nsecs(&time) / N_PROBES_GET_TIME; return avg; } vmemcache-0.8.1/benchmarks/benchmark_time.h000066400000000000000000000012711374403322600207130ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * benchmark_time.h -- declarations of benchmark_time module */ #include typedef struct timespec benchmark_time_t; void benchmark_time_get(benchmark_time_t *time); void benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1, benchmark_time_t *t2); double benchmark_time_get_secs(benchmark_time_t *t); unsigned long long benchmark_time_get_nsecs(benchmark_time_t *t); int benchmark_time_compare(const benchmark_time_t *t1, const benchmark_time_t *t2); void benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs); unsigned long long benchmark_get_avg_get_time(void); 
vmemcache-0.8.1/benchmarks/rand.c000066400000000000000000000034171374403322600166660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * rand.c -- random utils */ #include #include #include #include #include #include "rand.h" /* * hash64 -- a u64 -> u64 hash */ uint64_t hash64(uint64_t x) { x += 0x9e3779b97f4a7c15; x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9; x = (x ^ (x >> 27)) * 0x94d049bb133111eb; return x ^ (x >> 31); } /* * xoshiro256** random generator * * Fastest available good PRNG as of 2018 (sub-nanosecond per entry), produces * much better output than old stuff like rand() or Mersenne's Twister. * * By David Blackman and Sebastiano Vigna; PD/CC0 2018. */ static inline uint64_t rotl(const uint64_t x, int k) { /* optimized to a single instruction on x86 */ return (x << k) | (x >> (64 - k)); } /* * rnd64 -- return 64-bits of randomness */ uint64_t rnd64_r(rng_t *state) { uint64_t *s = (void *)state; const uint64_t result = rotl(s[1] * 5, 7) * 9; const uint64_t t = s[1] << 17; s[2] ^= s[0]; s[3] ^= s[1]; s[1] ^= s[2]; s[0] ^= s[3]; s[2] ^= t; s[3] = rotl(s[3], 45); return result; } /* * randomize -- initialize random generator * * Seed of 0 means random. */ void randomize_r(rng_t *state, uint64_t seed) { if (!seed) { #ifdef HAVE_GETENTROPY if (!getentropy(state, sizeof(rng_t))) return; /* nofail, but ENOSYS on kernel < 3.16 */ #endif seed = (uint64_t)getpid(); } uint64_t *s = (void *)state; s[0] = hash64(seed); s[1] = hash64(s[0]); s[2] = hash64(s[1]); s[3] = hash64(s[2]); } /* * n_lowest_bits -- return n lowest 1 bits * * When applied to random numbers, this puts them into nice uneven buckets. */ uint64_t n_lowest_bits(uint64_t x, int n) { uint64_t y = x; while (n-- > 0) y &= y - 1; return x ^ y; } vmemcache-0.8.1/benchmarks/rand.h000066400000000000000000000005411374403322600166660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * rand.h -- random utils */ #ifndef RAND_H #define RAND_H 1 #include typedef uint64_t rng_t[4]; uint64_t hash64(uint64_t x); uint64_t rnd64_r(rng_t *rng); void randomize_r(rng_t *rng, uint64_t seed); uint64_t n_lowest_bits(uint64_t x, int n); #endif vmemcache-0.8.1/benchmarks/tools/000077500000000000000000000000001374403322600167315ustar00rootroot00000000000000vmemcache-0.8.1/benchmarks/tools/.gitignore000066400000000000000000000000171374403322600207170ustar00rootroot00000000000000benchconfig.py vmemcache-0.8.1/benchmarks/tools/benchconfig.py.example000066400000000000000000000010101374403322600231720ustar00rootroot00000000000000""" Example file for latency_run configuration. All provided configurations need to be dictionaries starting with 'bench_'. Keys and values are directly translated to 'bench_simul' command line arguments except: * 'testdir': test directory path (required) * 'numa_node': NUMA node number on which the benchmark is run (optional) """ bench_16_threads = { 'testdir': '/tmp', # 'numa_node': 0, 'n_threads': 16 } bench_4_threads = { 'testdir': '/tmp', 'n_threads': 4, 'junk_start': 1 } vmemcache-0.8.1/benchmarks/tools/latency_plot.py000077500000000000000000000057551374403322600220170ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation """ latency_plot.py - tool for drawing latency benchmarks plots based on output generated by 'bench_simul' and written to a file provided with 'latency_file' argument. 
""" import argparse import matplotlib.pyplot as plt MARKERS = ('o', '^', 's', 'D', 'X') CUR_MARKER = 0 def _add_series(series, label, marker): """Add data series to plot""" plt.plot(series, label=label, marker=marker, linestyle=':', linewidth=0.5, markersize=4) def draw_plot(yscale='linear'): """Draw a plot of all previously added data series""" plt.yscale(yscale) plt.xticks(list(range(0, 101, 5))) plt.xlabel('percentile [%]') plt.grid(True) plt.ylabel('operation time [ns]') plt.legend() plt.show() def _parse_args(): """Parse command line arguments""" parser = argparse.ArgumentParser() parser.add_argument('out', nargs='*', help='Create a plot for all provided' ' output files') parser.add_argument('--yscale', '-y', help='Y-axis scale', default='linear') parser.add_argument('--hits', help='Draw hits', dest='hits', action='store_true') parser.add_argument('--no-hits', help='Do not draw hits', dest='hits', action='store_false') parser.set_defaults(hits=True) parser.add_argument('--ltrim', help='Remove a number of smallest latency values from the plot', default=0, type=int) parser.add_argument('--rtrim', help='Remove a number of biggest latency values from the plot', default=0, type=int) parser.add_argument('--misses', help='Draw misses', dest='misses', action='store_true') parser.add_argument('--no-misses', help='Do not draw misses', dest='misses', action='store_false') parser.set_defaults(misses=True) args = parser.parse_args() if not args.out: parser.error('at least one output need to be provided') return args def _read_out(path): """Read 'latency_file' output file""" with open(path, 'r') as f: out = f.readlines() hits = [float(h) for h in out[0].split(';')] misses = [float(m) for m in out[1].split(';')] return hits, misses def add_data(output, name, hits=True, misses=True, ltrim=0, rtrim=0): """Add data from 'latency_file' output file to plot""" global CUR_MARKER h, m = _read_out(output) if ltrim: h, m = h[ltrim:], m[ltrim:] if rtrim: h, m = h[:-rtrim], m[:-rtrim] if hits: _add_series(h, '{}_hits'.format(name), MARKERS[CUR_MARKER % len(MARKERS)]) if misses: _add_series(m, '{}_misses'.format(name), MARKERS[CUR_MARKER % len(MARKERS)]) # use different marker for each plotted benchmark data CUR_MARKER += 1 def _main(): args = _parse_args() for out in args.out: add_data(out, out, args.hits, args.misses, args.ltrim, args.rtrim) draw_plot(args.yscale) if __name__ == '__main__': _main() vmemcache-0.8.1/benchmarks/tools/latency_run.py000077500000000000000000000077041374403322600216410ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation """ latency_run.py - tool for running latency benchmarks and plotting them with use of latency_plot.py. Benchmark configurations are stored in benchconfig.py file as separate dictionaries with names starting with 'bench_'. Configuration key 'testdir' is a benchmark test directory. All other keys and their values are directly translated into 'bench_simul' command line arguments. 
""" import sys import os import argparse import subprocess as sp import latency_plot as lp try: import benchconfig except ImportError: sys.exit('no benchconfig.py file provided') class Benchmark(): """Benchmark management""" def __init__(self, bin_, cfg): self.bin = bin_ self.cmd = '' self.handle_special_keys(cfg) for k, v in cfg.items(): self.cmd = self.cmd + ' {}={}'.format(k, v) if 'latency_samples' not in cfg: self.cmd = self.cmd + ' latency_samples=101' def handle_special_keys(self, cfg): """ Handle configuration keys that are NOT a direct representations of bench_simul command line arguments """ try: self.title = cfg.pop('title') cfg['latency_file'] = '{}.log'.format(self.title) self.out = cfg['latency_file'] self.cmd = '{} {}'.format(self.bin, cfg.pop('testdir')) if 'numa_node' in cfg: self.cmd = 'numactl -N {} {}'.format(cfg.pop('numa_node'), self.cmd) except KeyError as e: sys.exit('No "{}" key provided to configuration'.format(e.args[0])) def add_to_plot(self): """Add benchmark to plot""" lp.add_data(self.out, self.title) def run(self, verbose): """Run benchmark""" if verbose: print(self.cmd) proc = sp.run(self.cmd.split(' '), universal_newlines=True, stdout=sp.PIPE, stderr=sp.STDOUT) if proc.returncode != 0: sys.exit('benchmark failed: {}{}{}'.format(self.cmd, os.linesep, proc.stdout)) if verbose: print('{}{}{}'.format(self.cmd, os.linesep, proc.stdout)) def parse_config(): """Read configurations from benchconfig.py file""" cfgs = [] for k, v in vars(benchconfig).items(): if k.startswith('bench_') and isinstance(v, dict): v['title'] = k.split('bench_')[1] cfgs.append(v) if not cfgs: sys.exit('No configs found in benchconfig.py - all configs should ' 'be dictionaries starting with "bench_"') return cfgs def parse_args(): """Parse command line arguments""" parser = argparse.ArgumentParser() parser.add_argument('--bin', '-b', help='path to bench_simul binary', required=True) parser.add_argument('--config', '-c', nargs='*', help="run only selected " "configs from benchconfig (provide name without " "'bench' at the beginning)") parser.add_argument('--yscale', '-y', help='Y-axis scale', default='linear') parser.add_argument('--verbose', '-v', help='Print bench_simul output', action='store_true') args = parser.parse_args() return args def main(): args = parse_args() file_cfgs = parse_config() if args.config: cfgs = [c for c in file_cfgs if c['title'] in args.config] if len(args.config) != len(cfgs): titles = os.linesep.join([c['title'] for c in file_cfgs]) sys.exit('Invalid configuration provided, configurations defined' ' in benchconfig.py:{}{}'.format(os.linesep, titles)) else: cfgs = file_cfgs for c in cfgs: b = Benchmark(args.bin, c) b.run(args.verbose) b.add_to_plot() lp.draw_plot(args.yscale) if __name__ == '__main__': main() vmemcache-0.8.1/cmake_uninstall.cmake.in000066400000000000000000000020241374403322600202320ustar00rootroot00000000000000# From: https://cmake.org/Wiki/CMake_FAQ if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") message(FATAL_ERROR "Cannot find install manifest: @CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") endif(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) string(REGEX REPLACE "\n" ";" files "${files}") foreach(file ${files}) message(STATUS "Uninstalling $ENV{DESTDIR}${file}") if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") exec_program("@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" OUTPUT_VARIABLE rm_out RETURN_VALUE 
rm_retval ) if(NOT "${rm_retval}" STREQUAL 0) message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") endif(NOT "${rm_retval}" STREQUAL 0) else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") message(STATUS "File $ENV{DESTDIR}${file} does not exist.") endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") endforeach(file) vmemcache-0.8.1/codecov.yml000066400000000000000000000001511374403322600156160ustar00rootroot00000000000000ignore: - tests/ - benchmarks/ comment: layout: "diff" behavior: default require_changes: yes vmemcache-0.8.1/doc/000077500000000000000000000000001374403322600142215ustar00rootroot00000000000000vmemcache-0.8.1/doc/CMakeLists.txt000066400000000000000000000005661374403322600167700ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation add_custom_target(doc ALL BYPRODUCTS ${CMAKE_CURRENT_BINARY_DIR}/vmemcache.3 COMMENT "Building vmemcache.3" COMMAND ${CMAKE_SOURCE_DIR}/utils/md2man/md2man.sh ${CMAKE_CURRENT_SOURCE_DIR}/vmemcache.md ${CMAKE_SOURCE_DIR}/utils/md2man/default.man ${CMAKE_CURRENT_BINARY_DIR}/vmemcache.3 ) vmemcache-0.8.1/doc/vmemcache.md000066400000000000000000000167061374403322600165050ustar00rootroot00000000000000--- layout: manual Content-Style: 'text/css' title: _MP(VMEMCACHE.3) collection: vmemcache header: VMEMCACHE ... [NAME](#name)
[SYNOPSIS](#synopsis)
[DESCRIPTION](#description)
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause) [comment]: <> (Copyright 2019, Intel Corporation) # NAME # **vmemcache** - buffer-based LRU cache # SYNOPSIS # ```c #include VMEMcache *vmemcache_new(); void vmemcache_delete(VMEMcache *cache); int vmemcache_set_eviction_policy(VMEMcache *cache, enum vmemcache_repl_p repl_p); int vmemcache_set_size(VMEMcache *cache, size_t size); int vmemcache_set_extent_size(VMEMcache *cache, size_t extent_size); int vmemcache_add(VMEMcache *cache, const char *path); void vmemcache_callback_on_evict(VMEMcache *cache, vmemcache_on_evict *evict, void *arg); void vmemcache_callback_on_miss(VMEMcache *cache, vmemcache_on_miss *miss, void *arg); ssize_t vmemcache_get(VMEMcache *cache, const void *key, size_t key_size, void *vbuf, size_t vbufsize, size_t offset, size_t *vsize); int vmemcache_put(VMEMcache *cache, const void *key, size_t key_size, const void *value, size_t value_size); int vmemcache_exists(VMEMcache *cache, const void *key, size_t key_size); int vmemcache_evict(VMEMcache *cache, const void *key, size_t ksize); int vmemcache_get_stat(VMEMcache *cache, enum vmemcache_statistic stat, void *value, size_t value_size); const char *vmemcache_errormsg(void); ``` # DESCRIPTION # **libvmemcache** is a volatile key-value store optimized for operating on NVDIMM based space, although it can work with any filesystem, stored in memory (tmpfs) or, less performant, on some kind of a disk. ##### Creation ##### `VMEMcache *vmemcache_new();` : Creates an empty unconfigured vmemcache instance. `int vmemcache_set_size(VMEMcache *cache, size_t size);` : Sets the size of the cache; it will be rounded **up** towards a whole page size alignment (4KB on x86). `int vmemcache_set_extent_size(VMEMcache *cache, size_t extent_size);` : Sets block size of the cache -- 256 bytes minimum, strongly recommended to be a multiple of 64 bytes. If the cache is backed by a non byte-addressable medium, the extent size should be 4096 (or a multiple) or performance will greatly suffer. `int vmemcache_set_eviction_policy(VMEMcache *cache, enum vmemcache_repl_p repl_p);` : Sets what should happen on a put into a full cache. + **VMEMCACHE_REPLACEMENT_NONE**: manual eviction only - puts into a full cache will fail + **VMEMCACHE_REPLACEMENT_LRU**: least recently accessed entry will be evicted to make space when needed `int vmemcache_add(VMEMcache *cache, const char *path);` : Associate the cache with a backing medium in the given *path*, which may be: + a `/dev/dax` device + a directory on a regular filesystem (which may or may not be mounted with -o dax, either on persistent memory or any other backing storage) `void vmemcache_delete(VMEMcache *cache);` : Frees any structures associated with the cache. ##### Use ##### `ssize_t vmemcache_get(VMEMcache *cache, const void *key, size_t key_size, void *vbuf, size_t vbufsize, size_t offset, size_t *vsize);` : Searches for an entry with the given *key*; it doesn't have to be zero-terminated or be text - any sequence of bytes of length *key_size* is okay. If found, the entry's value is copied to *vbuf* that has space for *vbufsize* bytes, optionally skipping *offset* bytes at the start. No matter if the copy was truncated or not, its true size is stored into *vsize*; *vsize* remains unmodified if the key was not found. Return value is number of bytes successfully copied, or -1 on error. In particular, if there's no entry for the given *key* in the cache, the errno will be ENOENT. 
`int vmemcache_put(VMEMcache *cache, const void *key, size_t key_size, const void *value, size_t value_size);` : Inserts the given key:value pair into the cache. Returns 0 on success, -1 on error. Inserting a key that already exists will fail with EEXIST. `int vmemcache_exists(VMEMcache *cache, const void *key, size_t key_size, size_t *vsize);` : Searches for an entry with the given *key*, and returns 1 if found, 0 if not found, and -1 if search couldn't be performed. The size of the found entry is stored into *vsize*; *vsize* remains unmodified if the key was not found. This function does not impact the replacement policy or statistics. `int vmemcache_evict(VMEMcache *cache, const void *key, size_t ksize);` : Removes the given key from the cache. If *key* is null and there is a replacement policy set, the oldest entry will be removed. Returns 0 if an entry has been evicted, -1 otherwise. ##### Callbacks ##### You can register a hook to be called during eviction or after a cache miss, using **vmemcache_callback_on_evict()** or **vmemcache_callback_on_miss()**, respectively: `void vmemcache_callback_on_evict(VMEMcache *cache, vmemcache_on_evict *evict, void *arg);` `void vmemcache_callback_on_miss(VMEMcache *cache, vmemcache_on_miss *miss, void *arg);` The extra *arg* will be passed to your function. A hook to be called during eviction has to have the following signature: `void vmemcache_on_evict(VMEMcache *cache, const void *key, size_t key_size, void *arg);` : Called when an entry is being removed from the cache. The eviction can't be prevented, but until the callback returns, the entry remains available for queries. The thread that triggered the eviction is blocked in the meantime. A hook to be called after a cache miss has to have the following signature: `void vmemcache_on_miss(VMEMcache *cache, const void *key, size_t key_size, void *arg);` : Called when a *get* query fails, to provide an opportunity to insert the missing key. If the callback calls *put* for that specific key, the *get* will return its value, even if it did not fit into the cache. ##### Misc ##### `int vmemcache_get_stat(VMEMcache *cache, enum vmemcache_statistic stat, void *value, size_t value_size);` : Obtains a piece of statistics about the cache. The *stat* may be: + **VMEMCACHE_STAT_PUT** -- count of puts + **VMEMCACHE_STAT_GET** -- count of gets + **VMEMCACHE_STAT_HIT** -- count of gets that were served from the cache + **VMEMCACHE_STAT_MISS** -- count of gets that were not present in the cache + **VMEMCACHE_STAT_EVICT** -- count of evictions + **VMEMCACHE_STAT_ENTRIES** -- *current* number of cache entries (key:value pairs) + **VMEMCACHE_STAT_DRAM_SIZE_USED** -- current amount of DRAM used + **VMEMCACHE_STAT_POOL_SIZE_USED** -- current usage of data pool + **VMEMCACHE_STAT_HEAP_ENTRIES** -- current number of discontiguous unused regions (ie, free space fragmentation) Statistics are enabled by default. They can be disabled at the compile time of the vmemcache library if the **STATS_ENABLED** CMake option is set to OFF. `const char *vmemcache_errormsg(void);` : Retrieves a human-friendly description of the last error. ##### Errors ##### On an error, a machine-usable description is passed in `errno`. 
##### Errors #####

On an error, a machine-usable description is passed in `errno`. It may be:

+ **EINVAL** -- nonsensical/invalid parameter
+ **ENOMEM** -- out of DRAM
+ **EEXIST** -- (put) an entry for that key already exists
+ **ENOENT** -- (evict, get) no entry for that key
+ **ESRCH** -- (evict) could not find an evictable entry
+ **EAGAIN** -- (evict) an entry was in use and could not be evicted; please try again
+ **ENOSPC** -- (create, put) not enough space in the memory pool
vmemcache-0.8.1/libvmemcache.pc.in000066400000000000000000000006401374403322600170240ustar00rootroot00000000000000prefix=@CMAKE_INSTALL_PREFIX@
libdir=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@
version=@VERSION@
includedir=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@

Name: libvmemcache
Description: libvmemcache - buffer-based LRU cache
Version: @VERSION@
URL: http://github.com/pmem/vmemcache
Libs: -L@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@ -lvmemcache
Cflags: -I@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@
vmemcache-0.8.1/packages.cmake000066400000000000000000000040271374403322600162370ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018-2019, Intel Corporation

#
# packages.cmake - CPack configuration for rpm and deb generation
#

string(TOUPPER "${CPACK_GENERATOR}" CPACK_GENERATOR)

if(NOT ("${CPACK_GENERATOR}" STREQUAL "DEB" OR
	"${CPACK_GENERATOR}" STREQUAL "RPM"))
	message(FATAL_ERROR "Wrong CPACK_GENERATOR value, valid generators are: DEB, RPM")
endif()

set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
set(CMAKE_INSTALL_TMPDIR /tmp CACHE PATH "Output dir for tmp")
set(CPACK_COMPONENTS_ALL_IN_ONE)

# Filter out some of the directories from the %dir section, which are expected
# to exist in the filesystem. Leaving them might lead to conflicts with other
# packages (for example with the 'filesystem' package on Fedora, which
# specifies /usr, /usr/local, etc.)
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION ${CPACK_PACKAGING_INSTALL_PREFIX} ${CPACK_PACKAGING_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} ${CPACK_PACKAGING_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/pkgconfig ${CPACK_PACKAGING_INSTALL_PREFIX}/${CMAKE_INSTALL_INCDIR} ${CPACK_PACKAGING_INSTALL_PREFIX}/share ${CPACK_PACKAGING_INSTALL_PREFIX}/share/doc) set(CPACK_PACKAGE_NAME "libvmemcache") set(CPACK_PACKAGE_VERSION ${VERSION}) set(CPACK_PACKAGE_VERSION_MAJOR ${VERSION_MAJOR}) set(CPACK_PACKAGE_VERSION_MINOR ${VERSION_MINOR}) set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Buffer-based LRU cache") set(CPACK_PACKAGE_VENDOR "self-built") set(CPACK_RPM_PACKAGE_NAME "libvmemcache") set(CPACK_RPM_PACKAGE_LICENSE "BSD") set(CPACK_DEBIAN_PACKAGE_NAME "libvmemcache") set(CPACK_DEBIAN_PACKAGE_VERSION ${CPACK_PACKAGE_VERSION}) set(CPACK_DEBIAN_PACKAGE_MAINTAINER "lukasz.dorau@intel.com") if("${CPACK_GENERATOR}" STREQUAL "RPM") set(CPACK_PACKAGE_FILE_NAME ${CPACK_RPM_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}.${CPACK_RPM_PACKAGE_ARCHITECTURE}) elseif("${CPACK_GENERATOR}" STREQUAL "DEB") set(CPACK_PACKAGE_FILE_NAME ${CPACK_DEBIAN_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}) endif() set(targetDestDir ${CMAKE_INSTALL_TMPDIR}) include(CPack) vmemcache-0.8.1/src/000077500000000000000000000000001374403322600142435ustar00rootroot00000000000000vmemcache-0.8.1/src/CMakeLists.txt000066400000000000000000000020741374403322600170060ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation cmake_minimum_required(VERSION 3.3) project(vmemcache C) add_cstyle(src) add_check_whitespace(src) set(SOURCES out.c os_posix.c os_thread_posix.c util.c util_posix.c file.c file_posix.c fast-hash.c mmap.c mmap_posix.c libvmemcache.c critnib.c ringbuf.c vmemcache.c vmemcache_heap.c vmemcache_index.c vmemcache_repl.c) add_library(vmemcache SHARED ${SOURCES}) target_link_libraries(vmemcache PRIVATE ${CMAKE_THREAD_LIBS_INIT} -Wl,--version-script=${CMAKE_SOURCE_DIR}/src/libvmemcache.map) set_target_properties(vmemcache PROPERTIES SOVERSION 0) target_compile_definitions(vmemcache PRIVATE SRCVERSION="${VERSION}") if(STATS_ENABLED) target_compile_definitions(vmemcache PRIVATE STATS_ENABLED=1) endif() if(VALGRIND_FOUND) target_compile_definitions(vmemcache PRIVATE VALGRIND_ENABLED=1) endif() install(TARGETS vmemcache LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} DESTINATION ${CMAKE_INSTALL_LIBDIR}/) install(FILES libvmemcache.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) vmemcache-0.8.1/src/common.h000066400000000000000000000011341374403322600157030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * common.h -- common definitions */ #ifndef COMMON_H #define COMMON_H 1 #include "util.h" #include "out.h" #include "mmap.h" #ifdef __cplusplus extern "C" { #endif static inline void common_init(const char *log_prefix, const char *log_level_var, const char *log_file_var, int major_version, int minor_version) { util_init(); out_init(log_prefix, log_level_var, log_file_var, major_version, minor_version); } static inline void common_fini(void) { out_fini(); } #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/critnib.c000066400000000000000000000142471374403322600160510ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ #include #include #include #include #include "os_thread.h" #include "util.h" #include "out.h" #include "critnib.h" /* * WARNING: this 
implementation fails badly if you try to store two keys * where one is a prefix of another. Pass a struct { int len; char[] key; } * or such if such keys are possible. */ #define NIB ((1 << SLICE) - 1) #define KEYLEN(leaf) (leaf->key.ksize + sizeof(size_t)) typedef struct cache_entry critnib_leaf; /* * is_leaf -- (internal) check tagged pointer for leafness */ static inline bool is_leaf(struct critnib_node *n) { return (uintptr_t)n & 1; } /* * to_leaf -- (internal) untag a leaf pointer */ static inline critnib_leaf * to_leaf(struct critnib_node *n) { return (void *)((uintptr_t)n & ~1ULL); } /* * slice_index -- (internal) get index of radix child at a given shift */ static inline int slice_index(char b, bitn_t bit) { return (b >> bit) & NIB; } /* * critnib_new -- allocate a new hashmap */ struct critnib * critnib_new(void) { struct critnib *c = Zalloc(sizeof(struct critnib)); if (!c) return NULL; return c; } /* * delete_node -- (internal) recursively free a subtree */ static void delete_node(struct critnib_node *n, delete_entry_t del) { if (!n) return; if (is_leaf(n)) { if (del) del(to_leaf(n)); return; } for (int i = 0; i < SLNODES; i++) delete_node(n->child[i], del); Free(n); } /* * critnib_delete -- free a hashmap */ void critnib_delete(struct critnib *c, delete_entry_t del) { delete_node(c->root, del); Free(c); } /* * alloc_node -- (internal) alloc a node */ static struct critnib_node * alloc_node(struct critnib *c) { struct critnib_node *n = Zalloc(sizeof(struct critnib_node)); if (!n) return NULL; #ifdef STATS_ENABLED c->node_count++; #endif return n; } /* * any_leaf -- (internal) find any leaf in a subtree * * We know they're all identical up to the divergence point between a prefix * shared by all of them vs the new key we're inserting. */ static struct critnib_node * any_leaf(struct critnib_node *n) { for (int i = 0; i < SLNODES; i++) { struct critnib_node *m; if ((m = n->child[i])) return is_leaf(m) ? m : any_leaf(m); } return NULL; } /* * critnib_set -- insert a new entry */ int critnib_set(struct critnib *c, struct cache_entry *e) { const char *key = (void *)&e->key; byten_t key_len = (byten_t)KEYLEN(e); critnib_leaf *k = (void *)((uintptr_t)e | 1); struct critnib_node *n = c->root; if (!n) { c->root = (void *)k; return 0; } /* * Need to descend the tree twice: first to find a leaf that * represents a subtree whose all keys share a prefix at least as * long as the one common to the new key and that subtree. */ while (!is_leaf(n) && n->byte < key_len) { struct critnib_node *nn = n->child[slice_index(key[n->byte], n->bit)]; if (nn) n = nn; else { n = any_leaf(n); break; } } ASSERT(n); if (!is_leaf(n)) n = any_leaf(n); ASSERT(n); ASSERT(is_leaf(n)); critnib_leaf *nk = to_leaf(n); const char *nkey = (void *)&nk->key; /* Find the divergence point, accurate to a byte. */ byten_t common_len = ((byten_t)KEYLEN(nk) < key_len) ? (byten_t)KEYLEN(nk) : key_len; byten_t diff; for (diff = 0; diff < common_len; diff++) { if (nkey[diff] != key[diff]) break; } if (diff >= common_len) { /* * Either an update or a conflict between keys being a * prefix of each other. */ return EEXIST; } /* Calculate the divergence point within the single byte. */ char at = nkey[diff] ^ key[diff]; bitn_t sh = util_mssb_index((uint32_t)(uint8_t)at) & (bitn_t)~(SLICE - 1); /* Descend into the tree again. 
*/ n = c->root; struct critnib_node **parent = &c->root; while (n && !is_leaf(n) && (n->byte < diff || (n->byte == diff && n->bit >= sh))) { parent = &n->child[slice_index(key[n->byte], n->bit)]; n = *parent; } /* * If the divergence point is at same nib as an existing node, and * the subtree there is empty, just place our leaf there and we're * done. Obviously this can't happen if SLICE == 1. */ if (!n) { *parent = (void *)k; return 0; } /* If not, we need to insert a new node in the middle of an edge. */ if (!(n = alloc_node(c))) return ENOMEM; n->child[slice_index(nkey[diff], sh)] = *parent; n->child[slice_index(key[diff], sh)] = (void *)k; n->byte = diff; n->bit = sh; *parent = n; return 0; } /* * critnib_get -- query a key */ void * critnib_get(struct critnib *c, const struct cache_entry *e) { const char *key = (void *)&e->key; byten_t key_len = (byten_t)KEYLEN(e); struct critnib_node *n = c->root; while (n && !is_leaf(n)) { if (n->byte >= key_len) return NULL; n = n->child[slice_index(key[n->byte], n->bit)]; } if (!n) return NULL; critnib_leaf *k = to_leaf(n); /* * We checked only nibs at divergence points, have to re-check the * whole key. */ return (key_len != KEYLEN(k) || memcmp(key, (void *)&k->key, key_len)) ? NULL : k; } /* * critnib_remove -- query and delete a key * * Neither the key nor its value are freed, just our private nodes. */ void * critnib_remove(struct critnib *c, const struct cache_entry *e) { const char *key = (void *)&e->key; byten_t key_len = (byten_t)KEYLEN(e); struct critnib_node **pp = NULL; struct critnib_node *n = c->root; struct critnib_node **parent = &c->root; /* First, do a get. */ while (n && !is_leaf(n)) { if (n->byte >= key_len) return NULL; pp = parent; parent = &n->child[slice_index(key[n->byte], n->bit)]; n = *parent; } if (!n) return NULL; critnib_leaf *k = to_leaf(n); if (key_len != KEYLEN(k) || memcmp(key, (void *)&k->key, key_len)) return NULL; /* Remove the entry (leaf). */ *parent = NULL; if (!pp) /* was root */ return k; /* Check if after deletion the node has just a single child left. */ n = *pp; struct critnib_node *only_child = NULL; for (int i = 0; i < SLNODES; i++) { if (n->child[i]) { if (only_child) /* Nope. */ return k; only_child = n->child[i]; } } /* Yes -- shorten the tree's edge. */ ASSERT(only_child); *pp = only_child; Free(n); #ifdef STATS_ENABLED c->node_count--; #endif return k; } vmemcache-0.8.1/src/critnib.h000066400000000000000000000022401374403322600160440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ #ifndef CRITNIB_H #define CRITNIB_H #include "vmemcache.h" #include "os_thread.h" /* * SLICE may be 1, 2, 4 or 8. 1 or 8 could be further optimized (critbit * and critbyte respectively); 4 (critnib) strikes a good balance between * speed and memory use. */ #define SLICE 4 #define SLNODES (1 << SLICE) typedef uint32_t byten_t; typedef unsigned char bitn_t; struct critnib_node { struct critnib_node *child[SLNODES]; byten_t byte; bitn_t bit; }; struct critnib { struct critnib_node *root; os_rwlock_t lock; size_t leaf_count; /* entries */ size_t node_count; /* internal nodes only */ size_t DRAM_usage; /* ... 
of leaves (nodes are constant-sized) */ /* operation counts */ size_t put_count; size_t evict_count; size_t hit_count; size_t miss_count; }; struct cache_entry; struct critnib *critnib_new(void); void critnib_delete(struct critnib *c, delete_entry_t del); int critnib_set(struct critnib *c, struct cache_entry *e); void *critnib_get(struct critnib *c, const struct cache_entry *e); void *critnib_remove(struct critnib *c, const struct cache_entry *e); #endif vmemcache-0.8.1/src/fast-hash.c000066400000000000000000000036531374403322600162740ustar00rootroot00000000000000/* * The fast-hash algorithm is covered by the MIT License: * * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com) * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "fast-hash.h" #include /* * mix -- (internal) helper for the fast-hash mixing step */ static inline uint64_t mix(uint64_t h) { h ^= h >> 23; h *= 0x2127599bf4325c37ULL; return h ^ h >> 47; } /* * hash -- calculate the hash of a piece of memory */ uint64_t hash(size_t key_size, const char *key) { /* fast-hash, by Zilong Tan */ const uint64_t m = 0x880355f21e6d1965ULL; const uint64_t *pos = (const uint64_t *)key; const uint64_t *end = pos + (key_size / 8); uint64_t h = key_size * m; while (pos != end) h = (h ^ mix(*pos++)) * m; if (key_size & 7) { uint64_t shift = (key_size & 7) * 8; uint64_t mask = (1ULL << shift) - 1; uint64_t v = htole64(*pos) & mask; h = (h ^ mix(v)) * m; } return mix(h); } vmemcache-0.8.1/src/fast-hash.h000066400000000000000000000025021374403322600162710ustar00rootroot00000000000000/* * The fast-hash algorithm is covered by the MIT License: * * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com) * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef FAST_HASH_H #define FAST_HASH_H 1 #include #include uint64_t hash(size_t key_size, const char *key); #endif vmemcache-0.8.1/src/file.c000066400000000000000000000311251374403322600153300ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * file.c -- file utilities */ #include #include #include #include #include #include #include #include #include #if !defined(_WIN32) && !defined(__FreeBSD__) #include #endif #include "file.h" #include "os.h" #include "out.h" #include "mmap.h" #define MAX_SIZE_LENGTH 64 #if 0 #define DEVICE_DAX_ZERO_LEN (2 * MEGABYTE) #endif #ifndef _WIN32 /* * device_dax_size -- (internal) checks the size of a given dax device */ static ssize_t device_dax_size(const char *path) { LOG(3, "path \"%s\"", path); os_stat_t st; int olderrno; if (os_stat(path, &st) < 0) { ERR("!stat \"%s\"", path); return -1; } char spath[PATH_MAX]; snprintf(spath, PATH_MAX, "/sys/dev/char/%u:%u/size", os_major(st.st_rdev), os_minor(st.st_rdev)); LOG(4, "device size path \"%s\"", spath); int fd = os_open(spath, O_RDONLY); if (fd < 0) { ERR("!open \"%s\"", spath); return -1; } ssize_t size = -1; char sizebuf[MAX_SIZE_LENGTH + 1]; ssize_t nread; if ((nread = read(fd, sizebuf, MAX_SIZE_LENGTH)) < 0) { ERR("!read"); goto out; } sizebuf[nread] = 0; /* null termination */ char *endptr; olderrno = errno; errno = 0; size = strtoll(sizebuf, &endptr, 0); if (endptr == sizebuf || *endptr != '\n' || ((size == LLONG_MAX || size == LLONG_MIN) && errno == ERANGE)) { ERR("invalid device size %s", sizebuf); size = -1; goto out; } errno = olderrno; out: olderrno = errno; (void) os_close(fd); errno = olderrno; LOG(4, "device size %zu", size); return size; } #endif /* * util_file_exists -- checks whether file exists */ int util_file_exists(const char *path) { LOG(3, "path \"%s\"", path); if (os_access(path, F_OK) == 0) return 1; if (errno != ENOENT) { ERR("!os_access \"%s\"", path); return -1; } /* * ENOENT means that some component of a pathname does not exists. * * XXX - we should also call os_access on parent directory and * if this also results in ENOENT -1 should be returned. * * The problem is that we would need to use realpath, which fails * if file does not exist. 
*/ return 0; } /* * get_file_type_internal -- (internal) checks whether stat structure describes * device dax or a normal file */ static enum file_type get_file_type_internal(os_stat_t *st) { #ifdef _WIN32 return TYPE_NORMAL; #else if (!S_ISCHR(st->st_mode)) { LOG(4, "not a character device"); return TYPE_NORMAL; } char spath[PATH_MAX]; snprintf(spath, PATH_MAX, "/sys/dev/char/%u:%u/subsystem", os_major(st->st_rdev), os_minor(st->st_rdev)); LOG(4, "device subsystem path \"%s\"", spath); char npath[PATH_MAX]; char *rpath = realpath(spath, npath); if (rpath == NULL) { ERR("!realpath \"%s\"", spath); return OTHER_ERROR; } char *basename = strrchr(rpath, '/'); if (!basename || strcmp("dax", basename + 1) != 0) { LOG(3, "%s path does not match device dax prefix path", rpath); errno = EINVAL; return OTHER_ERROR; } return TYPE_DEVDAX; #endif } #if 0 /* * util_fd_get_type -- checks whether a file descriptor is associated * with a device dax or a normal file */ enum file_type util_fd_get_type(int fd) { LOG(3, "fd %d", fd); #ifdef _WIN32 return TYPE_NORMAL; #else os_stat_t st; if (os_fstat(fd, &st) < 0) { ERR("!fstat"); return OTHER_ERROR; } return get_file_type_internal(&st); #endif } #endif /* * util_file_get_type -- checks whether the path points to a device dax, * normal file or non-existent file */ enum file_type util_file_get_type(const char *path) { LOG(3, "path \"%s\"", path); if (path == NULL) { ERR("invalid (NULL) path"); errno = EINVAL; return OTHER_ERROR; } int exists = util_file_exists(path); if (exists < 0) return OTHER_ERROR; if (!exists) return NOT_EXISTS; #ifdef _WIN32 return TYPE_NORMAL; #else os_stat_t st; if (os_stat(path, &st) < 0) { ERR("!stat"); return OTHER_ERROR; } return get_file_type_internal(&st); #endif } /* * util_file_get_size -- returns size of a file */ ssize_t util_file_get_size(const char *path) { LOG(3, "path \"%s\"", path); int file_type = util_file_get_type(path); if (file_type < 0) return -1; #ifndef _WIN32 if (file_type == TYPE_DEVDAX) { return device_dax_size(path); } #endif os_stat_t stbuf; if (os_stat(path, &stbuf) < 0) { ERR("!stat \"%s\"", path); return -1; } LOG(4, "file length %zu", stbuf.st_size); return stbuf.st_size; } /* * util_file_map_whole -- maps the entire file into memory */ void * util_file_map_whole(const char *path) { LOG(3, "path \"%s\"", path); int fd; int olderrno; void *addr = NULL; if ((fd = os_open(path, O_RDWR)) < 0) { ERR("!open \"%s\"", path); return NULL; } ssize_t size = util_file_get_size(path); if (size < 0) { LOG(2, "cannot determine file length \"%s\"", path); goto out; } addr = util_map(fd, (size_t)size, MAP_SHARED, 0, 0, NULL); if (addr == NULL) { LOG(2, "failed to map entire file \"%s\"", path); goto out; } out: olderrno = errno; (void) os_close(fd); errno = olderrno; return addr; } #if 0 /* * util_file_zero -- zeroes the specified region of the file */ int util_file_zero(const char *path, os_off_t off, size_t len) { LOG(3, "path \"%s\" off %ju len %zu", path, off, len); int fd; int olderrno; int ret = 0; if ((fd = os_open(path, O_RDWR)) < 0) { ERR("!open \"%s\"", path); return -1; } ssize_t size = util_file_get_size(path); if (size < 0) { LOG(2, "cannot determine file length \"%s\"", path); ret = -1; goto out; } if (off > size) { LOG(2, "offset beyond file length, %ju > %ju", off, size); ret = -1; goto out; } if ((size_t)off + len > (size_t)size) { LOG(2, "requested size of write goes beyond the file length, " "%zu > %zu", (size_t)off + len, size); LOG(4, "adjusting len to %zu", size - off); len = (size_t)(size - off); } 
void *addr = util_map(fd, (size_t)size, MAP_SHARED, 0, 0, NULL); if (addr == NULL) { LOG(2, "failed to map entire file \"%s\"", path); ret = -1; goto out; } /* zero initialize the specified region */ memset((char *)addr + off, 0, len); util_unmap(addr, (size_t)size); out: olderrno = errno; (void) os_close(fd); errno = olderrno; return ret; } /* * util_file_pwrite -- writes to a file with an offset */ ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size, os_off_t offset) { LOG(3, "path \"%s\" buffer %p size %zu offset %ju", path, buffer, size, offset); enum file_type type = util_file_get_type(path); if (type < 0) return -1; if (type == TYPE_NORMAL) { int fd = util_file_open(path, NULL, 0, O_RDWR); if (fd < 0) { LOG(2, "failed to open file \"%s\"", path); return -1; } ssize_t write_len = pwrite(fd, buffer, size, offset); int olderrno = errno; (void) os_close(fd); errno = olderrno; return write_len; } ssize_t file_size = util_file_get_size(path); if (file_size < 0) { LOG(2, "cannot determine file length \"%s\"", path); return -1; } size_t max_size = (size_t)(file_size - offset); if (size > max_size) { LOG(2, "requested size of write goes beyond the file length, " "%zu > %zu", size, max_size); LOG(4, "adjusting size to %zu", max_size); size = max_size; } void *addr = util_file_map_whole(path); if (addr == NULL) { LOG(2, "failed to map entire file \"%s\"", path); return -1; } memcpy(ADDR_SUM(addr, offset), buffer, size); util_unmap(addr, (size_t)file_size); return (ssize_t)size; } /* * util_file_pread -- reads from a file with an offset */ ssize_t util_file_pread(const char *path, void *buffer, size_t size, os_off_t offset) { LOG(3, "path \"%s\" buffer %p size %zu offset %ju", path, buffer, size, offset); enum file_type type = util_file_get_type(path); if (type < 0) return -1; if (type == TYPE_NORMAL) { int fd = util_file_open(path, NULL, 0, O_RDONLY); if (fd < 0) { LOG(2, "failed to open file \"%s\"", path); return -1; } ssize_t read_len = pread(fd, buffer, size, offset); int olderrno = errno; (void) os_close(fd); errno = olderrno; return read_len; } ssize_t file_size = util_file_get_size(path); if (file_size < 0) { LOG(2, "cannot determine file length \"%s\"", path); return -1; } size_t max_size = (size_t)(file_size - offset); if (size > max_size) { LOG(2, "requested size of read goes beyond the file length, " "%zu > %zu", size, max_size); LOG(4, "adjusting size to %zu", max_size); size = max_size; } void *addr = util_file_map_whole(path); if (addr == NULL) { LOG(2, "failed to map entire file \"%s\"", path); return -1; } memcpy(buffer, ADDR_SUM(addr, offset), size); util_unmap(addr, (size_t)file_size); return (ssize_t)size; } /* * util_file_create -- create a new memory pool file */ int util_file_create(const char *path, size_t size, size_t minsize) { LOG(3, "path \"%s\" size %zu minsize %zu", path, size, minsize); ASSERTne(size, 0); if (size < minsize) { ERR("size %zu smaller than %zu", size, minsize); errno = EINVAL; return -1; } if (((os_off_t)size) < 0) { ERR("invalid size (%zu) for os_off_t", size); errno = EFBIG; return -1; } int fd; int mode; int flags = O_RDWR | O_CREAT | O_EXCL; #ifndef _WIN32 mode = 0; #else mode = S_IWRITE | S_IREAD; flags |= O_BINARY; #endif /* * Create file without any permission. It will be granted once * initialization completes. 
*/ if ((fd = os_open(path, flags, mode)) < 0) { ERR("!open \"%s\"", path); return -1; } if ((errno = os_posix_fallocate(fd, 0, (os_off_t)size)) != 0) { ERR("!posix_fallocate \"%s\", %zu", path, size); goto err; } /* for windows we can't flock until after we fallocate */ if (os_flock(fd, OS_LOCK_EX | OS_LOCK_NB) < 0) { ERR("!flock \"%s\"", path); goto err; } return fd; err: LOG(4, "error clean up"); int oerrno = errno; if (fd != -1) (void) os_close(fd); os_unlink(path); errno = oerrno; return -1; } /* * util_file_open -- open a memory pool file */ int util_file_open(const char *path, size_t *size, size_t minsize, int flags) { LOG(3, "path \"%s\" size %p minsize %zu flags %d", path, size, minsize, flags); int oerrno; int fd; #ifdef _WIN32 flags |= O_BINARY; #endif if ((fd = os_open(path, flags)) < 0) { ERR("!open \"%s\"", path); return -1; } if (os_flock(fd, OS_LOCK_EX | OS_LOCK_NB) < 0) { ERR("!flock \"%s\"", path); (void) os_close(fd); return -1; } if (size || minsize) { if (size) ASSERTeq(*size, 0); ssize_t actual_size = util_file_get_size(path); if (actual_size < 0) { ERR("stat \"%s\": negative size", path); errno = EINVAL; goto err; } if ((size_t)actual_size < minsize) { ERR("size %zu smaller than %zu", (size_t)actual_size, minsize); errno = EINVAL; goto err; } if (size) { *size = (size_t)actual_size; LOG(4, "actual file size %zu", *size); } } return fd; err: oerrno = errno; if (os_flock(fd, OS_LOCK_UN)) ERR("!flock unlock"); (void) os_close(fd); errno = oerrno; return -1; } /* * util_unlink -- unlinks a file or zeroes a device dax */ int util_unlink(const char *path) { LOG(3, "path \"%s\"", path); enum file_type type = util_file_get_type(path); if (type < 0) return -1; if (type == TYPE_DEVDAX) { return util_file_zero(path, 0, DEVICE_DAX_ZERO_LEN); } else { #ifdef _WIN32 /* on Windows we can not unlink Read-Only files */ if (os_chmod(path, S_IREAD | S_IWRITE) == -1) { ERR("!chmod \"%s\"", path); return -1; } #endif return os_unlink(path); } } /* * util_unlink_flock -- flocks the file and unlinks it * * The unlink(2) call on a file which is opened and locked using flock(2) * by different process works on linux. Thus in order to forbid removing a * pool when in use by different process we need to flock(2) the pool files * first before unlinking. */ int util_unlink_flock(const char *path) { LOG(3, "path \"%s\"", path); #ifdef WIN32 /* * On Windows it is not possible to unlink the * file if it is flocked. 
*/ return util_unlink(path); #else int fd = util_file_open(path, NULL, 0, O_RDONLY); if (fd < 0) { LOG(2, "failed to open file \"%s\"", path); return -1; } int ret = util_unlink(path); (void) os_close(fd); return ret; #endif } /* * util_write_all -- a wrapper for util_write * * writes exactly count bytes from buf to file referred to by fd * returns -1 on error, 0 otherwise */ int util_write_all(int fd, const char *buf, size_t count) { ssize_t n_wrote = 0; size_t total = 0; while (count > total) { n_wrote = util_write(fd, buf, count - total); if (n_wrote <= 0) return -1; buf += (size_t)n_wrote; total += (size_t)n_wrote; } return 0; } #endif vmemcache-0.8.1/src/file.h000066400000000000000000000044241374403322600153370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * file.h -- internal definitions for file module */ #ifndef PMDK_FILE_H #define PMDK_FILE_H 1 #include #include #include #include #include #include "os.h" #ifdef __cplusplus extern "C" { #endif #ifdef _WIN32 #define NAME_MAX _MAX_FNAME #endif struct file_info { char filename[NAME_MAX + 1]; int is_dir; }; struct dir_handle { const char *path; #ifdef _WIN32 HANDLE handle; char *_file; #else DIR *dirp; #endif }; enum file_type { OTHER_ERROR = -2, NOT_EXISTS = -1, TYPE_NORMAL = 1, TYPE_DEVDAX = 2 }; int util_file_dir_open(struct dir_handle *a, const char *path); int util_file_dir_next(struct dir_handle *a, struct file_info *info); int util_file_dir_close(struct dir_handle *a); int util_file_dir_remove(const char *path); int util_file_exists(const char *path); enum file_type util_fd_get_type(int fd); enum file_type util_file_get_type(const char *path); int util_ddax_region_find(const char *path); ssize_t util_file_get_size(const char *path); size_t util_file_device_dax_alignment(const char *path); void *util_file_map_whole(const char *path); int util_file_zero(const char *path, os_off_t off, size_t len); ssize_t util_file_pread(const char *path, void *buffer, size_t size, os_off_t offset); ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size, os_off_t offset); int util_tmpfile(const char *dir, const char *templ, int flags); int util_is_absolute_path(const char *path); int util_file_create(const char *path, size_t size, size_t minsize); int util_file_open(const char *path, size_t *size, size_t minsize, int flags); int util_unlink(const char *path); int util_unlink_flock(const char *path); int util_file_mkdir(const char *path, mode_t mode); int util_write_all(int fd, const char *buf, size_t count); #ifndef _WIN32 #define util_read read #define util_write write #else /* XXX - consider adding an assertion on (count <= UINT_MAX) */ #define util_read(fd, buf, count) read(fd, buf, (unsigned)(count)) #define util_write(fd, buf, count) write(fd, buf, (unsigned)(count)) #define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) #define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) #endif #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/file_posix.c000066400000000000000000000157171374403322600165630ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * file_posix.c -- Posix versions of file APIs */ /* for O_TMPFILE */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include "os.h" #include "file.h" #include "out.h" #if 0 #define MAX_SIZE_LENGTH 64 #define DAX_REGION_ID_LEN 6 /* 5 digits + \0 */ #endif /* * util_tmpfile_mkstemp -- (internal) create 
temporary file * if O_TMPFILE not supported */ static int util_tmpfile_mkstemp(const char *dir, const char *templ) { /* the templ must start with a path separator */ ASSERTeq(templ[0], '/'); int oerrno; int fd = -1; char *fullname = alloca(strlen(dir) + strlen(templ) + 1); (void) strcpy(fullname, dir); (void) strcat(fullname, templ); sigset_t set, oldset; sigfillset(&set); (void) sigprocmask(SIG_BLOCK, &set, &oldset); mode_t prev_umask = umask(S_IRWXG | S_IRWXO); fd = os_mkstemp(fullname); umask(prev_umask); if (fd < 0) { ERR("!mkstemp"); goto err; } (void) os_unlink(fullname); (void) sigprocmask(SIG_SETMASK, &oldset, NULL); LOG(3, "unlinked file is \"%s\"", fullname); return fd; err: oerrno = errno; (void) sigprocmask(SIG_SETMASK, &oldset, NULL); if (fd != -1) (void) os_close(fd); errno = oerrno; return -1; } /* * util_tmpfile -- create temporary file */ int util_tmpfile(const char *dir, const char *templ, int flags) { LOG(3, "dir \"%s\" template \"%s\" flags %x", dir, templ, flags); /* only O_EXCL is allowed here */ ASSERT(flags == 0 || flags == O_EXCL); #ifdef O_TMPFILE int fd = open(dir, O_TMPFILE | O_RDWR | flags, S_IRUSR | S_IWUSR); /* * Open can fail if underlying file system does not support O_TMPFILE * flag. */ if (fd >= 0) return fd; if (errno != EOPNOTSUPP) { ERR("!open"); return -1; } #endif return util_tmpfile_mkstemp(dir, templ); } #if 0 /* * util_is_absolute_path -- check if the path is an absolute one */ int util_is_absolute_path(const char *path) { LOG(3, "path: %s", path); if (path[0] == OS_DIR_SEPARATOR) return 1; else return 0; } /* * util_create_mkdir -- creates new dir */ int util_file_mkdir(const char *path, mode_t mode) { LOG(3, "path: %s mode: %o", path, mode); return mkdir(path, mode); } /* * util_file_dir_open -- open a directory */ int util_file_dir_open(struct dir_handle *handle, const char *path) { LOG(3, "handle: %p path: %s", handle, path); handle->dirp = opendir(path); return handle->dirp == NULL; } /* * util_file_dir_next -- read next file in directory */ int util_file_dir_next(struct dir_handle *handle, struct file_info *info) { LOG(3, "handle: %p info: %p", handle, info); struct dirent *d = readdir(handle->dirp); if (d == NULL) return 1; /* break */ info->filename[NAME_MAX] = '\0'; strncpy(info->filename, d->d_name, NAME_MAX + 1); if (info->filename[NAME_MAX] != '\0') return -1; /* filename truncated */ info->is_dir = d->d_type == DT_DIR; return 0; /* continue */ } /* * util_file_dir_close -- close a directory */ int util_file_dir_close(struct dir_handle *handle) { LOG(3, "path: %p", handle); return closedir(handle->dirp); } /* * util_file_dir_remove -- remove directory */ int util_file_dir_remove(const char *path) { LOG(3, "path: %s", path); return rmdir(path); } /* * device_dax_alignment -- (internal) checks the alignment of given Device DAX */ static size_t device_dax_alignment(const char *path) { LOG(3, "path \"%s\"", path); os_stat_t st; int olderrno; if (os_stat(path, &st) < 0) { ERR("!stat \"%s\"", path); return 0; } char spath[PATH_MAX]; snprintf(spath, PATH_MAX, "/sys/dev/char/%u:%u/device/align", os_major(st.st_rdev), os_minor(st.st_rdev)); LOG(4, "device align path \"%s\"", spath); int fd = os_open(spath, O_RDONLY); if (fd < 0) { ERR("!open \"%s\"", spath); return 0; } size_t size = 0; char sizebuf[MAX_SIZE_LENGTH + 1]; ssize_t nread; if ((nread = read(fd, sizebuf, MAX_SIZE_LENGTH)) < 0) { ERR("!read"); goto out; } sizebuf[nread] = 0; /* null termination */ char *endptr; olderrno = errno; errno = 0; /* 'align' is in decimal format */ size = 
strtoull(sizebuf, &endptr, 10); if (endptr == sizebuf || *endptr != '\n' || (size == ULLONG_MAX && errno == ERANGE)) { ERR("invalid device alignment %s", sizebuf); size = 0; goto out; } /* * If the alignment value is not a power of two, try with * hex format, as this is how it was printed in older kernels. * Just in case someone is using kernel <4.9. */ if ((size & (size - 1)) != 0) { size = strtoull(sizebuf, &endptr, 16); if (endptr == sizebuf || *endptr != '\n' || (size == ULLONG_MAX && errno == ERANGE)) { ERR("invalid device alignment %s", sizebuf); size = 0; goto out; } } errno = olderrno; out: olderrno = errno; (void) os_close(fd); errno = olderrno; LOG(4, "device alignment %zu", size); return size; } /* * util_file_device_dax_alignment -- returns internal Device DAX alignment */ size_t util_file_device_dax_alignment(const char *path) { LOG(3, "path \"%s\"", path); return device_dax_alignment(path); } /* * util_ddax_region_find -- returns Device DAX region id */ int util_ddax_region_find(const char *path) { LOG(3, "path \"%s\"", path); int dax_reg_id_fd; char dax_region_path[PATH_MAX]; char reg_id[DAX_REGION_ID_LEN]; char *end_addr; os_stat_t st; ASSERTne(path, NULL); if (os_stat(path, &st) < 0) { ERR("!stat \"%s\"", path); return -1; } dev_t dev_id = st.st_rdev; unsigned major = os_major(dev_id); unsigned minor = os_minor(dev_id); int ret = snprintf(dax_region_path, PATH_MAX, "/sys/dev/char/%u:%u/device/dax_region/id", major, minor); if (ret < 0) { ERR("snprintf(%p, %d, /sys/dev/char/%u:%u/device/" "dax_region/id, %u, %u): %d", dax_region_path, PATH_MAX, major, minor, major, minor, ret); return -1; } if ((dax_reg_id_fd = os_open(dax_region_path, O_RDONLY)) < 0) { LOG(1, "!open(\"%s\", O_RDONLY)", dax_region_path); return -1; } ssize_t len = read(dax_reg_id_fd, reg_id, DAX_REGION_ID_LEN); if (len == -1) { ERR("!read(%d, %p, %d)", dax_reg_id_fd, reg_id, DAX_REGION_ID_LEN); goto err; } else if (len < 2 || reg_id[len - 1] != '\n') { errno = EINVAL; ERR("!read(%d, %p, %d) invalid format", dax_reg_id_fd, reg_id, DAX_REGION_ID_LEN); goto err; } int olderrno = errno; errno = 0; long reg_num = strtol(reg_id, &end_addr, 10); if ((errno == ERANGE && (reg_num == LONG_MAX || reg_num == LONG_MIN)) || (errno != 0 && reg_num == 0)) { ERR("!strtol(%p, %p, 10)", reg_id, end_addr); goto err; } errno = olderrno; if (end_addr == reg_id) { ERR("!strtol(%p, %p, 10) no digits were found", reg_id, end_addr); goto err; } if (*end_addr != '\n') { ERR("!strtol(%s, %s, 10) invalid format", reg_id, end_addr); goto err; } os_close(dax_reg_id_fd); return (int)reg_num; err: os_close(dax_reg_id_fd); return -1; } #endif vmemcache-0.8.1/src/libvmemcache.c000066400000000000000000000023111374403322600170230ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * libvmemcache.c -- libvmemcache entry points */ #include #include #include "common.h" #include "libvmemcache.h" #include "vmemcache.h" /* * vmcache_init -- load-time initialization for vmcache * * Called automatically by the run-time loader. */ ATTR_CONSTRUCTOR void libvmemcache_init(void) { common_init(VMEMCACHE_PREFIX, VMEMCACHE_LEVEL_VAR, VMEMCACHE_FILE_VAR, VMEMCACHE_MAJOR_VERSION, VMEMCACHE_MINOR_VERSION); LOG(3, NULL); } /* * libvmemcache_fini -- libvmemcache cleanup routine * * Called automatically when the process terminates. 
*/ ATTR_DESTRUCTOR void libvmemcache_fini(void) { LOG(3, NULL); common_fini(); } /* * vmemcache_errormsgU -- return last error message */ #ifndef _WIN32 static inline #endif const char * vmemcache_errormsgU(void) { return out_get_errormsg(); } #ifndef _WIN32 /* * vmemcache_errormsg -- return last error message */ const char * vmemcache_errormsg(void) { return vmemcache_errormsgU(); } #else /* * vmemcache_errormsgW -- return last error message as wchar_t */ const wchar_t * vmemcache_errormsgW(void) { return out_get_errormsgW(); } #endif vmemcache-0.8.1/src/libvmemcache.h000066400000000000000000000077071374403322600170460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * libvmemcache.h -- definitions of libvmemcache entry points * * This library provides near-zero waste volatile caching. */ #ifndef LIBVMEMCACHE_H #define LIBVMEMCACHE_H 1 #include #include #ifdef __cplusplus extern "C" { #endif #ifdef _WIN32 #ifndef PMDK_UTF8_API #define vmemcache_new vmemcache_newW #define vmemcache_errormsg vmemcache_errormsgW #else #define vmemcache_new vmemcache_newU #define vmemcache_errormsg vmemcache_errormsgU #endif #endif /* * VMEMCACHE_MAJOR_VERSION and VMEMCACHE_MINOR_VERSION provide the current * version of the libvmemcache API as provided by this header file. */ #define VMEMCACHE_MAJOR_VERSION 0 #define VMEMCACHE_MINOR_VERSION 8 #define VMEMCACHE_MIN_POOL ((size_t)(1024 * 1024)) /* minimum pool size: 1MB */ #define VMEMCACHE_MIN_EXTENT ((size_t)256) /* minimum size of extent: 256B */ /* * opaque type, internal to libvmemcache */ typedef struct vmemcache VMEMcache; enum vmemcache_repl_p { VMEMCACHE_REPLACEMENT_NONE, VMEMCACHE_REPLACEMENT_LRU, VMEMCACHE_REPLACEMENT_NUM }; enum vmemcache_statistic { VMEMCACHE_STAT_PUT, /* total number of puts */ VMEMCACHE_STAT_GET, /* total number of gets */ VMEMCACHE_STAT_HIT, /* total number of hits */ VMEMCACHE_STAT_MISS, /* total number of misses */ VMEMCACHE_STAT_EVICT, /* total number of evicts */ VMEMCACHE_STAT_ENTRIES, /* current number of cache entries */ VMEMCACHE_STAT_DRAM_SIZE_USED, /* current size of DRAM used for keys */ VMEMCACHE_STAT_POOL_SIZE_USED, /* current size of memory pool */ /* used for values */ VMEMCACHE_STAT_HEAP_ENTRIES, /* current number of allocator heap */ /* entries */ VMEMCACHE_STATS_NUM /* total number of statistics */ }; enum vmemcache_bench_cfg { /* these will corrupt the data, good only for benchmarking */ VMEMCACHE_BENCH_INDEX_ONLY, /* disable anything but indexing */ VMEMCACHE_BENCH_NO_ALLOC, /* index+repl but no alloc */ VMEMCACHE_BENCH_NO_MEMCPY, /* alloc but don't copy data */ VMEMCACHE_BENCH_PREFAULT, /* prefault the whole pool */ }; typedef void vmemcache_on_evict(VMEMcache *cache, const void *key, size_t key_size, void *arg); typedef void vmemcache_on_miss(VMEMcache *cache, const void *key, size_t key_size, void *arg); VMEMcache * vmemcache_new(void); int vmemcache_set_eviction_policy(VMEMcache *cache, enum vmemcache_repl_p repl_p); int vmemcache_set_size(VMEMcache *cache, size_t size); int vmemcache_set_extent_size(VMEMcache *cache, size_t extent_size); #ifndef _WIN32 int vmemcache_add(VMEMcache *cache, const char *path); #else int vmemcache_addU(VMEMcache *cache, const char *path); int vmemcache_addW(VMEMcache *cache, const wchar_t *path); #endif void vmemcache_delete(VMEMcache *cache); void vmemcache_callback_on_evict(VMEMcache *cache, vmemcache_on_evict *evict, void *arg); void vmemcache_callback_on_miss(VMEMcache *cache, vmemcache_on_miss *miss, 
void *arg); ssize_t /* returns the number of bytes read */ vmemcache_get(VMEMcache *cache, const void *key, size_t key_size, void *vbuf, /* user-provided buffer */ size_t vbufsize, /* size of vbuf */ size_t offset, /* offset inside of value from which to begin copying */ size_t *vsize /* real size of the object */); int vmemcache_exists(VMEMcache *cache, const void *key, size_t key_size, size_t *vsize); int vmemcache_put(VMEMcache *cache, const void *key, size_t key_size, const void *value, size_t value_size); int vmemcache_evict(VMEMcache *cache, const void *key, size_t ksize); int vmemcache_get_stat(VMEMcache *cache, enum vmemcache_statistic stat, void *value, size_t value_size); #ifndef _WIN32 const char *vmemcache_errormsg(void); #else const char *vmemcache_errormsgU(void); const wchar_t *vmemcache_errormsgW(void); #endif /* UNSTABLE INTEFACE -- DO NOT USE! */ void vmemcache_bench_set(VMEMcache *cache, enum vmemcache_bench_cfg cfg, size_t val); #ifdef __cplusplus } #endif #endif /* libvmemcache.h */ vmemcache-0.8.1/src/libvmemcache.map000066400000000000000000000010211374403322600173530ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation # # # src/libvmemcache.map -- linker map file for libvmemcache # LIBVMEMCACHE_1.0 { global: vmemcache_new; vmemcache_delete; vmemcache_set_eviction_policy; vmemcache_set_size; vmemcache_set_extent_size; vmemcache_add; vmemcache_put; vmemcache_get; vmemcache_exists; vmemcache_evict; vmemcache_callback_on_evict; vmemcache_callback_on_miss; vmemcache_get_stat; vmemcache_bench_set; vmemcache_errormsg; local: *; }; vmemcache-0.8.1/src/mmap.c000066400000000000000000000057211374403322600153460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * mmap.c -- mmap utilities */ #include #include #include #include #include #include #include #include #include #include "file.h" #include "mmap.h" #include "sys_util.h" #include "os.h" /* * util_map -- memory map a file * * This is just a convenience function that calls mmap() with the * appropriate arguments and includes our trace points. */ void * util_map(int fd, size_t len, int flags, int rdonly, size_t req_align, int *map_sync) { LOG(3, "fd %d len %zu flags %d rdonly %d req_align %zu map_sync %p", fd, len, flags, rdonly, req_align, map_sync); void *base; void *addr = util_map_hint(len, req_align); if (addr == MAP_FAILED) { ERR("cannot find a contiguous region of given size"); return NULL; } if (req_align) ASSERTeq((uintptr_t)addr % req_align, 0); int proto = rdonly ? PROT_READ : PROT_READ|PROT_WRITE; base = util_map_sync(addr, len, proto, flags, fd, 0, map_sync); if (base == MAP_FAILED) { ERR("!mmap %zu bytes", len); return NULL; } LOG(3, "mapped at %p", base); return base; } /* * util_unmap -- unmap a file * * This is just a convenience function that calls munmap() with the * appropriate arguments and includes our trace points. 
*/ int util_unmap(void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); /* * XXX Workaround for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=169608 */ #ifdef __FreeBSD__ if (!IS_PAGE_ALIGNED((uintptr_t)addr)) { errno = EINVAL; ERR("!munmap"); return -1; } #endif int retval = munmap(addr, len); if (retval < 0) ERR("!munmap"); return retval; } /* * chattr -- (internal) set file attributes */ static void chattr(int fd, int set, int clear) { int attr; if (ioctl(fd, FS_IOC_GETFLAGS, &attr) < 0) { LOG(3, "!ioctl(FS_IOC_GETFLAGS) failed"); return; } attr |= set; attr &= ~clear; if (ioctl(fd, FS_IOC_SETFLAGS, &attr) < 0) { LOG(3, "!ioctl(FS_IOC_SETFLAGS) failed"); return; } } /* * util_map_tmpfile -- reserve space in an unlinked file and memory-map it * * size must be multiple of page size. */ void * util_map_tmpfile(const char *dir, size_t size, size_t req_align) { int oerrno; if (((os_off_t)size) < 0) { ERR("invalid size (%zu) for os_off_t", size); errno = EFBIG; return NULL; } int fd = util_tmpfile(dir, OS_DIR_SEP_STR "vmem.XXXXXX", O_EXCL); if (fd == -1) { LOG(2, "cannot create temporary file in dir %s", dir); goto err; } chattr(fd, FS_NOCOW_FL, 0); if ((errno = os_posix_fallocate(fd, 0, (os_off_t)size)) != 0) { ERR("!posix_fallocate"); goto err; } void *base; if ((base = util_map(fd, size, MAP_SHARED, 0, req_align, NULL)) == NULL) { LOG(2, "cannot mmap temporary file"); goto err; } (void) os_close(fd); return base; err: oerrno = errno; if (fd != -1) (void) os_close(fd); errno = oerrno; return NULL; } vmemcache-0.8.1/src/mmap.h000066400000000000000000000054141374403322600153520ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * mmap.h -- internal definitions for mmap module */ #ifndef PMDK_MMAP_H #define PMDK_MMAP_H 1 #include #include #include #include #include #include #include "out.h" #include "sys/queue.h" #include "os.h" #ifdef __cplusplus extern "C" { #endif extern char *Mmap_mapfile; void *util_map_sync(void *addr, size_t len, int proto, int flags, int fd, os_off_t offset, int *map_sync); void *util_map(int fd, size_t len, int flags, int rdonly, size_t req_align, int *map_sync); int util_unmap(void *addr, size_t len); void *util_map_tmpfile(const char *dir, size_t size, size_t req_align); #ifdef __FreeBSD__ #define MAP_NORESERVE 0 #define OS_MAPFILE "/proc/curproc/map" #else #define OS_MAPFILE "/proc/self/maps" #endif #ifndef MAP_SYNC #define MAP_SYNC 0x80000 #endif #ifndef MAP_SHARED_VALIDATE #define MAP_SHARED_VALIDATE 0x03 #endif /* * macros for micromanaging range protections for the debug version */ #ifdef DEBUG #define RANGE(addr, len, is_dev_dax, type) do {\ if (!is_dev_dax) ASSERT(util_range_##type(addr, len) >= 0);\ } while (0) #else #define RANGE(addr, len, is_dev_dax, type) do {} while (0) #endif #define RANGE_RO(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, ro) #define RANGE_RW(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, rw) #define RANGE_NONE(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, none) /* pmem mapping type */ enum pmem_map_type { PMEM_DEV_DAX, /* device dax */ PMEM_MAP_SYNC, /* mapping with MAP_SYNC flag on dax fs */ MAX_PMEM_TYPE }; /* * this structure tracks the file mappings outstanding per file handle */ struct map_tracker { SORTEDQ_ENTRY(map_tracker) entry; uintptr_t base_addr; uintptr_t end_addr; int region_id; enum pmem_map_type type; #ifdef _WIN32 /* Windows-specific data */ HANDLE FileHandle; HANDLE FileMappingHandle; DWORD Access; os_off_t Offset; size_t 
FileLen; #endif }; char *util_map_hint_unused(void *minaddr, size_t len, size_t align); char *util_map_hint(size_t len, size_t req_align); #define MEGABYTE ((uintptr_t)1 << 20) #define GIGABYTE ((uintptr_t)1 << 30) /* * util_map_hint_align -- choose the desired mapping alignment * * The smallest supported alignment is 2 megabytes because of the object * alignment requirements. Changing this value to 4 kilobytes constitues a * layout change. * * Use 1GB page alignment only if the mapping length is at least * twice as big as the page size. */ static inline size_t util_map_hint_align(size_t len, size_t req_align) { size_t align = 2 * MEGABYTE; if (req_align) align = req_align; else if (len >= 2 * GIGABYTE) align = GIGABYTE; return align; } #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/mmap_posix.c000066400000000000000000000114221374403322600165630ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * mmap_posix.c -- memory-mapped files for Posix */ #include #include #include #include "mmap.h" #include "out.h" #include "os.h" #define PROCMAXLEN 2048 /* maximum expected line length in /proc files */ char *Mmap_mapfile = OS_MAPFILE; /* Should be modified only for testing */ #ifdef __FreeBSD__ static const char * const sscanf_os = "%p %p"; #else static const char * const sscanf_os = "%p-%p"; #endif /* * util_map_hint_unused -- use /proc to determine a hint address for mmap() * * This is a helper function for util_map_hint(). * It opens up /proc/self/maps and looks for the first unused address * in the process address space that is: * - greater or equal 'minaddr' argument, * - large enough to hold range of given length, * - aligned to the specified unit. * * Asking for aligned address like this will allow the DAX code to use large * mappings. It is not an error if mmap() ignores the hint and chooses * different address. */ char * util_map_hint_unused(void *minaddr, size_t len, size_t align) { LOG(3, "minaddr %p len %zu align %zu", minaddr, len, align); ASSERT(align > 0); FILE *fp; if ((fp = os_fopen(Mmap_mapfile, "r")) == NULL) { ERR("!%s", Mmap_mapfile); return MAP_FAILED; } char line[PROCMAXLEN]; /* for fgets() */ char *lo = NULL; /* beginning of current range in maps file */ char *hi = NULL; /* end of current range in maps file */ char *raddr = minaddr; /* ignore regions below 'minaddr' */ if (raddr == NULL) raddr += Pagesize; raddr = (char *)roundup((uintptr_t)raddr, align); while (fgets(line, PROCMAXLEN, fp) != NULL) { /* check for range line */ if (sscanf(line, sscanf_os, &lo, &hi) == 2) { LOG(4, "%p-%p", lo, hi); if (lo > raddr) { if ((uintptr_t)(lo - raddr) >= len) { LOG(4, "unused region of size %zu " "found at %p", lo - raddr, raddr); break; } else { LOG(4, "region is too small: %zu < %zu", lo - raddr, len); } } if (hi > raddr) { raddr = (char *)roundup((uintptr_t)hi, align); LOG(4, "nearest aligned addr %p", raddr); } if (raddr == NULL) { LOG(4, "end of address space reached"); break; } } } /* * Check for a case when this is the last unused range in the address * space, but is not large enough. (very unlikely) */ if ((raddr != NULL) && (UINTPTR_MAX - (uintptr_t)raddr < len)) { LOG(4, "end of address space reached"); raddr = MAP_FAILED; } fclose(fp); LOG(3, "returning %p", raddr); return raddr; } /* * util_map_hint -- determine hint address for mmap() * * The system picks the randomized mapping address. 
 * ASLR in the 64-bit Linux kernel uses 28 bits of randomness for mmap
 * (bit positions 12-39), which means the base mapping address is randomized
 * within the [0..1024GB] range, with 4KB granularity. Assuming additional
 * 1GB alignment, it results in 1024 possible locations.
 */
char *
util_map_hint(size_t len, size_t req_align)
{
	LOG(3, "len %zu req_align %zu", len, req_align);

	char *hint_addr = MAP_FAILED;

	/* choose the desired alignment based on the requested length */
	size_t align = util_map_hint_align(len, req_align);

	/*
	 * Create dummy mapping to find an unused region of given size.
	 * Request for increased size for later address alignment.
	 * Use MAP_PRIVATE with read-only access to simulate
	 * zero cost for overcommit accounting. Note: MAP_NORESERVE
	 * flag is ignored if overcommit is disabled (mode 2).
	 */
	char *addr = mmap(NULL, len + align, PROT_READ,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (addr != MAP_FAILED) {
		LOG(4, "system choice %p", addr);
		hint_addr = (char *)roundup((uintptr_t)addr, align);
		munmap(addr, len + align);
	}
	LOG(4, "hint %p", hint_addr);

	return hint_addr;
}

/*
 * util_map_sync -- memory map the given file into memory; if the MAP_SHARED
 * flag is provided it attempts to use the MAP_SYNC flag. Otherwise it falls
 * back to mmap(2).
 */
void *
util_map_sync(void *addr, size_t len, int proto, int flags, int fd,
	os_off_t offset, int *map_sync)
{
	LOG(15, "addr %p len %zu proto %x flags %x fd %d offset %ld "
		"map_sync %p", addr, len, proto, flags, fd, offset, map_sync);

	if (map_sync)
		*map_sync = 0;

	/* if map_sync is NULL do not even try to mmap with MAP_SYNC flag */
	if (!map_sync || flags & MAP_PRIVATE)
		return mmap(addr, len, proto, flags, fd, offset);

	/* MAP_SHARED */
	void *ret = mmap(addr, len, proto,
			flags | MAP_SHARED_VALIDATE | MAP_SYNC, fd, offset);
	if (ret != MAP_FAILED) {
		LOG(4, "mmap with MAP_SYNC succeeded");
		*map_sync = 1;
		return ret;
	}

	if (errno == EINVAL || errno == ENOTSUP) {
		LOG(4, "mmap with MAP_SYNC not supported");
		return mmap(addr, len, proto, flags, fd, offset);
	}

	/* other error */
	return MAP_FAILED;
}
vmemcache-0.8.1/src/os.h000066400000000000000000000044671374403322600150470ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */

/*
 * os.h -- os abstraction layer
 */

#ifndef PMDK_OS_H
#define PMDK_OS_H 1

#include
#include
#include

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _WIN32
#define OS_DIR_SEPARATOR '/'
#define OS_DIR_SEP_STR "/"
#else
#define OS_DIR_SEPARATOR '\\'
#define OS_DIR_SEP_STR "\\"
#endif

#ifndef _WIN32

/* madvise() */
#ifdef __FreeBSD__
#define os_madvise minherit
#define MADV_DONTFORK INHERIT_NONE
#else
#define os_madvise madvise
#endif

/* dlopen() */
#ifdef __FreeBSD__
#define RTLD_DEEPBIND 0 /* XXX */
#endif

/* major(), minor() */
#ifdef __FreeBSD__
#define os_major (unsigned)major
#define os_minor (unsigned)minor
#else
#define os_major major
#define os_minor minor
#endif

#endif /* #ifndef _WIN32 */

struct iovec;

/* os_flock */
#define OS_LOCK_SH 1
#define OS_LOCK_EX 2
#define OS_LOCK_NB 4
#define OS_LOCK_UN 8

#ifndef _WIN32
typedef struct stat os_stat_t;
#define os_fstat fstat
#define os_lseek lseek
#else
typedef struct _stat64 os_stat_t;
#define os_fstat _fstat64
#define os_lseek _lseeki64
#endif

#define os_close close
#define os_fclose fclose

#ifndef _WIN32
typedef off_t os_off_t;
#else
/* XXX: os_off_t defined in platform.h */
#endif

int os_open(const char *pathname, int flags, ...);
int os_fsync(int fd);
int os_fsync_dir(const char *dir_name);
int os_stat(const char *pathname, os_stat_t *buf);
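/*
 * Illustrative sketch (not part of the original header): the os_* wrappers
 * declared above are used just like their POSIX counterparts, e.g.:
 *
 *	os_stat_t st;
 *	int fd = os_open("/tmp/file", O_RDWR);
 *	if (fd >= 0 && os_fstat(fd, &st) == 0)
 *		consume_size((size_t)st.st_size); // hypothetical consumer
 *	os_close(fd);
 */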
int os_unlink(const char *pathname); int os_access(const char *pathname, int mode); FILE *os_fopen(const char *pathname, const char *mode); FILE *os_fdopen(int fd, const char *mode); int os_chmod(const char *pathname, mode_t mode); int os_mkstemp(char *temp); int os_posix_fallocate(int fd, os_off_t offset, os_off_t len); int os_ftruncate(int fd, os_off_t length); int os_flock(int fd, int operation); ssize_t os_writev(int fd, const struct iovec *iov, int iovcnt); int os_clock_gettime(int id, struct timespec *ts); unsigned os_rand_r(unsigned *seedp); int os_unsetenv(const char *name); int os_setenv(const char *name, const char *value, int overwrite); char *os_getenv(const char *name); const char *os_strsignal(int sig); int os_execv(const char *path, char *const argv[]); /* * XXX: missing APis (used in ut_file.c) * * rename * read * write */ #ifdef __cplusplus } #endif #endif /* os.h */ vmemcache-0.8.1/src/os_posix.c000066400000000000000000000124651374403322600162620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * os_posix.c -- abstraction layer for basic Posix functions */ #define _GNU_SOURCE #include #include #include #ifdef __FreeBSD__ #include #endif #include #include #include #include #include #include #include #include #include "util.h" #include "out.h" #include "os.h" /* * os_open -- open abstraction layer */ int os_open(const char *pathname, int flags, ...) { int mode_required = (flags & O_CREAT) == O_CREAT; #ifdef O_TMPFILE mode_required |= (flags & O_TMPFILE) == O_TMPFILE; #endif if (mode_required) { va_list arg; va_start(arg, flags); /* Clang requires int due to auto-promotion */ int mode = va_arg(arg, int); va_end(arg); return open(pathname, flags, (mode_t)mode); } else { return open(pathname, flags); } } #if 0 /* * os_fsync -- fsync abstraction layer */ int os_fsync(int fd) { return fsync(fd); } /* * os_fsync_dir -- fsync the directory */ int os_fsync_dir(const char *dir_name) { int fd = os_open(dir_name, O_RDONLY | O_DIRECTORY); if (fd < 0) return -1; int ret = os_fsync(fd); os_close(fd); return ret; } #endif /* * os_stat -- stat abstraction layer */ int os_stat(const char *pathname, os_stat_t *buf) { return stat(pathname, buf); } /* * os_unlink -- unlink abstraction layer */ int os_unlink(const char *pathname) { return unlink(pathname); } /* * os_access -- access abstraction layer */ int os_access(const char *pathname, int mode) { return access(pathname, mode); } /* * os_fopen -- fopen abstraction layer */ FILE * os_fopen(const char *pathname, const char *mode) { return fopen(pathname, mode); } #if 0 /* * os_fdopen -- fdopen abstraction layer */ FILE * os_fdopen(int fd, const char *mode) { return fdopen(fd, mode); } /* * os_chmod -- chmod abstraction layer */ int os_chmod(const char *pathname, mode_t mode) { return chmod(pathname, mode); } #endif /* * os_mkstemp -- mkstemp abstraction layer */ int os_mkstemp(char *temp) { return mkstemp(temp); } /* * os_posix_fallocate -- posix_fallocate abstraction layer */ int os_posix_fallocate(int fd, os_off_t offset, off_t len) { #ifdef __FreeBSD__ struct stat fbuf; struct statfs fsbuf; /* * XXX Workaround for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=223287 * * FreeBSD implements posix_fallocate with a simple block allocation/zero * loop. If the requested size is unreasonably large, this can result in * an uninterruptable system call that will suck up all the space in the * file system and could take hours to fail. 
To avoid this, make a crude * check to see if the requested allocation is larger than the available * space in the file system (minus any blocks already allocated to the * file), and if so, immediately return ENOSPC. We do the check only if * the offset is 0; otherwise, trying to figure out how many additional * blocks are required is too complicated. * * This workaround is here mostly to fail "absurdly" large requests for * testing purposes; however, it is coded to allow normal (albeit slow) * operation if the space can actually be allocated. Because of the way * PMDK uses posix_fallocate, supporting Linux-style fallocate in * FreeBSD should be considered. */ if (offset == 0) { if (fstatfs(fd, &fsbuf) == -1 || fstat(fd, &fbuf) == -1) return errno; size_t reqd_blocks = (((size_t)len + (fsbuf.f_bsize - 1)) / fsbuf.f_bsize) - (size_t)fbuf.st_blocks; if (reqd_blocks > (size_t)fsbuf.f_bavail) return ENOSPC; } #endif return posix_fallocate(fd, offset, len); } #if 0 /* * os_ftruncate -- ftruncate abstraction layer */ int os_ftruncate(int fd, os_off_t length) { return ftruncate(fd, length); } /* * os_flock -- flock abstraction layer */ int os_flock(int fd, int operation) { int opt = 0; if (operation & OS_LOCK_EX) opt |= LOCK_EX; if (operation & OS_LOCK_SH) opt |= LOCK_SH; if (operation & OS_LOCK_UN) opt |= LOCK_UN; if (operation & OS_LOCK_NB) opt |= LOCK_NB; return flock(fd, opt); } /* * os_writev -- writev abstraction layer */ ssize_t os_writev(int fd, const struct iovec *iov, int iovcnt) { return writev(fd, iov, iovcnt); } #endif /* * os_clock_gettime -- clock_gettime abstraction layer */ int os_clock_gettime(int id, struct timespec *ts) { return clock_gettime(id, ts); } #if 0 /* * os_rand_r -- rand_r abstraction layer */ unsigned os_rand_r(unsigned *seedp) { return (unsigned)rand_r(seedp); } /* * os_unsetenv -- unsetenv abstraction layer */ int os_unsetenv(const char *name) { return unsetenv(name); } /* * os_setenv -- setenv abstraction layer */ int os_setenv(const char *name, const char *value, int overwrite) { return setenv(name, value, overwrite); } #endif /* * secure_getenv -- provide GNU secure_getenv for FreeBSD */ #ifndef __USE_GNU static char * secure_getenv(const char *name) { if (issetugid() != 0) return NULL; return getenv(name); } #endif /* * os_getenv -- getenv abstraction layer */ char * os_getenv(const char *name) { return secure_getenv(name); } #if 0 /* * os_strsignal -- strsignal abstraction layer */ const char * os_strsignal(int sig) { return strsignal(sig); } int os_execv(const char *path, char *const argv[]) { return execv(path, argv); } #endif vmemcache-0.8.1/src/os_thread.h000066400000000000000000000133111374403322600163630ustar00rootroot00000000000000/* * Copyright 2015-2018, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * os_thread.h -- os thread abstraction layer */ #ifndef OS_THREAD_H #define OS_THREAD_H 1 #include #include #ifdef __cplusplus extern "C" { #endif typedef union { long long align; char padding[44]; /* linux: 40 windows: 44 */ } os_mutex_t; typedef union { long long align; char padding[56]; /* linux: 56 windows: 13 */ } os_rwlock_t; typedef union { long long align; char padding[48]; /* linux: 48 windows: 12 */ } os_cond_t; typedef union { long long align; char padding[32]; /* linux: 8 windows: 32 */ } os_thread_t; typedef union { long long align; /* linux: long windows: 8 FreeBSD: 12 */ char padding[16]; /* 16 to be safe */ } os_once_t; #define OS_ONCE_INIT { .padding = {0} } typedef unsigned os_tls_key_t; typedef union { long long align; char padding[56]; /* linux: 56 windows: 8 */ } os_semaphore_t; typedef union { long long align; char padding[56]; /* linux: 56 windows: 8 */ } os_thread_attr_t; typedef union { long long align; char padding[512]; } os_cpu_set_t; #ifdef __FreeBSD__ #define cpu_set_t cpuset_t typedef uintptr_t os_spinlock_t; #else typedef volatile int os_spinlock_t; /* XXX: not implemented on windows */ #endif void os_cpu_zero(os_cpu_set_t *set); void os_cpu_set(size_t cpu, os_cpu_set_t *set); #ifndef _WIN32 #define _When_(...) 
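/*
 * Note (assumption based on common usage): _When_ is a Microsoft SAL
 * annotation; defining it away on non-Windows builds makes it swallow the
 * _Acquires_ and _Requires_ lock annotations used in the prototypes
 * below, so they compile as plain C.
 */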
#endif int os_once(os_once_t *o, void (*func)(void)); int os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *)); int os_tls_key_delete(os_tls_key_t key); int os_tls_set(os_tls_key_t key, const void *value); void *os_tls_get(os_tls_key_t key); int os_mutex_init(os_mutex_t *__restrict mutex); int os_mutex_destroy(os_mutex_t *__restrict mutex); _When_(return == 0, _Acquires_lock_(mutex->lock)) int os_mutex_lock(os_mutex_t *__restrict mutex); _When_(return == 0, _Acquires_lock_(mutex->lock)) int os_mutex_trylock(os_mutex_t *__restrict mutex); int os_mutex_unlock(os_mutex_t *__restrict mutex); /* XXX - non POSIX */ int os_mutex_timedlock(os_mutex_t *__restrict mutex, const struct timespec *abstime); int os_rwlock_init(os_rwlock_t *__restrict rwlock); int os_rwlock_destroy(os_rwlock_t *__restrict rwlock); int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock); int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock); int os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock); _When_(return == 0, _Acquires_exclusive_lock_(rwlock->lock)) int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock); _When_(rwlock->is_write != 0, _Requires_exclusive_lock_held_(rwlock->lock)) _When_(rwlock->is_write == 0, _Requires_shared_lock_held_(rwlock->lock)) int os_rwlock_unlock(os_rwlock_t *__restrict rwlock); int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime); int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime); int os_spin_init(os_spinlock_t *lock, int pshared); int os_spin_destroy(os_spinlock_t *lock); int os_spin_lock(os_spinlock_t *lock); int os_spin_unlock(os_spinlock_t *lock); int os_spin_trylock(os_spinlock_t *lock); int os_cond_init(os_cond_t *__restrict cond); int os_cond_destroy(os_cond_t *__restrict cond); int os_cond_broadcast(os_cond_t *__restrict cond); int os_cond_signal(os_cond_t *__restrict cond); int os_cond_timedwait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex, const struct timespec *abstime); int os_cond_wait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex); /* threading */ int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr, void *(*start_routine)(void *), void *arg); int os_thread_join(os_thread_t *thread, void **result); void os_thread_self(os_thread_t *thread); /* thread affinity */ int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size, const os_cpu_set_t *set); int os_thread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)); int os_semaphore_init(os_semaphore_t *sem, unsigned value); int os_semaphore_destroy(os_semaphore_t *sem); int os_semaphore_wait(os_semaphore_t *sem); int os_semaphore_trywait(os_semaphore_t *sem); int os_semaphore_post(os_semaphore_t *sem); #ifdef __cplusplus } #endif #endif /* OS_THREAD_H */ vmemcache-0.8.1/src/os_thread_posix.c000066400000000000000000000220331374403322600176010ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * os_thread_posix.c -- Posix thread abstraction layer */ #define _GNU_SOURCE #include #ifdef __FreeBSD__ #include #endif #include #include "os_thread.h" #include "util.h" typedef struct { pthread_t thread; } internal_os_thread_t; /* * os_once -- pthread_once abstraction layer */ int os_once(os_once_t *o, void (*func)(void)) { COMPILE_ERROR_ON(sizeof(os_once_t) < sizeof(pthread_once_t)); return pthread_once((pthread_once_t *)o, func); } /* * os_tls_key_create -- pthread_key_create abstraction layer */ int 
os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *)) { COMPILE_ERROR_ON(sizeof(os_tls_key_t) < sizeof(pthread_key_t)); return pthread_key_create((pthread_key_t *)key, destructor); } /* * os_tls_key_delete -- pthread_key_delete abstraction layer */ int os_tls_key_delete(os_tls_key_t key) { return pthread_key_delete((pthread_key_t)key); } /* * os_tls_set -- pthread_setspecific abstraction layer */ int os_tls_set(os_tls_key_t key, const void *value) { return pthread_setspecific((pthread_key_t)key, value); } /* * os_tls_get -- pthread_getspecific abstraction layer */ void * os_tls_get(os_tls_key_t key) { return pthread_getspecific((pthread_key_t)key); } /* * os_mutex_init -- pthread_mutex_init abstraction layer */ int os_mutex_init(os_mutex_t *__restrict mutex) { COMPILE_ERROR_ON(sizeof(os_mutex_t) < sizeof(pthread_mutex_t)); return pthread_mutex_init((pthread_mutex_t *)mutex, NULL); } /* * os_mutex_destroy -- pthread_mutex_destroy abstraction layer */ int os_mutex_destroy(os_mutex_t *__restrict mutex) { return pthread_mutex_destroy((pthread_mutex_t *)mutex); } /* * os_mutex_lock -- pthread_mutex_lock abstraction layer */ int os_mutex_lock(os_mutex_t *__restrict mutex) { return pthread_mutex_lock((pthread_mutex_t *)mutex); } /* * os_mutex_trylock -- pthread_mutex_trylock abstraction layer */ int os_mutex_trylock(os_mutex_t *__restrict mutex) { return pthread_mutex_trylock((pthread_mutex_t *)mutex); } /* * os_mutex_unlock -- pthread_mutex_unlock abstraction layer */ int os_mutex_unlock(os_mutex_t *__restrict mutex) { return pthread_mutex_unlock((pthread_mutex_t *)mutex); } #if 0 /* * os_mutex_timedlock -- pthread_mutex_timedlock abstraction layer */ int os_mutex_timedlock(os_mutex_t *__restrict mutex, const struct timespec *abstime) { return pthread_mutex_timedlock((pthread_mutex_t *)mutex, abstime); } #endif /* * os_rwlock_init -- pthread_rwlock_init abstraction layer */ int os_rwlock_init(os_rwlock_t *__restrict rwlock) { COMPILE_ERROR_ON(sizeof(os_rwlock_t) < sizeof(pthread_rwlock_t)); return pthread_rwlock_init((pthread_rwlock_t *)rwlock, NULL); } /* * os_rwlock_destroy -- pthread_rwlock_destroy abstraction layer */ int os_rwlock_destroy(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_destroy((pthread_rwlock_t *)rwlock); } /* * os_rwlock_rdlock -- pthread_rwlock_rdlock abstraction layer */ int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_rdlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_wrlock -- pthread_rwlock_wrlock abstraction layer */ int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_wrlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_unlock -- pthread_rwlock_unlock abstraction layer */ int os_rwlock_unlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_unlock((pthread_rwlock_t *)rwlock); } #if 0 /* * os_rwlock_tryrdlock -- pthread_rwlock_tryrdlock abstraction layer */ int os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_tryrdlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_trywrlock -- pthread_rwlock_trywrlock abstraction layer */ int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_trywrlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_timedrdlock -- pthread_rwlock_timedrdlock abstraction layer */ int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime) { return pthread_rwlock_timedrdlock((pthread_rwlock_t *)rwlock, abstime); } /* * os_rwlock_timedwrlock -- pthread_rwlock_timedwrlock abstraction
layer */ int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime) { return pthread_rwlock_timedwrlock((pthread_rwlock_t *)rwlock, abstime); } #endif #if 0 /* * os_spin_init -- pthread_spin_init abstraction layer */ int os_spin_init(os_spinlock_t *lock, int pshared) { COMPILE_ERROR_ON(sizeof(os_spinlock_t) < sizeof(pthread_spinlock_t)); return pthread_spin_init((pthread_spinlock_t *)lock, pshared); } /* * os_spin_destroy -- pthread_spin_destroy abstraction layer */ int os_spin_destroy(os_spinlock_t *lock) { return pthread_spin_destroy((pthread_spinlock_t *)lock); } /* * os_spin_lock -- pthread_spin_lock abstraction layer */ int os_spin_lock(os_spinlock_t *lock) { return pthread_spin_lock((pthread_spinlock_t *)lock); } /* * os_spin_unlock -- pthread_spin_unlock abstraction layer */ int os_spin_unlock(os_spinlock_t *lock) { return pthread_spin_unlock((pthread_spinlock_t *)lock); } /* * os_spin_trylock -- pthread_spin_trylock abstraction layer */ int os_spin_trylock(os_spinlock_t *lock) { return pthread_spin_trylock((pthread_spinlock_t *)lock); } #endif /* * os_cond_init -- pthread_cond_init abstraction layer */ int os_cond_init(os_cond_t *__restrict cond) { COMPILE_ERROR_ON(sizeof(os_cond_t) < sizeof(pthread_cond_t)); return pthread_cond_init((pthread_cond_t *)cond, NULL); } /* * os_cond_destroy -- pthread_cond_destroy abstraction layer */ int os_cond_destroy(os_cond_t *__restrict cond) { return pthread_cond_destroy((pthread_cond_t *)cond); } /* * os_cond_broadcast -- pthread_cond_broadcast abstraction layer */ int os_cond_broadcast(os_cond_t *__restrict cond) { return pthread_cond_broadcast((pthread_cond_t *)cond); } /* * os_cond_signal -- pthread_cond_signal abstraction layer */ int os_cond_signal(os_cond_t *__restrict cond) { return pthread_cond_signal((pthread_cond_t *)cond); } /* * os_cond_timedwait -- pthread_cond_timedwait abstraction layer */ int os_cond_timedwait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex, const struct timespec *abstime) { return pthread_cond_timedwait((pthread_cond_t *)cond, (pthread_mutex_t *)mutex, abstime); } /* * os_cond_wait -- pthread_cond_wait abstraction layer */ int os_cond_wait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex) { return pthread_cond_wait((pthread_cond_t *)cond, (pthread_mutex_t *)mutex); } /* * os_thread_create -- pthread_create abstraction layer */ int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr, void *(*start_routine)(void *), void *arg) { COMPILE_ERROR_ON(sizeof(os_thread_t) < sizeof(internal_os_thread_t)); internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; return pthread_create(&thread_info->thread, (pthread_attr_t *)attr, start_routine, arg); } /* * os_thread_join -- pthread_join abstraction layer */ int os_thread_join(os_thread_t *thread, void **result) { internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; return pthread_join(thread_info->thread, result); } #if 0 /* * os_thread_self -- pthread_self abstraction layer */ void os_thread_self(os_thread_t *thread) { internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; thread_info->thread = pthread_self(); } /* * os_thread_atfork -- pthread_atfork abstraction layer */ int os_thread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)) { return pthread_atfork(prepare, parent, child); } /* * os_thread_setaffinity_np -- pthread_setaffinity_np abstraction layer */ int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size, const os_cpu_set_t *set) {
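/* the opaque os_cpu_set_t must be large enough to alias the native cpu_set_t */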
COMPILE_ERROR_ON(sizeof(os_cpu_set_t) < sizeof(cpu_set_t)); internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; return pthread_setaffinity_np(thread_info->thread, set_size, (cpu_set_t *)set); } /* * os_cpu_zero -- CPU_ZERO abstraction layer */ void os_cpu_zero(os_cpu_set_t *set) { CPU_ZERO((cpu_set_t *)set); } /* * os_cpu_set -- CPU_SET abstraction layer */ void os_cpu_set(size_t cpu, os_cpu_set_t *set) { CPU_SET(cpu, (cpu_set_t *)set); } #endif /* * os_semaphore_init -- initializes a semaphore instance */ int os_semaphore_init(os_semaphore_t *sem, unsigned value) { COMPILE_ERROR_ON(sizeof(os_semaphore_t) < sizeof(sem_t)); return sem_init((sem_t *)sem, 0, value); } /* * os_semaphore_destroy -- destroys a semaphore instance */ int os_semaphore_destroy(os_semaphore_t *sem) { return sem_destroy((sem_t *)sem); } /* * os_semaphore_wait -- decreases the value of the semaphore */ int os_semaphore_wait(os_semaphore_t *sem) { return sem_wait((sem_t *)sem); } /* * os_semaphore_trywait -- tries to decrease the value of the semaphore */ int os_semaphore_trywait(os_semaphore_t *sem) { return sem_trywait((sem_t *)sem); } /* * os_semaphore_post -- increases the value of the semaphore */ int os_semaphore_post(os_semaphore_t *sem) { return sem_post((sem_t *)sem); } vmemcache-0.8.1/src/out.c000066400000000000000000000271341374403322600152250ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * out.c -- support for logging, tracing, and assertion output * * Macros like LOG(), OUT, ASSERT(), etc. end up here. */ #include #include #include #include #include #include #include #include "out.h" #include "os.h" #include "os_thread.h" #include "valgrind_internal.h" #include "util.h" /* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */ #ifdef _WIN32 #include "srcversion.h" #endif static const char *Log_prefix; static int Log_level; static FILE *Out_fp; static unsigned Log_alignment; #ifndef NO_LIBPTHREAD #define MAXPRINT 8192 /* maximum expected log line */ #else #define MAXPRINT 256 /* maximum expected log line for libpmem */ #endif struct errormsg { char msg[MAXPRINT]; #ifdef _WIN32 wchar_t wmsg[MAXPRINT]; #endif }; #ifndef NO_LIBPTHREAD static os_once_t Last_errormsg_key_once = OS_ONCE_INIT; static os_tls_key_t Last_errormsg_key; static void _Last_errormsg_key_alloc(void) { int pth_ret = os_tls_key_create(&Last_errormsg_key, free); if (pth_ret) FATAL("!os_thread_key_create"); VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Last_errormsg_key_once); } static void Last_errormsg_key_alloc(void) { os_once(&Last_errormsg_key_once, _Last_errormsg_key_alloc); /* * Workaround Helgrind's bug: * https://bugs.kde.org/show_bug.cgi?id=337735 */ VALGRIND_ANNOTATE_HAPPENS_AFTER(&Last_errormsg_key_once); } static inline void Last_errormsg_fini(void) { void *p = os_tls_get(Last_errormsg_key); if (p) { free(p); (void) os_tls_set(Last_errormsg_key, NULL); } (void) os_tls_key_delete(Last_errormsg_key); } static inline struct errormsg * Last_errormsg_get(void) { Last_errormsg_key_alloc(); struct errormsg *errormsg = os_tls_get(Last_errormsg_key); if (errormsg == NULL) { errormsg = malloc(sizeof(struct errormsg)); if (errormsg == NULL) FATAL("!malloc"); /* make sure it contains empty string initially */ errormsg->msg[0] = '\0'; int ret = os_tls_set(Last_errormsg_key, errormsg); if (ret) FATAL("!os_tls_set"); } return errormsg; } #else /* * We don't want libpmem to depend on libpthread.
Instead of using the pthread * API to dynamically allocate a thread-specific error message buffer, we put * it into TLS. However, keeping a pretty large static buffer (8K) in TLS * may lead to some issues, so the maximum message length is reduced. * Fortunately, it looks like the longest error message in libpmem should * not be longer than about 90 chars (in case of pmem_check_version()). */ static __thread struct errormsg Last_errormsg; static inline void Last_errormsg_key_alloc(void) { } static inline void Last_errormsg_fini(void) { } static inline struct errormsg * Last_errormsg_get(void) { return &Last_errormsg; } #endif /* NO_LIBPTHREAD */ /* * out_init -- initialize the log * * This is called from the library initialization code. */ void out_init(const char *log_prefix, const char *log_level_var, const char *log_file_var, int major_version, int minor_version) { static int once; /* only need to initialize the out module once */ if (once) return; once++; Log_prefix = log_prefix; #ifdef DEBUG char *log_level; char *log_file; if ((log_level = os_getenv(log_level_var)) != NULL) { Log_level = atoi(log_level); if (Log_level < 0) { Log_level = 0; } } if ((log_file = os_getenv(log_file_var)) != NULL && log_file[0] != '\0') { /* reserve more than enough space for a PID + '\0' */ char log_file_pid[PATH_MAX]; size_t len = strlen(log_file); if (len > 0 && log_file[len - 1] == '-') { int ret = snprintf(log_file_pid, PATH_MAX, "%s%d", log_file, getpid()); if (ret < 0 || ret >= PATH_MAX) { ERR("snprintf: %d", ret); abort(); } log_file = log_file_pid; } if ((Out_fp = os_fopen(log_file, "w")) == NULL) { char buff[UTIL_MAX_ERR_MSG]; util_strerror(errno, buff, UTIL_MAX_ERR_MSG); fprintf(stderr, "Error (%s): %s=%s: %s\n", log_prefix, log_file_var, log_file, buff); abort(); } } #endif /* DEBUG */ char *log_alignment = os_getenv("PMDK_LOG_ALIGN"); if (log_alignment) { int align = atoi(log_alignment); if (align > 0) Log_alignment = (unsigned)align; } if (Out_fp == NULL) Out_fp = stderr; else setlinebuf(Out_fp); #ifdef DEBUG static char namepath[PATH_MAX]; LOG(1, "pid %d: program: %s", getpid(), util_getexecname(namepath, PATH_MAX)); #endif LOG(1, "%s version %d.%d", log_prefix, major_version, minor_version); static __attribute__((used)) const char *version_msg = "src version: " SRCVERSION; LOG(1, "%s", version_msg); #if VG_PMEMCHECK_ENABLED /* * Attribute "used" to prevent compiler from optimizing out the variable * when LOG expands to no code (!DEBUG) */ static __attribute__((used)) const char *pmemcheck_msg = "compiled with support for Valgrind pmemcheck"; LOG(1, "%s", pmemcheck_msg); #endif /* VG_PMEMCHECK_ENABLED */ #if VG_HELGRIND_ENABLED static __attribute__((used)) const char *helgrind_msg = "compiled with support for Valgrind helgrind"; LOG(1, "%s", helgrind_msg); #endif /* VG_HELGRIND_ENABLED */ #if VG_MEMCHECK_ENABLED static __attribute__((used)) const char *memcheck_msg = "compiled with support for Valgrind memcheck"; LOG(1, "%s", memcheck_msg); #endif /* VG_MEMCHECK_ENABLED */ #if VG_DRD_ENABLED static __attribute__((used)) const char *drd_msg = "compiled with support for Valgrind drd"; LOG(1, "%s", drd_msg); #endif /* VG_DRD_ENABLED */ #if SDS_ENABLED static __attribute__((used)) const char *shutdown_state_msg = "compiled with support for shutdown state"; LOG(1, "%s", shutdown_state_msg); #endif Last_errormsg_key_alloc(); } /* * out_fini -- close the log file * * This is called to close the log file before the process exits.
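 * It also frees the thread-local error-message buffer via
 * Last_errormsg_fini().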
*/ void out_fini(void) { if (Out_fp != NULL && Out_fp != stderr) { fclose(Out_fp); Out_fp = stderr; } Last_errormsg_fini(); } /* * out_print_func -- default print_func, goes to stderr or Out_fp */ static void out_print_func(const char *s) { /* to suppress drd false-positive */ /* XXX: confirm real nature of this issue: pmem/issues#863 */ #ifdef SUPPRESS_FPUTS_DRD_ERROR VALGRIND_ANNOTATE_IGNORE_READS_BEGIN(); VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN(); #endif fputs(s, Out_fp); #ifdef SUPPRESS_FPUTS_DRD_ERROR VALGRIND_ANNOTATE_IGNORE_READS_END(); VALGRIND_ANNOTATE_IGNORE_WRITES_END(); #endif } /* * calling Print(s) calls the current print_func... */ typedef void (*Print_func)(const char *s); typedef int (*Vsnprintf_func)(char *str, size_t size, const char *format, va_list ap); static Print_func Print = out_print_func; static Vsnprintf_func Vsnprintf = vsnprintf; #if 0 /* * out_set_print_func -- allow override of print_func used by out module */ void out_set_print_func(void (*print_func)(const char *s)) { LOG(3, "print %p", print_func); Print = (print_func == NULL) ? out_print_func : print_func; } /* * out_set_vsnprintf_func -- allow override of vsnprintf_func used by out module */ void out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size, const char *format, va_list ap)) { LOG(3, "vsnprintf %p", vsnprintf_func); Vsnprintf = (vsnprintf_func == NULL) ? vsnprintf : vsnprintf_func; } #endif /* * out_snprintf -- (internal) custom snprintf implementation */ FORMAT_PRINTF(3, 4) static int out_snprintf(char *str, size_t size, const char *format, ...) { int ret; va_list ap; va_start(ap, format); ret = Vsnprintf(str, size, format, ap); va_end(ap); return (ret); } /* * out_common -- common output code, all output goes through here */ static void out_common(const char *file, int line, const char *func, int level, const char *suffix, const char *fmt, va_list ap) { int oerrno = errno; char buf[MAXPRINT]; unsigned cc = 0; int ret; const char *sep = ""; char errstr[UTIL_MAX_ERR_MSG] = ""; if (file) { char *f = strrchr(file, OS_DIR_SEPARATOR); if (f) file = f + 1; ret = out_snprintf(&buf[cc], MAXPRINT - cc, "<%s>: <%d> [%s:%d %s] ", Log_prefix, level, file, line, func); if (ret < 0) { Print("out_snprintf failed"); goto end; } cc += (unsigned)ret; if (cc < Log_alignment) { memset(buf + cc, ' ', Log_alignment - cc); cc = Log_alignment; } } if (fmt) { if (*fmt == '!') { fmt++; sep = ": "; util_strerror(errno, errstr, UTIL_MAX_ERR_MSG); } ret = Vsnprintf(&buf[cc], MAXPRINT - cc, fmt, ap); if (ret < 0) { Print("Vsnprintf failed"); goto end; } cc += (unsigned)ret; } out_snprintf(&buf[cc], MAXPRINT - cc, "%s%s%s", sep, errstr, suffix); Print(buf); end: errno = oerrno; } /* * out_error -- common error output code, all error messages go through here */ static void out_error(const char *file, int line, const char *func, const char *suffix, const char *fmt, va_list ap) { int oerrno = errno; unsigned cc = 0; int ret; const char *sep = ""; char errstr[UTIL_MAX_ERR_MSG] = ""; char *errormsg = (char *)out_get_errormsg(); if (fmt) { if (*fmt == '!') { fmt++; sep = ": "; util_strerror(errno, errstr, UTIL_MAX_ERR_MSG); } ret = Vsnprintf(&errormsg[cc], MAXPRINT, fmt, ap); if (ret < 0) { strcpy(errormsg, "Vsnprintf failed"); goto end; } cc += (unsigned)ret; out_snprintf(&errormsg[cc], MAXPRINT - cc, "%s%s", sep, errstr); } #ifdef DEBUG if (Log_level >= 1) { char buf[MAXPRINT]; cc = 0; if (file) { char *f = strrchr(file, OS_DIR_SEPARATOR); if (f) file = f + 1; ret = out_snprintf(&buf[cc], MAXPRINT, "<%s>: <1> [%s:%d 
%s] ", Log_prefix, file, line, func); if (ret < 0) { Print("out_snprintf failed"); goto end; } cc += (unsigned)ret; if (cc < Log_alignment) { memset(buf + cc, ' ', Log_alignment - cc); cc = Log_alignment; } } out_snprintf(&buf[cc], MAXPRINT - cc, "%s%s", errormsg, suffix); Print(buf); } #endif end: errno = oerrno; } /* * out -- output a line, newline added automatically */ void out(const char *fmt, ...) { va_list ap; va_start(ap, fmt); out_common(NULL, 0, NULL, 0, "\n", fmt, ap); va_end(ap); } /* * out_nonl -- output a line, no newline added automatically */ void out_nonl(int level, const char *fmt, ...) { va_list ap; if (Log_level < level) return; va_start(ap, fmt); out_common(NULL, 0, NULL, level, "", fmt, ap); va_end(ap); } /* * out_log -- output a log line if Log_level >= level */ void out_log(const char *file, int line, const char *func, int level, const char *fmt, ...) { va_list ap; if (Log_level < level) return; va_start(ap, fmt); out_common(file, line, func, level, "\n", fmt, ap); va_end(ap); } /* * out_fatal -- output a fatal error & die (i.e. assertion failure) */ void out_fatal(const char *file, int line, const char *func, const char *fmt, ...) { va_list ap; va_start(ap, fmt); out_common(file, line, func, 1, "\n", fmt, ap); va_end(ap); abort(); } /* * out_err -- output an error message */ void out_err(const char *file, int line, const char *func, const char *fmt, ...) { va_list ap; va_start(ap, fmt); out_error(file, line, func, "\n", fmt, ap); va_end(ap); } /* * out_get_errormsg -- get the last error message */ const char * out_get_errormsg(void) { const struct errormsg *errormsg = Last_errormsg_get(); return &errormsg->msg[0]; } #ifdef _WIN32 /* * out_get_errormsgW -- get the last error message in wchar_t */ const wchar_t * out_get_errormsgW(void) { struct errormsg *errormsg = Last_errormsg_get(); const char *utf8 = &errormsg->msg[0]; wchar_t *utf16 = &errormsg->wmsg[0]; if (util_toUTF16_buff(utf8, utf16, sizeof(errormsg->wmsg)) != 0) FATAL("!Failed to convert string"); return (const wchar_t *)utf16; } #endif vmemcache-0.8.1/src/out.h000066400000000000000000000136621374403322600152330ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * out.h -- definitions for "out" module */ #ifndef PMDK_OUT_H #define PMDK_OUT_H 1 #include #include #include #include "util.h" #ifdef __cplusplus extern "C" { #endif /* * Suppress errors which are after appropriate ASSERT* macro for nondebug * builds. */ #if !defined(DEBUG) && (defined(__clang_analyzer__) || defined(__COVERITY__) ||\ defined(__KLOCWORK__)) #define OUT_FATAL_DISCARD_NORETURN __attribute__((noreturn)) #else #define OUT_FATAL_DISCARD_NORETURN #endif #ifndef EVALUATE_DBG_EXPRESSIONS #if defined(DEBUG) || defined(__clang_analyzer__) || defined(__COVERITY__) ||\ defined(__KLOCWORK__) #define EVALUATE_DBG_EXPRESSIONS 1 #else #define EVALUATE_DBG_EXPRESSIONS 0 #endif #endif #ifdef DEBUG #define OUT_LOG out_log #define OUT_NONL out_nonl #define OUT_FATAL out_fatal #define OUT_FATAL_ABORT out_fatal #else static __attribute__((always_inline)) inline void out_log_discard(const char *file, int line, const char *func, int level, const char *fmt, ...) { (void) file; (void) line; (void) func; (void) level; (void) fmt; } static __attribute__((always_inline)) inline void out_nonl_discard(int level, const char *fmt, ...) 
{ (void) level; (void) fmt; } static __attribute__((always_inline)) OUT_FATAL_DISCARD_NORETURN inline void out_fatal_discard(const char *file, int line, const char *func, const char *fmt, ...) { (void) file; (void) line; (void) func; (void) fmt; } static __attribute__((always_inline)) NORETURN inline void out_fatal_abort(const char *file, int line, const char *func, const char *fmt, ...) { (void) file; (void) line; (void) func; (void) fmt; abort(); } #define OUT_LOG out_log_discard #define OUT_NONL out_nonl_discard #define OUT_FATAL out_fatal_discard #define OUT_FATAL_ABORT out_fatal_abort #endif #if defined(__KLOCWORK__) #define TEST_ALWAYS_TRUE_EXPR(cnd) #define TEST_ALWAYS_EQ_EXPR(cnd) #define TEST_ALWAYS_NE_EXPR(cnd) #else #define TEST_ALWAYS_TRUE_EXPR(cnd)\ if (__builtin_constant_p(cnd))\ ASSERT_COMPILE_ERROR_ON(cnd); #define TEST_ALWAYS_EQ_EXPR(lhs, rhs)\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ ASSERT_COMPILE_ERROR_ON((lhs) == (rhs)); #define TEST_ALWAYS_NE_EXPR(lhs, rhs)\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ ASSERT_COMPILE_ERROR_ON((lhs) != (rhs)); #endif /* produce debug/trace output */ #define LOG(level, ...) do { \ if (!EVALUATE_DBG_EXPRESSIONS) break;\ OUT_LOG(__FILE__, __LINE__, __func__, level, __VA_ARGS__);\ } while (0) /* produce debug/trace output without prefix and new line */ #define LOG_NONL(level, ...) do { \ if (!EVALUATE_DBG_EXPRESSIONS) break; \ OUT_NONL(level, __VA_ARGS__); \ } while (0) /* produce output and exit */ #define FATAL(...)\ OUT_FATAL_ABORT(__FILE__, __LINE__, __func__, __VA_ARGS__) /* assert a condition is true at runtime */ #define ASSERT_rt(cnd) do { \ if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \ OUT_FATAL(__FILE__, __LINE__, __func__, "assertion failure: %s", #cnd);\ } while (0) /* assertion with extra info printed if assertion fails at runtime */ #define ASSERTinfo_rt(cnd, info) do { \ if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \ OUT_FATAL(__FILE__, __LINE__, __func__, \ "assertion failure: %s (%s = %s)", #cnd, #info, info);\ } while (0) /* assert two integer values are equal at runtime */ #define ASSERTeq_rt(lhs, rhs) do { \ if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) == (rhs))) break; \ OUT_FATAL(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \ } while (0) /* assert two integer values are not equal at runtime */ #define ASSERTne_rt(lhs, rhs) do { \ if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) != (rhs))) break; \ OUT_FATAL(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \ } while (0) /* assert a condition is true */ #define ASSERT(cnd)\ do {\ /*\ * Detect useless asserts on always true expression. Please use\ * COMPILE_ERROR_ON(!cnd) or ASSERT_rt(cnd) in such cases.\ */\ TEST_ALWAYS_TRUE_EXPR(cnd);\ ASSERT_rt(cnd);\ } while (0) /* assertion with extra info printed if assertion fails */ #define ASSERTinfo(cnd, info)\ do {\ /* See comment in ASSERT. */\ TEST_ALWAYS_TRUE_EXPR(cnd);\ ASSERTinfo_rt(cnd, info);\ } while (0) /* assert two integer values are equal */ #define ASSERTeq(lhs, rhs)\ do {\ /* See comment in ASSERT. */\ TEST_ALWAYS_EQ_EXPR(lhs, rhs);\ ASSERTeq_rt(lhs, rhs);\ } while (0) /* assert two integer values are not equal */ #define ASSERTne(lhs, rhs)\ do {\ /* See comment in ASSERT. 
*/\ TEST_ALWAYS_NE_EXPR(lhs, rhs);\ ASSERTne_rt(lhs, rhs);\ } while (0) #define ERR(...)\ out_err(__FILE__, __LINE__, __func__, __VA_ARGS__) void out_init(const char *log_prefix, const char *log_level_var, const char *log_file_var, int major_version, int minor_version); void out_fini(void); void out(const char *fmt, ...) FORMAT_PRINTF(1, 2); void out_nonl(int level, const char *fmt, ...) FORMAT_PRINTF(2, 3); void out_log(const char *file, int line, const char *func, int level, const char *fmt, ...) FORMAT_PRINTF(5, 6); void out_err(const char *file, int line, const char *func, const char *fmt, ...) FORMAT_PRINTF(4, 5); void NORETURN out_fatal(const char *file, int line, const char *func, const char *fmt, ...) FORMAT_PRINTF(4, 5); void out_set_print_func(void (*print_func)(const char *s)); void out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size, const char *format, va_list ap)); #ifdef _WIN32 #ifndef PMDK_UTF8_API #define out_get_errormsg out_get_errormsgW #else #define out_get_errormsg out_get_errormsgU #endif #endif #ifndef _WIN32 const char *out_get_errormsg(void); #else const char *out_get_errormsgU(void); const wchar_t *out_get_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/ringbuf.c000066400000000000000000000143011374403322600160420ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * ringbuf.c -- implementation of a simple multi-producer/multi-consumer (MPMC) * ring buffer. It uses atomic instructions for correctness and semaphores for * waiting. */ #include "valgrind_internal.h" #include "ringbuf.h" #include "util.h" #include "out.h" #include "os.h" #include "os_thread.h" #include "sys_util.h" #if 0 /* * This number defines by how much the relevant semaphore will be increased to * unlock waiting threads and thus defines how many threads can wait on the * ring buffer at the same time. */ #define RINGBUF_MAX_CONSUMER_THREADS 1024 #endif /* avoid false sharing by padding the variable */ #define CACHELINE_PADDING(type, name)\ union { type name; uint64_t name##_padding[8]; } struct ringbuf { CACHELINE_PADDING(uint64_t, read_pos); CACHELINE_PADDING(uint64_t, write_pos); CACHELINE_PADDING(os_semaphore_t, nfree); CACHELINE_PADDING(os_semaphore_t, nused); unsigned len; uint64_t len_mask; int running; void *data[]; }; /* * ringbuf_new -- creates a new ring buffer instance */ struct ringbuf * ringbuf_new(unsigned length) { LOG(4, NULL); /* length must be a power of two due to masking */ if (util_popcount(length) > 1) return NULL; struct ringbuf *rbuf = Zalloc(sizeof(*rbuf) + (length * sizeof(void *))); if (rbuf == NULL) return NULL; if (os_semaphore_init(&rbuf->nfree, length)) { Free(rbuf); return NULL; } if (os_semaphore_init(&rbuf->nused, 0)) { util_semaphore_destroy(&rbuf->nfree); Free(rbuf); return NULL; } rbuf->read_pos = 0; rbuf->write_pos = 0; rbuf->len = length; rbuf->len_mask = length - 1; rbuf->running = 1; return rbuf; } #if 0 /* * ringbuf_length -- returns the length of the ring buffer */ unsigned ringbuf_length(struct ringbuf *rbuf) { LOG(4, NULL); return rbuf->len; } /* * ringbuf_stop -- if there are any threads stuck waiting on dequeue, unblocks * them. Those threads, if there are no new elements, will return NULL. 
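 *
 * Internally it first spins until the buffer drains (read_pos catches up
 * with write_pos), then flips the running flag with a CAS, and finally
 * posts the nused semaphore RINGBUF_MAX_CONSUMER_THREADS times so that
 * every thread blocked in ringbuf_dequeue() can wake up and observe the
 * stopped state.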
*/ void ringbuf_stop(struct ringbuf *rbuf) { LOG(4, NULL); /* wait for the buffer to become empty */ while (rbuf->read_pos != rbuf->write_pos) __sync_synchronize(); int ret = util_bool_compare_and_swap64(&rbuf->running, 1, 0); ASSERTeq(ret, 1); /* XXX just unlock all waiting threads somehow... */ for (int64_t i = 0; i < RINGBUF_MAX_CONSUMER_THREADS; ++i) util_semaphore_post(&rbuf->nused); } #endif /* * ringbuf_delete -- destroys an existing ring buffer instance */ void ringbuf_delete(struct ringbuf *rbuf) { LOG(4, NULL); ASSERTeq(rbuf->read_pos, rbuf->write_pos); util_semaphore_destroy(&rbuf->nfree); util_semaphore_destroy(&rbuf->nused); Free(rbuf); } /* * ringbuf_enqueue_atomic -- (internal) performs the lockfree insert of an * element into the ringbuf data array */ static void ringbuf_enqueue_atomic(struct ringbuf *rbuf, void *data) { LOG(4, NULL); size_t w = util_fetch_and_add64(&rbuf->write_pos, 1) & rbuf->len_mask; ASSERT(rbuf->running); /* * In most cases, this won't loop even once, but sometimes if the * semaphore is incremented concurrently in dequeue, we need to wait. */ while (!util_bool_compare_and_swap64(&rbuf->data[w], NULL, data)) ; VALGRIND_ANNOTATE_HAPPENS_BEFORE(&rbuf->data[w]); } #if 0 /* * ringbuf_enqueue -- places a new value into the collection * * This function blocks if there's no space in the buffer. */ int ringbuf_enqueue(struct ringbuf *rbuf, void *data) { LOG(4, NULL); util_semaphore_wait(&rbuf->nfree); ringbuf_enqueue_atomic(rbuf, data); util_semaphore_post(&rbuf->nused); return 0; } #endif /* * ringbuf_tryenqueue -- places a new value into the collection * * This function fails if there's no space in the buffer. */ int ringbuf_tryenqueue(struct ringbuf *rbuf, void *data) { LOG(4, NULL); if (util_semaphore_trywait(&rbuf->nfree) != 0) return -1; ringbuf_enqueue_atomic(rbuf, data); util_semaphore_post(&rbuf->nused); return 0; } /* * ringbuf_dequeue_atomic -- performs a lockfree retrieval of data from ringbuf */ static void * ringbuf_dequeue_atomic(struct ringbuf *rbuf) { LOG(4, NULL); size_t r = util_fetch_and_add64(&rbuf->read_pos, 1) & rbuf->len_mask; /* * Again, in most cases, there won't be even a single loop, but if one * thread stalls while others perform work, it might happen that two * threads get the same read position. */ void *data = NULL; VALGRIND_ANNOTATE_HAPPENS_AFTER(&rbuf->data[r]); do { while ((data = rbuf->data[r]) == NULL) __sync_synchronize(); } while (!util_bool_compare_and_swap64(&rbuf->data[r], data, NULL)); return data; } #if 0 /* * ringbuf_dequeue -- retrieves one value from the collection * * This function blocks if there are no values in the buffer. */ void * ringbuf_dequeue(struct ringbuf *rbuf) { LOG(4, NULL); util_semaphore_wait(&rbuf->nused); if (!rbuf->running) return NULL; void *data = ringbuf_dequeue_atomic(rbuf); util_semaphore_post(&rbuf->nfree); return data; } #endif /* * ringbuf_trydequeue -- retrieves one value from the collection * * This function fails if there are no values in the buffer. */ void * ringbuf_trydequeue(struct ringbuf *rbuf) { LOG(4, NULL); if (util_semaphore_trywait(&rbuf->nused) != 0) return NULL; if (!rbuf->running) return NULL; void *data = ringbuf_dequeue_atomic(rbuf); util_semaphore_post(&rbuf->nfree); return data; } /* * ringbuf_trydequeue_s -- valgrind-safe variant of the trydequeue function * * This function is needed for runtime race detection as a way to avoid false * positives due to usage of atomic instructions that might otherwise confuse * valgrind. 
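 *
 * Usage sketch (illustrative; struct job and process_job() are assumed
 * names, not part of this module):
 *
 *	struct job *j = ringbuf_trydequeue_s(rbuf, sizeof(struct job));
 *	if (j != NULL)
 *		process_job(j);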
*/ void * ringbuf_trydequeue_s(struct ringbuf *rbuf, size_t data_size) { LOG(4, NULL); void *r = ringbuf_trydequeue(rbuf); if (r != NULL) VALGRIND_ANNOTATE_NEW_MEMORY(r, data_size); return r; } #if 0 /* * ringbuf_dequeue_s -- valgrind-safe variant of the dequeue function * * This function is needed for runtime race detection as a way to avoid false * positives due to usage of atomic instructions that might otherwise confuse * valgrind. */ void * ringbuf_dequeue_s(struct ringbuf *rbuf, size_t data_size) { LOG(4, NULL); void *r = ringbuf_dequeue(rbuf); VALGRIND_ANNOTATE_NEW_MEMORY(r, data_size); return r; } #endif vmemcache-0.8.1/src/ringbuf.h000066400000000000000000000014331374403322600160510ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * ringbuf.h -- internal definitions for mpmc ring buffer */ #ifndef RINGBUF_H #define RINGBUF_H 1 #include #include #include struct ringbuf; struct ringbuf *ringbuf_new(unsigned length); void ringbuf_delete(struct ringbuf *rbuf); unsigned ringbuf_length(struct ringbuf *rbuf); void ringbuf_stop(struct ringbuf *rbuf); int ringbuf_enqueue(struct ringbuf *rbuf, void *data); int ringbuf_tryenqueue(struct ringbuf *rbuf, void *data); void *ringbuf_dequeue(struct ringbuf *rbuf); void *ringbuf_trydequeue(struct ringbuf *rbuf); void *ringbuf_dequeue_s(struct ringbuf *rbuf, size_t data_size); void *ringbuf_trydequeue_s(struct ringbuf *rbuf, size_t data_size); #endif vmemcache-0.8.1/src/sys/000077500000000000000000000000001374403322600150615ustar00rootroot00000000000000vmemcache-0.8.1/src/sys/queue.h000066400000000000000000000530461374403322600163660ustar00rootroot00000000000000/* * Source: glibc 2.24 (git://sourceware.org/glibc.git /misc/sys/queue.h) * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * Copyright 2019, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)queue.h 8.5 (Berkeley) 8/20/94 */ #ifndef _SYS_QUEUE_H_ #define _SYS_QUEUE_H_ /* * This file defines five types of data structures: singly-linked lists, * lists, simple queues, tail queues, and circular queues. * * A singly-linked list is headed by a single forward pointer. The * elements are singly linked for minimum space and pointer manipulation * overhead at the expense of O(n) removal for arbitrary elements. New * elements can be added to the list after an existing element or at the * head of the list. Elements being removed from the head of the list * should use the explicit macro for this purpose for optimum * efficiency. A singly-linked list may only be traversed in the forward * direction. Singly-linked lists are ideal for applications with large * datasets and few or no removals or for implementing a LIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A simple queue is headed by a pair of pointers, one the head of the * list and the other to the tail of the list. The elements are singly * linked to save space, so elements can only be removed from the * head of the list. New elements can be added to the list after * an existing element, at the head of the list, or at the end of the * list. A simple queue may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * A circle queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or after * an existing element, at the head of the list, or at the end of the list. * A circle queue may be traversed in either direction, but has a more * complex end of list detection. * * For details on the use of these macros, see the queue(3) manual page. */ /* * XXX This is a workaround for a bug in the llvm's static analyzer. For more * info see https://github.com/pmem/issues/issues/309. */ #ifdef __clang_analyzer__ static void custom_assert(void) { abort(); } #define ANALYZER_ASSERT(x) (__builtin_expect(!(x), 0) ? (void)0 : custom_assert()) #else #define ANALYZER_ASSERT(x) do {} while (0) #endif /* * List definitions. */ #define LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define LIST_HEAD_INITIALIZER(head) \ { NULL } #ifdef __cplusplus #define _CAST_AND_ASSIGN(x, y) x = (__typeof__(x))y; #else #define _CAST_AND_ASSIGN(x, y) x = (void *)(y); #endif #define LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. 
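 *
 * Minimal LIST usage sketch (illustrative; struct entry and its "entries"
 * link field are assumed names, not part of this header):
 *
 *	struct entry { int val; LIST_ENTRY(entry) entries; };
 *	LIST_HEAD(listhead, entry) head = LIST_HEAD_INITIALIZER(head);
 *
 *	struct entry *e = malloc(sizeof(*e));
 *	LIST_INSERT_HEAD(&head, e, entries);
 *	LIST_FOREACH(e, &head, entries)
 *		e->val++;
 *	LIST_REMOVE(LIST_FIRST(&head), entries);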
*/ #define LIST_INIT(head) do { \ (head)->lh_first = NULL; \ } while (/*CONSTCOND*/0) #define LIST_INSERT_AFTER(listelm, elm, field) do { \ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ (listelm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ (listelm)->field.le_next = (elm); \ (elm)->field.le_prev = &(listelm)->field.le_next; \ } while (/*CONSTCOND*/0) #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define LIST_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.le_next = (head)->lh_first) != NULL) \ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ (head)->lh_first = (elm); \ (elm)->field.le_prev = &(head)->lh_first; \ } while (/*CONSTCOND*/0) #define LIST_REMOVE(elm, field) do { \ ANALYZER_ASSERT((elm) != NULL); \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define LIST_FOREACH(var, head, field) \ for ((var) = ((head)->lh_first); \ (var); \ (var) = ((var)->field.le_next)) /* * List access methods. */ #define LIST_EMPTY(head) ((head)->lh_first == NULL) #define LIST_FIRST(head) ((head)->lh_first) #define LIST_NEXT(elm, field) ((elm)->field.le_next) /* * Singly-linked List definitions. */ #define SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define SLIST_HEAD_INITIALIZER(head) \ { NULL } #define SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. */ #define SLIST_INIT(head) do { \ (head)->slh_first = NULL; \ } while (/*CONSTCOND*/0) #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ (elm)->field.sle_next = (slistelm)->field.sle_next; \ (slistelm)->field.sle_next = (elm); \ } while (/*CONSTCOND*/0) #define SLIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.sle_next = (head)->slh_first; \ (head)->slh_first = (elm); \ } while (/*CONSTCOND*/0) #define SLIST_REMOVE_HEAD(head, field) do { \ (head)->slh_first = (head)->slh_first->field.sle_next; \ } while (/*CONSTCOND*/0) #define SLIST_REMOVE(head, elm, type, field) do { \ if ((head)->slh_first == (elm)) { \ SLIST_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = (head)->slh_first; \ while(curelm->field.sle_next != (elm)) \ curelm = curelm->field.sle_next; \ curelm->field.sle_next = \ curelm->field.sle_next->field.sle_next; \ } \ } while (/*CONSTCOND*/0) #define SLIST_FOREACH(var, head, field) \ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) /* * Singly-linked List access methods. */ #define SLIST_EMPTY(head) ((head)->slh_first == NULL) #define SLIST_FIRST(head) ((head)->slh_first) #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) /* * Singly-linked Tail queue declarations. */ #define STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first; /* first element */ \ struct type **stqh_last; /* addr of last next element */ \ } #define STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. 
*/ #define STAILQ_INIT(head) do { \ (head)->stqh_first = NULL; \ (head)->stqh_last = &(head)->stqh_first; \ } while (/*CONSTCOND*/0) #define STAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ (head)->stqh_last = &(elm)->field.stqe_next; \ (head)->stqh_first = (elm); \ } while (/*CONSTCOND*/0) #define STAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.stqe_next = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (/*CONSTCOND*/0) #define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ (head)->stqh_last = &(elm)->field.stqe_next; \ (listelm)->field.stqe_next = (elm); \ } while (/*CONSTCOND*/0) #define STAILQ_REMOVE_HEAD(head, field) do { \ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ (head)->stqh_last = &(head)->stqh_first; \ } while (/*CONSTCOND*/0) #define STAILQ_REMOVE(head, elm, type, field) do { \ if ((head)->stqh_first == (elm)) { \ STAILQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->stqh_first; \ while (curelm->field.stqe_next != (elm)) \ curelm = curelm->field.stqe_next; \ if ((curelm->field.stqe_next = \ curelm->field.stqe_next->field.stqe_next) == NULL) \ (head)->stqh_last = &(curelm)->field.stqe_next; \ } \ } while (/*CONSTCOND*/0) #define STAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->stqh_first); \ (var); \ (var) = ((var)->field.stqe_next)) #define STAILQ_CONCAT(head1, head2) do { \ if (!STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) /* * Singly-linked Tail queue access methods. */ #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define STAILQ_FIRST(head) ((head)->stqh_first) #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) /* * Simple queue definitions. */ #define SIMPLEQ_HEAD(name, type) \ struct name { \ struct type *sqh_first; /* first element */ \ struct type **sqh_last; /* addr of last next element */ \ } #define SIMPLEQ_HEAD_INITIALIZER(head) \ { NULL, &(head).sqh_first } #define SIMPLEQ_ENTRY(type) \ struct { \ struct type *sqe_next; /* next element */ \ } /* * Simple queue functions. 
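 *
 * Note: SIMPLEQ is functionally identical to STAILQ above -- both keep a
 * head pointer plus the address of the last "next" field, with elements
 * singly linked; only the macro names differ.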
*/ #define SIMPLEQ_INIT(head) do { \ (head)->sqh_first = NULL; \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (head)->sqh_first = (elm); \ } while (/*CONSTCOND*/0) #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.sqe_next = NULL; \ *(head)->sqh_last = (elm); \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) #define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ (head)->sqh_last = &(elm)->field.sqe_next; \ (listelm)->field.sqe_next = (elm); \ } while (/*CONSTCOND*/0) #define SIMPLEQ_REMOVE_HEAD(head, field) do { \ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define SIMPLEQ_REMOVE(head, elm, type, field) do { \ if ((head)->sqh_first == (elm)) { \ SIMPLEQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->sqh_first; \ while (curelm->field.sqe_next != (elm)) \ curelm = curelm->field.sqe_next; \ if ((curelm->field.sqe_next = \ curelm->field.sqe_next->field.sqe_next) == NULL) \ (head)->sqh_last = &(curelm)->field.sqe_next; \ } \ } while (/*CONSTCOND*/0) #define SIMPLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->sqh_first); \ (var); \ (var) = ((var)->field.sqe_next)) /* * Simple queue access methods. */ #define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) #define SIMPLEQ_FIRST(head) ((head)->sqh_first) #define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) /* * Tail queue definitions. */ #define _TAILQ_HEAD(name, type, qual) \ struct name { \ qual type *tqh_first; /* first element */ \ qual type *qual *tqh_last; /* addr of last next element */ \ } #define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,) #define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define _TAILQ_ENTRY(type, qual) \ struct { \ qual type *tqe_next; /* next element */ \ qual type *qual *tqe_prev; /* address of previous next element */\ } #define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,) /* * Tail queue functions. 
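 *
 * Minimal TAILQ usage sketch (illustrative; the LRU use case and the
 * names below are assumptions, not part of this header). Accessed
 * elements are moved to the tail with the non-standard TAILQ_MOVE_TO_TAIL
 * macro below, so the head is always the least recently used element:
 *
 *	struct entry { int val; TAILQ_ENTRY(entry) lru; };
 *	TAILQ_HEAD(lruhead, entry) head = TAILQ_HEAD_INITIALIZER(head);
 *
 *	TAILQ_INSERT_TAIL(&head, e, lru);
 *	TAILQ_MOVE_TO_TAIL(&head, e, lru);
 *	victim = TAILQ_FIRST(&head);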
*/ #define TAILQ_INIT(head) do { \ (head)->tqh_first = NULL; \ (head)->tqh_last = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define TAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ (head)->tqh_first->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (head)->tqh_first = (elm); \ (elm)->field.tqe_prev = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define TAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ (elm)->field.tqe_next->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (listelm)->field.tqe_next = (elm); \ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ (elm)->field.tqe_next = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define TAILQ_REMOVE(head, elm, field) do { \ ANALYZER_ASSERT((elm) != NULL); \ if (((elm)->field.tqe_next) != NULL) \ (elm)->field.tqe_next->field.tqe_prev = \ (elm)->field.tqe_prev; \ else \ (head)->tqh_last = (elm)->field.tqe_prev; \ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define TAILQ_MOVE_TO_TAIL(head, elm, field) do { \ if (((elm)->field.tqe_next) != NULL) { \ /* remove from the current position */ \ (elm)->field.tqe_next->field.tqe_prev = (elm)->field.tqe_prev; \ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ /* insert to the tail */ \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &(elm)->field.tqe_next; \ } \ } while (/*CONSTCOND*/0) #define TAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ (var) = ((var)->field.tqe_next)) #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ (var); \ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ TAILQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) /* * Tail queue access methods. */ #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define TAILQ_FIRST(head) ((head)->tqh_first) #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) /* * Circular queue definitions. */ #define CIRCLEQ_HEAD(name, type) \ struct name { \ struct type *cqh_first; /* first element */ \ struct type *cqh_last; /* last element */ \ } #define CIRCLEQ_HEAD_INITIALIZER(head) \ { (void *)&(head), (void *)&(head) } #define CIRCLEQ_ENTRY(type) \ struct { \ struct type *cqe_next; /* next element */ \ struct type *cqe_prev; /* previous element */ \ } /* * Circular queue functions. 
*/ #define CIRCLEQ_INIT(head) do { \ _CAST_AND_ASSIGN((head)->cqh_first, (head)); \ _CAST_AND_ASSIGN((head)->cqh_last, (head)); \ } while (/*CONSTCOND*/0) #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ (elm)->field.cqe_next = (listelm)->field.cqe_next; \ (elm)->field.cqe_prev = (listelm); \ if ((listelm)->field.cqe_next == (void *)(head)) \ (head)->cqh_last = (elm); \ else \ (listelm)->field.cqe_next->field.cqe_prev = (elm); \ (listelm)->field.cqe_next = (elm); \ } while (/*CONSTCOND*/0) #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ (elm)->field.cqe_next = (listelm); \ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ if ((listelm)->field.cqe_prev == (void *)(head)) \ (head)->cqh_first = (elm); \ else \ (listelm)->field.cqe_prev->field.cqe_next = (elm); \ (listelm)->field.cqe_prev = (elm); \ } while (/*CONSTCOND*/0) #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ (elm)->field.cqe_next = (head)->cqh_first; \ (elm)->field.cqe_prev = (void *)(head); \ if ((head)->cqh_last == (void *)(head)) \ (head)->cqh_last = (elm); \ else \ (head)->cqh_first->field.cqe_prev = (elm); \ (head)->cqh_first = (elm); \ } while (/*CONSTCOND*/0) #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ _CAST_AND_ASSIGN((elm)->field.cqe_next, (head)); \ (elm)->field.cqe_prev = (head)->cqh_last; \ if ((head)->cqh_first == (void *)(head)) \ (head)->cqh_first = (elm); \ else \ (head)->cqh_last->field.cqe_next = (elm); \ (head)->cqh_last = (elm); \ } while (/*CONSTCOND*/0) #define CIRCLEQ_REMOVE(head, elm, field) do { \ if ((elm)->field.cqe_next == (void *)(head)) \ (head)->cqh_last = (elm)->field.cqe_prev; \ else \ (elm)->field.cqe_next->field.cqe_prev = \ (elm)->field.cqe_prev; \ if ((elm)->field.cqe_prev == (void *)(head)) \ (head)->cqh_first = (elm)->field.cqe_next; \ else \ (elm)->field.cqe_prev->field.cqe_next = \ (elm)->field.cqe_next; \ } while (/*CONSTCOND*/0) #define CIRCLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->cqh_first); \ (var) != (const void *)(head); \ (var) = ((var)->field.cqe_next)) #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ for ((var) = ((head)->cqh_last); \ (var) != (const void *)(head); \ (var) = ((var)->field.cqe_prev)) /* * Circular queue access methods. */ #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) #define CIRCLEQ_FIRST(head) ((head)->cqh_first) #define CIRCLEQ_LAST(head) ((head)->cqh_last) #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) #define CIRCLEQ_LOOP_NEXT(head, elm, field) \ (((elm)->field.cqe_next == (void *)(head)) \ ? ((head)->cqh_first) \ : ((elm)->field.cqe_next)) #define CIRCLEQ_LOOP_PREV(head, elm, field) \ (((elm)->field.cqe_prev == (void *)(head)) \ ? ((head)->cqh_last) \ : ((elm)->field.cqe_prev)) /* * Sorted queue functions. 
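 *
 * A sorted queue is a circular queue whose insert keeps elements ordered
 * by a user-supplied comparer. The comparer takes two element pointers
 * and returns a strcmp-style result; SORTEDQ_INSERT scans from the front,
 * so each insertion costs O(n). A hedged sketch (names are illustrative):
 *
 *    struct rec {
 *        int key;
 *        SORTEDQ_ENTRY(rec) link;
 *    };
 *    static int
 *    rec_cmp(struct rec *a, struct rec *b)
 *    {
 *        return (a->key > b->key) - (a->key < b->key);
 *    }
 *
 *    SORTEDQ_HEAD(rechead, rec) head;
 *    SORTEDQ_INIT(&head);
 *    given an allocated struct rec *r:
 *    SORTEDQ_INSERT(&head, r, link, struct rec, rec_cmp);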
*/ #define SORTEDQ_HEAD(name, type) CIRCLEQ_HEAD(name, type) #define SORTEDQ_HEAD_INITIALIZER(head) CIRCLEQ_HEAD_INITIALIZER(head) #define SORTEDQ_ENTRY(type) CIRCLEQ_ENTRY(type) #define SORTEDQ_INIT(head) CIRCLEQ_INIT(head) #define SORTEDQ_INSERT(head, elm, field, type, comparer) { \ type *_elm_it; \ for (_elm_it = (head)->cqh_first; \ ((_elm_it != (void *)(head)) && \ (comparer(_elm_it, (elm)) < 0)); \ _elm_it = _elm_it->field.cqe_next) \ /*NOTHING*/; \ if (_elm_it == (void *)(head)) \ CIRCLEQ_INSERT_TAIL(head, elm, field); \ else \ CIRCLEQ_INSERT_BEFORE(head, _elm_it, elm, field); \ } #define SORTEDQ_REMOVE(head, elm, field) CIRCLEQ_REMOVE(head, elm, field) #define SORTEDQ_FOREACH(var, head, field) CIRCLEQ_FOREACH(var, head, field) #define SORTEDQ_FOREACH_REVERSE(var, head, field) \ CIRCLEQ_FOREACH_REVERSE(var, head, field) /* * Sorted queue access methods. */ #define SORTEDQ_EMPTY(head) CIRCLEQ_EMPTY(head) #define SORTEDQ_FIRST(head) CIRCLEQ_FIRST(head) #define SORTEDQ_LAST(head) CIRCLEQ_LAST(head) #define SORTEDQ_NEXT(elm, field) CIRCLEQ_NEXT(elm, field) #define SORTEDQ_PREV(elm, field) CIRCLEQ_PREV(elm, field) #endif /* sys/queue.h */ vmemcache-0.8.1/src/sys_util.h000066400000000000000000000137761374403322600163050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * sys_util.h -- internal utility wrappers around system functions */ #ifndef PMDK_SYS_UTIL_H #define PMDK_SYS_UTIL_H 1 #include #include "os_thread.h" #include "out.h" #ifdef __cplusplus extern "C" { #endif /* * util_mutex_init -- os_mutex_init variant that never fails from * caller perspective. If os_mutex_init failed, this function aborts * the program. */ static inline void util_mutex_init(os_mutex_t *m) { int tmp = os_mutex_init(m); if (tmp) { errno = tmp; FATAL("!os_mutex_init"); } } /* * util_mutex_destroy -- os_mutex_destroy variant that never fails from * caller perspective. If os_mutex_destroy failed, this function aborts * the program. */ static inline void util_mutex_destroy(os_mutex_t *m) { int tmp = os_mutex_destroy(m); if (tmp) { errno = tmp; FATAL("!os_mutex_destroy"); } } /* * util_mutex_lock -- os_mutex_lock variant that never fails from * caller perspective. If os_mutex_lock failed, this function aborts * the program. */ static inline void util_mutex_lock(os_mutex_t *m) { int tmp = os_mutex_lock(m); if (tmp) { errno = tmp; FATAL("!os_mutex_lock"); } } /* * util_mutex_trylock -- os_mutex_trylock variant that never fails from * caller perspective (other than EBUSY). If util_mutex_trylock failed, this * function aborts the program. * Returns 0 if locked successfully, otherwise returns EBUSY. */ static inline int util_mutex_trylock(os_mutex_t *m) { int tmp = os_mutex_trylock(m); if (tmp && tmp != EBUSY) { errno = tmp; FATAL("!os_mutex_trylock"); } return tmp; } /* * util_mutex_unlock -- os_mutex_unlock variant that never fails from * caller perspective. If os_mutex_unlock failed, this function aborts * the program. */ static inline void util_mutex_unlock(os_mutex_t *m) { int tmp = os_mutex_unlock(m); if (tmp) { errno = tmp; FATAL("!os_mutex_unlock"); } } /* * util_rwlock_init -- os_rwlock_init variant that never fails from * caller perspective. If os_rwlock_init failed, this function aborts * the program. */ static inline void util_rwlock_init(os_rwlock_t *m) { int tmp = os_rwlock_init(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_init"); } } /* * util_rwlock_rdlock -- os_rwlock_rdlock variant that never fails from * caller perspective. 
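 * A typical read-side section, sketched with an illustrative lock
 * variable:
 *
 *    util_rwlock_rdlock(&index_lock);
 *    ... read the shared structure ...
 *    util_rwlock_unlock(&index_lock);
 *
 * As with the mutex wrappers above, a failure here is not survivable: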
If os_rwlock_rdlock failed, this function aborts * the program. */ static inline void util_rwlock_rdlock(os_rwlock_t *m) { int tmp = os_rwlock_rdlock(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_rdlock"); } } /* * util_rwlock_wrlock -- os_rwlock_wrlock variant that never fails from * caller perspective. If os_rwlock_wrlock failed, this function aborts * the program. */ static inline void util_rwlock_wrlock(os_rwlock_t *m) { int tmp = os_rwlock_wrlock(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_wrlock"); } } /* * util_rwlock_unlock -- os_rwlock_unlock variant that never fails from * caller perspective. If os_rwlock_unlock failed, this function aborts * the program. */ static inline void util_rwlock_unlock(os_rwlock_t *m) { int tmp = os_rwlock_unlock(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_unlock"); } } /* * util_rwlock_destroy -- os_rwlock_destroy variant that never fails from * caller perspective. If os_rwlock_destroy failed, this function aborts * the program. */ static inline void util_rwlock_destroy(os_rwlock_t *m) { int tmp = os_rwlock_destroy(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_destroy"); } } /* * util_spin_init -- os_spin_init variant that logs on fail and sets errno. */ static inline int util_spin_init(os_spinlock_t *lock, int pshared) { int tmp = os_spin_init(lock, pshared); if (tmp) { errno = tmp; ERR("!os_spin_init"); } return tmp; } /* * util_spin_destroy -- os_spin_destroy variant that never fails from * caller perspective. If os_spin_destroy failed, this function aborts * the program. */ static inline void util_spin_destroy(os_spinlock_t *lock) { int tmp = os_spin_destroy(lock); if (tmp) { errno = tmp; FATAL("!os_spin_destroy"); } } /* * util_spin_lock -- os_spin_lock variant that never fails from caller * perspective. If os_spin_lock failed, this function aborts the program. */ static inline void util_spin_lock(os_spinlock_t *lock) { int tmp = os_spin_lock(lock); if (tmp) { errno = tmp; FATAL("!os_spin_lock"); } } /* * util_spin_unlock -- os_spin_unlock variant that never fails * from caller perspective. If os_spin_unlock failed, * this function aborts the program. */ static inline void util_spin_unlock(os_spinlock_t *lock) { int tmp = os_spin_unlock(lock); if (tmp) { errno = tmp; FATAL("!os_spin_unlock"); } } /* * util_semaphore_init -- os_semaphore_init variant that never fails * from caller perspective. If os_semaphore_init failed, * this function aborts the program. 
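 *
 * A hedged producer/consumer sketch (the semaphore name is illustrative):
 *
 *    os_semaphore_t ready;
 *    util_semaphore_init(&ready, 0);
 *    ... producer thread: util_semaphore_post(&ready); ...
 *    ... consumer thread: util_semaphore_wait(&ready); ...
 *    util_semaphore_destroy(&ready);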
*/ static inline void util_semaphore_init(os_semaphore_t *sem, unsigned value) { if (os_semaphore_init(sem, value)) FATAL("!os_semaphore_init"); } /* * util_semaphore_destroy -- deletes a semaphore instance */ static inline void util_semaphore_destroy(os_semaphore_t *sem) { if (os_semaphore_destroy(sem) != 0) FATAL("!os_semaphore_destroy"); } /* * util_semaphore_wait -- decreases the value of the semaphore */ static inline void util_semaphore_wait(os_semaphore_t *sem) { errno = 0; int ret; do { ret = os_semaphore_wait(sem); } while (errno == EINTR); /* signal interrupt */ if (ret != 0) FATAL("!os_semaphore_wait"); } /* * util_semaphore_trywait -- tries to decrease the value of the semaphore */ static inline int util_semaphore_trywait(os_semaphore_t *sem) { errno = 0; int ret; do { ret = os_semaphore_trywait(sem); } while (errno == EINTR); /* signal interrupt */ if (ret != 0 && errno != EAGAIN) FATAL("!os_semaphore_trywait"); return ret; } /* * util_semaphore_post -- increases the value of the semaphore */ static inline void util_semaphore_post(os_semaphore_t *sem) { if (os_semaphore_post(sem) != 0) FATAL("!os_semaphore_post"); } #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/util.c000066400000000000000000000220411374403322600153630ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * util.c -- very basic utilities */ #include #include #include #include #include #include #include #include "out.h" #include "util.h" #include "valgrind_internal.h" /* library-wide page size */ unsigned long long Pagesize; /* allocation/mmap granularity */ unsigned long long Mmap_align; #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) /* * Zalloc -- allocate zeroed memory */ void * #if GCC_VERSION >= 40400 __attribute__((optimize(0))) #endif Zalloc(size_t sz) { /* gcc likes to replace this function as calloc() if optimizing */ void *ret = Malloc(sz); if (!ret) return NULL; return memset(ret, 0, sz); } #if ANY_VG_TOOL_ENABLED /* initialized to true if the process is running inside Valgrind */ unsigned _On_valgrind; #endif #if VG_PMEMCHECK_ENABLED #define LIB_LOG_LEN 20 #define FUNC_LOG_LEN 50 #define SUFFIX_LEN 7 /* true if pmreorder instrumentization has to be enabled */ int _Pmreorder_emit; /* * util_emit_log -- emits lib and func name with appropriate suffix * to pmemcheck store log file */ void util_emit_log(const char *lib, const char *func, int order) { char lib_name[LIB_LOG_LEN]; char func_name[FUNC_LOG_LEN]; char suffix[SUFFIX_LEN]; size_t lib_len = strlen(lib); size_t func_len = strlen(func); if (order == 0) strcpy(suffix, ".BEGIN"); else strcpy(suffix, ".END"); size_t suffix_len = strlen(suffix); if (lib_len + suffix_len + 1 > LIB_LOG_LEN) { VALGRIND_EMIT_LOG("Library name is too long"); return; } if (func_len + suffix_len + 1 > FUNC_LOG_LEN) { VALGRIND_EMIT_LOG("Function name is too long"); return; } strcpy(lib_name, lib); strcat(lib_name, suffix); strcpy(func_name, func); strcat(func_name, suffix); if (order == 0) { VALGRIND_EMIT_LOG(func_name); VALGRIND_EMIT_LOG(lib_name); } else { VALGRIND_EMIT_LOG(lib_name); VALGRIND_EMIT_LOG(func_name); } } #endif #if 0 /* * util_is_zeroed -- check if given memory range is all zero */ int util_is_zeroed(const void *addr, size_t len) { const char *a = addr; if (len == 0) return 1; if (a[0] == 0 && memcmp(a, a + 1, len - 1) == 0) return 1; return 0; } /* * util_checksum -- compute Fletcher64 checksum * * csump points to where the checksum lives, so that 
location * is treated as zeros while calculating the checksum. The * checksummed data is assumed to be in little endian order. * If insert is true, the calculated checksum is inserted into * the range at *csump. Otherwise the calculated checksum is * checked against *csump and the result returned (true means * the range checksummed correctly). */ int util_checksum(void *addr, size_t len, uint64_t *csump, int insert, size_t skip_off) { if (len % 4 != 0) abort(); uint32_t *p32 = addr; uint32_t *p32end = (uint32_t *)((char *)addr + len); uint32_t *skip; uint32_t lo32 = 0; uint32_t hi32 = 0; uint64_t csum; if (skip_off) skip = (uint32_t *)((char *)addr + skip_off); else skip = (uint32_t *)((char *)addr + len); while (p32 < p32end) if (p32 == (uint32_t *)csump || p32 >= skip) { /* lo32 += 0; treat first 32-bits as zero */ p32++; hi32 += lo32; /* lo32 += 0; treat second 32-bits as zero */ p32++; hi32 += lo32; } else { lo32 += le32toh(*p32); ++p32; hi32 += lo32; } csum = (uint64_t)hi32 << 32 | lo32; if (insert) { *csump = htole64(csum); return 1; } return *csump == htole64(csum); } /* * util_checksum_seq -- compute sequential Fletcher64 checksum * * Merges checksum from the old buffer with checksum for current buffer. */ uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum) { if (len % 4 != 0) abort(); const uint32_t *p32 = addr; const uint32_t *p32end = (const uint32_t *)((const char *)addr + len); uint32_t lo32 = (uint32_t)csum; uint32_t hi32 = (uint32_t)(csum >> 32); while (p32 < p32end) { lo32 += le32toh(*p32); ++p32; hi32 += lo32; } return (uint64_t)hi32 << 32 | lo32; } /* * util_fgets -- fgets wrapper with conversion CRLF to LF */ char * util_fgets(char *buffer, int max, FILE *stream) { char *str = fgets(buffer, max, stream); if (str == NULL) goto end; int len = (int)strlen(str); if (len < 2) goto end; if (str[len - 2] == '\r' && str[len - 1] == '\n') { str[len - 2] = '\n'; str[len - 1] = '\0'; } end: return str; } struct suff { const char *suff; uint64_t mag; }; /* * util_parse_size -- parse size from string */ int util_parse_size(const char *str, size_t *sizep) { const struct suff suffixes[] = { { "B", 1ULL }, { "K", 1ULL << 10 }, /* JEDEC */ { "M", 1ULL << 20 }, { "G", 1ULL << 30 }, { "T", 1ULL << 40 }, { "P", 1ULL << 50 }, { "KiB", 1ULL << 10 }, /* IEC */ { "MiB", 1ULL << 20 }, { "GiB", 1ULL << 30 }, { "TiB", 1ULL << 40 }, { "PiB", 1ULL << 50 }, { "kB", 1000ULL }, /* SI */ { "MB", 1000ULL * 1000 }, { "GB", 1000ULL * 1000 * 1000 }, { "TB", 1000ULL * 1000 * 1000 * 1000 }, { "PB", 1000ULL * 1000 * 1000 * 1000 * 1000 } }; int res = -1; unsigned i; size_t size = 0; char unit[9] = {0}; int ret = sscanf(str, "%zu%8s", &size, unit); if (ret == 1) { res = 0; } else if (ret == 2) { for (i = 0; i < ARRAY_SIZE(suffixes); ++i) { if (strcmp(suffixes[i].suff, unit) == 0) { size = size * suffixes[i].mag; res = 0; break; } } } else { return -1; } if (sizep && res == 0) *sizep = size; return res; } #endif /* * util_init -- initialize the utils * * This is called from the library initialization code. 
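 *
 * Once it has run, Pagesize and Mmap_align are valid; e.g., a length can
 * be rounded up for mapping with the util.h helper (a hedged sketch,
 * "requested" being an illustrative variable):
 *
 *    size_t len = PAGE_ALIGNED_UP_SIZE(requested);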
*/ void util_init(void) { /* XXX - replace sysconf() with util_get_sys_xxx() */ if (Pagesize == 0) Pagesize = (unsigned long) sysconf(_SC_PAGESIZE); #ifndef _WIN32 Mmap_align = Pagesize; #else if (Mmap_align == 0) { SYSTEM_INFO si; GetSystemInfo(&si); Mmap_align = si.dwAllocationGranularity; } #endif #if ANY_VG_TOOL_ENABLED _On_valgrind = RUNNING_ON_VALGRIND; #endif #if VG_PMEMCHECK_ENABLED if (On_valgrind) { char *pmreorder_env = getenv("PMREORDER_EMIT_LOG"); if (pmreorder_env) _Pmreorder_emit = atoi(pmreorder_env); } else { _Pmreorder_emit = 0; } #endif } #if 0 /* * util_concat_str -- concatenate two strings */ char * util_concat_str(const char *s1, const char *s2) { char *result = malloc(strlen(s1) + strlen(s2) + 1); if (!result) return NULL; strcpy(result, s1); strcat(result, s2); return result; } /* * util_localtime -- a wrapper for localtime function * * localtime can set nonzero errno even if it succeeds (e.g. when there is no * /etc/localtime file under Linux) and we do not want the errno to be polluted * in such cases. */ struct tm * util_localtime(const time_t *timep) { int oerrno = errno; struct tm *tm = localtime(timep); if (tm != NULL) errno = oerrno; return tm; } /* * util_safe_strcpy -- copies string from src to dst, returns -1 * when length of source string (including null-terminator) * is greater than max_length, 0 otherwise * * For gcc (found in version 8.1.1) calling this function with * max_length equal to dst size produces -Wstringop-truncation warning * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85902 */ #ifdef STRINGOP_TRUNCATION_SUPPORTED #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstringop-truncation" #endif int util_safe_strcpy(char *dst, const char *src, size_t max_length) { if (max_length == 0) return -1; strncpy(dst, src, max_length); return dst[max_length - 1] == '\0' ? 0 : -1; } #ifdef STRINGOP_TRUNCATION_SUPPORTED #pragma GCC diagnostic pop #endif #define PARSER_MAX_LINE (PATH_MAX + 1024) /* * util_readline -- read line from stream */ char * util_readline(FILE *fh) { size_t bufsize = PARSER_MAX_LINE; size_t position = 0; char *buffer = NULL; do { char *tmp = buffer; buffer = Realloc(buffer, bufsize); if (buffer == NULL) { Free(tmp); return NULL; } /* ensure if we can cast bufsize to int */ char *s = util_fgets(buffer + position, (int)bufsize / 2, fh); if (s == NULL) { Free(buffer); return NULL; } position = strlen(buffer); bufsize *= 2; } while (!feof(fh) && buffer[position - 1] != '\n'); return buffer; } /* * util_print_mem -- print a near-string piece of memory, escaping non-text */ void util_print_mem(const char *s, size_t len) { for (; len > 0; len--, s++) { switch (*s) { case 0: printf("\\0"); break; case '\b': printf("\\b"); break; case '\e': printf("\\e"); break; case '\n': printf("\\n"); break; case '\r': printf("\\r"); break; case '\\': printf("\\\\"); break; case 0x7f: printf("\\x7f"); break; default: printf((*s >= ' ' && *s < 0x7f) ? 
"%c" : "\\x%02x", *s); } } } #endif /* * env_yesno10 -- check an env var for 1/0/y/n, fatal if invalid */ int env_yesno10(const char *var, int def_answer) { const char *q = getenv(var); if (!q) return def_answer; if (!strcasecmp(q, "0") || !strcasecmp(q, "n") || !strcasecmp(q, "no")) return 0; if (!strcasecmp(q, "1") || !strcasecmp(q, "y") || !strcasecmp(q, "yes")) return 1; FATAL("env var %s needs to be 0 or 1", var); } vmemcache-0.8.1/src/util.h000066400000000000000000000402441374403322600153750ustar00rootroot00000000000000/* * Copyright 2014-2019, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * util.h -- internal definitions for util module */ #ifndef PMDK_UTIL_H #define PMDK_UTIL_H 1 #include #include #include #include #include #ifdef _MSC_VER #include /* popcnt, bitscan */ #endif #include #ifdef __cplusplus extern "C" { #endif extern unsigned long long Pagesize; extern unsigned long long Mmap_align; #define CACHELINE_SIZE 64ULL #define PAGE_ALIGNED_DOWN_SIZE(size) ((size) & ~(Pagesize - 1)) #define PAGE_ALIGNED_UP_SIZE(size)\ PAGE_ALIGNED_DOWN_SIZE((size) + (Pagesize - 1)) #define IS_PAGE_ALIGNED(size) (((size) & (Pagesize - 1)) == 0) #define PAGE_ALIGN_UP(addr) ((void *)PAGE_ALIGNED_UP_SIZE((uintptr_t)(addr))) #define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1)) #define ALIGN_DOWN(size, align) ((size) & ~((align) - 1)) #define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp))) #define util_alignof(t) offsetof(struct {char _util_c; t _util_m; }, _util_m) #define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b)))) /* * overridable names for malloc & friends used by this library */ #define Malloc malloc #define Free free #define Realloc realloc #define Strdup strdup extern void *Zalloc(size_t sz); void util_init(void); int util_is_zeroed(const void *addr, size_t len); int util_checksum(void *addr, size_t len, uint64_t *csump, int insert, size_t skip_off); uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum); int util_parse_size(const char *str, size_t *sizep); char *util_fgets(char *buffer, int max, FILE *stream); char *util_getexecname(char *path, size_t pathlen); char *util_part_realpath(const char *path); int util_compare_file_inodes(const char *path1, const char *path2); void *util_aligned_malloc(size_t alignment, size_t size); void util_aligned_free(void *ptr); struct tm *util_localtime(const time_t *timep); int util_safe_strcpy(char *dst, const char *src, size_t max_length); void util_emit_log(const char *lib, const char *func, int order); char *util_readline(FILE *fh); int env_yesno10(const char *var, int def); void util_print_mem(const char *s, size_t len); #ifdef _WIN32 char *util_toUTF8(const wchar_t *wstr); wchar_t *util_toUTF16(const char *wstr); void util_free_UTF8(char *str); void util_free_UTF16(wchar_t *str); int util_toUTF16_buff(const char *in, wchar_t *out, size_t out_size); int util_toUTF8_buff(const wchar_t *in, char *out, size_t out_size); #endif #define UTIL_MAX_ERR_MSG 128 void util_strerror(int errnum, char *buff, size_t bufflen); /* * Macro calculates number of elements in given table */ #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif #ifdef _MSC_VER #define force_inline inline __forceinline #define NORETURN __declspec(noreturn) #else #define force_inline __attribute__((always_inline)) inline #define NORETURN __attribute__((noreturn)) #endif #define util_get_not_masked_bits(x, mask) ((x) & ~(mask)) /* * util_setbit -- setbit macro substitution which properly deals with types */ static inline void util_setbit(uint8_t *b, uint32_t i) { b[i / 8] = (uint8_t)(b[i / 8] | (uint8_t)(1 << (i % 8))); } /* * util_clrbit -- clrbit macro substitution which properly deals with types */ static inline void util_clrbit(uint8_t *b, uint32_t i) { b[i / 8] = (uint8_t)(b[i / 8] & (uint8_t)(~(1 << (i % 8)))); } #define util_isset(a, i) isset(a, i) #define util_isclr(a, i) isclr(a, i) #define util_flag_isset(a, f) ((a) & (f)) #define util_flag_isclr(a, f) (((a) & (f)) == 0) /* * util_is_pow2 -- returns !0 when there's only 1 bit set in v, 0 otherwise */ static force_inline int 
util_is_pow2(uint64_t v) { return v && !(v & (v - 1)); } /* * util_div_ceil -- divides a by b and rounds up the result */ static force_inline unsigned util_div_ceil(unsigned a, unsigned b) { return (unsigned)(((unsigned long)a + b - 1) / b); } /* * util_bool_compare_and_swap -- perform an atomic compare and swap * util_fetch_and_* -- perform an operation atomically, return old value * util_synchronize -- issue a full memory barrier * util_popcount -- count number of set bits * util_lssb_index -- return index of least significant set bit, * undefined on zero * util_mssb_index -- return index of most significant set bit * undefined on zero * * XXX assertions needed on (value != 0) in both versions of bitscans * */ #ifndef _MSC_VER /* * ISO C11 -- 7.17.1.4 * memory_order - an enumerated type whose enumerators identify memory ordering * constraints. */ typedef enum { memory_order_relaxed = __ATOMIC_RELAXED, memory_order_consume = __ATOMIC_CONSUME, memory_order_acquire = __ATOMIC_ACQUIRE, memory_order_release = __ATOMIC_RELEASE, memory_order_acq_rel = __ATOMIC_ACQ_REL, memory_order_seq_cst = __ATOMIC_SEQ_CST } memory_order; /* * ISO C11 -- 7.17.7.2 The atomic_load generic functions * Integer width specific versions as supplement for: * * * #include * C atomic_load(volatile A *object); * C atomic_load_explicit(volatile A *object, memory_order order); * * The atomic_load interface doesn't return the loaded value, but instead * copies it to a specified address -- see comments at the MSVC version. * * Also, instead of generic functions, two versions are available: * for 32 bit fundamental integers, and for 64 bit ones. */ #define util_atomic_load_explicit32 __atomic_load #define util_atomic_load_explicit64 __atomic_load /* * ISO C11 -- 7.17.7.1 The atomic_store generic functions * Integer width specific versions as supplement for: * * #include * void atomic_store(volatile A *object, C desired); * void atomic_store_explicit(volatile A *object, C desired, * memory_order order); */ #define util_atomic_store_explicit32 __atomic_store_n #define util_atomic_store_explicit64 __atomic_store_n /* * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html * https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html * https://clang.llvm.org/docs/LanguageExtensions.html#builtin-functions */ #define util_bool_compare_and_swap32 __sync_bool_compare_and_swap #define util_bool_compare_and_swap64 __sync_bool_compare_and_swap #define util_val_compare_and_swap32 __sync_val_compare_and_swap #define util_val_compare_and_swap64 __sync_val_compare_and_swap #define util_fetch_and_add32(ptr, val) __atomic_fetch_add(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_add64(ptr, val) __atomic_fetch_add(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_sub32(ptr, val) __atomic_fetch_sub(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_sub64(ptr, val) __atomic_fetch_sub(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_and32(ptr, val) __atomic_fetch_and(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_and64(ptr, val) __atomic_fetch_and(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_or32(ptr, val) __atomic_fetch_or(ptr, val, \ __ATOMIC_SEQ_CST) #define util_fetch_and_or64(ptr, val) __atomic_fetch_or(ptr, val, \ __ATOMIC_SEQ_CST) #define util_synchronize __sync_synchronize #define util_popcount(value) ((unsigned char)__builtin_popcount(value)) #define util_popcount64(value) ((unsigned char)__builtin_popcountll(value)) #define util_lssb_index(value) ((unsigned char)__builtin_ctz(value)) #define 
util_lssb_index64(value) ((unsigned char)__builtin_ctzll(value)) #define util_mssb_index(value) ((unsigned char)(31 - __builtin_clz(value))) #define util_mssb_index64(value) ((unsigned char)(63 - __builtin_clzll(value))) #else /* ISO C11 -- 7.17.1.4 */ typedef enum { memory_order_relaxed, memory_order_consume, memory_order_acquire, memory_order_release, memory_order_acq_rel, memory_order_seq_cst } memory_order; /* * ISO C11 -- 7.17.7.2 The atomic_load generic functions * Integer width specific versions as supplement for: * * * #include * C atomic_load(volatile A *object); * C atomic_load_explicit(volatile A *object, memory_order order); * * The atomic_load interface doesn't return the loaded value, but instead * copies it to a specified address. * The MSVC specific implementation needs to trigger a barrier (at least * compiler barrier) after the load from the volatile value. The actual load * from the volatile value itself is expected to be atomic. * * The actual isnterface here: * #include * void util_atomic_load32(volatile A *object, A *destination); * void util_atomic_load64(volatile A *object, A *destination); * void util_atomic_load_explicit32(volatile A *object, A *destination, * memory_order order); * void util_atomic_load_explicit64(volatile A *object, A *destination, * memory_order order); */ #ifndef _M_X64 #error MSVC ports of util_atomic_ only work on X86_64 #endif #if _MSC_VER >= 2000 #error util_atomic_ utility functions not tested with this version of VC++ #error These utility functions are not future proof, as they are not #error based on publicly available documentation. #endif #define util_atomic_load_explicit(object, dest, order)\ do {\ COMPILE_ERROR_ON(order != memory_order_seq_cst &&\ order != memory_order_consume &&\ order != memory_order_acquire &&\ order != memory_order_relaxed);\ *dest = *object;\ if (order == memory_order_seq_cst ||\ order == memory_order_consume ||\ order == memory_order_acquire)\ _ReadWriteBarrier();\ } while (0) #define util_atomic_load_explicit32 util_atomic_load_explicit #define util_atomic_load_explicit64 util_atomic_load_explicit /* ISO C11 -- 7.17.7.1 The atomic_store generic functions */ #define util_atomic_store_explicit64(object, desired, order)\ do {\ COMPILE_ERROR_ON(order != memory_order_seq_cst &&\ order != memory_order_release &&\ order != memory_order_relaxed);\ if (order == memory_order_seq_cst) {\ _InterlockedExchange64(\ (volatile long long *)object, desired);\ } else {\ if (order == memory_order_release)\ _ReadWriteBarrier();\ *object = desired;\ }\ } while (0) #define util_atomic_store_explicit32(object, desired, order)\ do {\ COMPILE_ERROR_ON(order != memory_order_seq_cst &&\ order != memory_order_release &&\ order != memory_order_relaxed);\ if (order == memory_order_seq_cst) {\ _InterlockedExchange(\ (volatile long *)object, desired);\ } else {\ if (order == memory_order_release)\ _ReadWriteBarrier();\ *object = desired;\ }\ } while (0) /* * https://msdn.microsoft.com/en-us/library/hh977022.aspx */ static __inline int bool_compare_and_swap32_VC(volatile LONG *ptr, LONG oldval, LONG newval) { LONG old = InterlockedCompareExchange(ptr, newval, oldval); return (old == oldval); } static __inline int bool_compare_and_swap64_VC(volatile LONG64 *ptr, LONG64 oldval, LONG64 newval) { LONG64 old = InterlockedCompareExchange64(ptr, newval, oldval); return (old == oldval); } #define util_bool_compare_and_swap32(p, o, n)\ bool_compare_and_swap32_VC((LONG *)(p), (LONG)(o), (LONG)(n)) #define util_bool_compare_and_swap64(p, o, n)\ 
bool_compare_and_swap64_VC((LONG64 *)(p), (LONG64)(o), (LONG64)(n)) #define util_fetch_and_add32(ptr, value)\ InterlockedExchangeAdd((LONG *)(ptr), value) #define util_fetch_and_add64(ptr, value)\ InterlockedExchangeAdd64((LONG64 *)(ptr), value) #define util_fetch_and_sub32(ptr, value)\ InterlockedExchangeSubtract((LONG *)(ptr), value) #define util_fetch_and_sub64(ptr, value)\ InterlockedExchangeAdd64((LONG64 *)(ptr), -((LONG64)(value))) #define util_fetch_and_and32(ptr, value)\ InterlockedAnd((LONG *)(ptr), value) #define util_fetch_and_and64(ptr, value)\ InterlockedAnd64((LONG64 *)(ptr), value) #define util_fetch_and_or32(ptr, value)\ InterlockedOr((LONG *)(ptr), value) #define util_fetch_and_or64(ptr, value)\ InterlockedOr64((LONG64 *)(ptr), value) static __inline void util_synchronize(void) { MemoryBarrier(); } #define util_popcount(value) (unsigned char)__popcnt(value) #define util_popcount64(value) (unsigned char)__popcnt64(value) static __inline unsigned char util_lssb_index(int value) { unsigned long ret; _BitScanForward(&ret, value); return (unsigned char)ret; } static __inline unsigned char util_lssb_index64(long long value) { unsigned long ret; _BitScanForward64(&ret, value); return (unsigned char)ret; } static __inline unsigned char util_mssb_index(int value) { unsigned long ret; _BitScanReverse(&ret, value); return (unsigned char)ret; } static __inline unsigned char util_mssb_index64(long long value) { unsigned long ret; _BitScanReverse64(&ret, value); return (unsigned char)ret; } #endif /* ISO C11 -- 7.17.7 Operations on atomic types */ #define util_atomic_load32(object, dest)\ util_atomic_load_explicit32(object, dest, memory_order_seq_cst) #define util_atomic_load64(object, dest)\ util_atomic_load_explicit64(object, dest, memory_order_seq_cst) #define util_atomic_store32(object, desired)\ util_atomic_store_explicit32(object, desired, memory_order_seq_cst) #define util_atomic_store64(object, desired)\ util_atomic_store_explicit64(object, desired, memory_order_seq_cst) /* * util_get_printable_ascii -- convert non-printable ascii to dot '.' */ static inline char util_get_printable_ascii(char c) { return isprint((unsigned char)c) ? c : '.'; } char *util_concat_str(const char *s1, const char *s2); #if !defined(likely) #if defined(__GNUC__) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else #define likely(x) (!!(x)) #define unlikely(x) (!!(x)) #endif #endif #if defined(__CHECKER__) #define COMPILE_ERROR_ON(cond) #define ASSERT_COMPILE_ERROR_ON(cond) #elif defined(_MSC_VER) #define COMPILE_ERROR_ON(cond) C_ASSERT(!(cond)) /* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */ #define ASSERT_COMPILE_ERROR_ON(cond) do {} while (0) #else #define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? 
-1 : 1])) #define ASSERT_COMPILE_ERROR_ON(cond) COMPILE_ERROR_ON(cond) #endif #ifndef _MSC_VER #define ATTR_CONSTRUCTOR __attribute__((constructor)) static #define ATTR_DESTRUCTOR __attribute__((destructor)) static #else #define ATTR_CONSTRUCTOR #define ATTR_DESTRUCTOR #endif #ifndef _MSC_VER #define CONSTRUCTOR(fun) ATTR_CONSTRUCTOR #else #ifdef __cplusplus #define CONSTRUCTOR(fun) \ void fun(); \ struct _##fun { \ _##fun() { \ fun(); \ } \ }; static _##fun foo; \ static #else #define CONSTRUCTOR(fun) \ MSVC_CONSTR(fun) \ static #endif #endif #ifdef __GNUC__ #define CHECK_FUNC_COMPATIBLE(func1, func2)\ COMPILE_ERROR_ON(!__builtin_types_compatible_p(typeof(func1),\ typeof(func2))) #else #define CHECK_FUNC_COMPATIBLE(func1, func2) do {} while (0) #endif /* __GNUC__ */ #ifdef __cplusplus } #endif #endif /* util.h */ vmemcache-0.8.1/src/util_posix.c000066400000000000000000000044521374403322600166130ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * util_posix.c -- Abstraction layer for misc utilities (Posix implementation) */ #include #include #include #include #include #include "util.h" #include "os.h" #include "out.h" /* pass through for Posix */ void util_strerror(int errnum, char *buff, size_t bufflen) { strerror_r(errnum, buff, bufflen); } #if 0 /* * util_part_realpath -- get canonicalized absolute pathname * * As paths used in a poolset file have to be absolute (checked when parsing * a poolset file), here we only have to resolve symlinks. */ char * util_part_realpath(const char *path) { return realpath(path, NULL); } /* * util_compare_file_inodes -- compare device and inodes of two files; * this resolves hard links */ int util_compare_file_inodes(const char *path1, const char *path2) { struct stat sb1, sb2; if (os_stat(path1, &sb1)) { if (errno != ENOENT) { ERR("!stat failed for %s", path1); return -1; } LOG(1, "stat failed for %s", path1); errno = 0; return strcmp(path1, path2) != 0; } if (os_stat(path2, &sb2)) { if (errno != ENOENT) { ERR("!stat failed for %s", path2); return -1; } LOG(1, "stat failed for %s", path2); errno = 0; return strcmp(path1, path2) != 0; } return sb1.st_dev != sb2.st_dev || sb1.st_ino != sb2.st_ino; } /* * util_aligned_malloc -- allocate aligned memory */ void * util_aligned_malloc(size_t alignment, size_t size) { void *retval = NULL; errno = posix_memalign(&retval, alignment, size); return retval; } /* * util_aligned_free -- free allocated memory in util_aligned_malloc */ void util_aligned_free(void *ptr) { free(ptr); } #endif /* * util_getexecname -- return name of current executable */ char * util_getexecname(char *path, size_t pathlen) { ASSERT(pathlen != 0); ssize_t cc; #ifdef __FreeBSD__ #include #include int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1}; cc = (sysctl(mib, 4, path, &pathlen, NULL, 0) == -1) ? 
-1 : (ssize_t)pathlen; #else cc = readlink("/proc/self/exe", path, pathlen); #endif if (cc == -1) { strncpy(path, "unknown", pathlen); path[pathlen - 1] = '\0'; } else { path[cc] = '\0'; } return path; } vmemcache-0.8.1/src/valgrind_internal.h000066400000000000000000000251731374403322600201260ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * valgrind_internal.h -- internal definitions for valgrind macros */ #ifndef PMDK_VALGRIND_INTERNAL_H #define PMDK_VALGRIND_INTERNAL_H 1 #ifndef _WIN32 #ifndef VALGRIND_ENABLED #define VALGRIND_ENABLED 0 #endif #endif #if VALGRIND_ENABLED #define VG_PMEMCHECK_ENABLED 0 #define VG_HELGRIND_ENABLED 1 #define VG_MEMCHECK_ENABLED 1 #define VG_DRD_ENABLED 1 #endif #if VG_PMEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_MEMCHECK_ENABLED || \ VG_DRD_ENABLED #define ANY_VG_TOOL_ENABLED 1 #else #define ANY_VG_TOOL_ENABLED 0 #endif #if ANY_VG_TOOL_ENABLED extern unsigned _On_valgrind; #define On_valgrind __builtin_expect(_On_valgrind, 0) #include "valgrind/valgrind.h" #else #define On_valgrind (0) #endif #if VG_HELGRIND_ENABLED #include "valgrind/helgrind.h" #endif #if VG_DRD_ENABLED #include "valgrind/drd.h" #endif #if VG_HELGRIND_ENABLED || VG_DRD_ENABLED #define VALGRIND_ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) do {\ if (On_valgrind) \ ANNOTATE_HAPPENS_BEFORE_FORGET_ALL((obj));\ } while (0) #define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do {\ if (On_valgrind) \ ANNOTATE_HAPPENS_BEFORE((obj));\ } while (0) #define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do {\ if (On_valgrind) \ ANNOTATE_HAPPENS_AFTER((obj));\ } while (0) #define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\ if (On_valgrind) \ ANNOTATE_NEW_MEMORY((addr), (size));\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_READS_BEGIN();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_END() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_READS_END();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_WRITES_BEGIN();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_WRITES_END();\ } while (0) #else #define VALGRIND_ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \ do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\ (void) (addr);\ (void) (size);\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_END() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {} while (0) #endif #if VG_PMEMCHECK_ENABLED #include "valgrind/pmemcheck.h" void pobj_emit_log(const char *func, int order); void pmem_emit_log(const char *func, int order); extern int _Pmreorder_emit; #define Pmreorder_emit __builtin_expect(_Pmreorder_emit, 0) #define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REGISTER_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\ if (On_valgrind)\ VALGRIND_PMC_REGISTER_PMEM_FILE((desc), (base_addr), (size), \ (offset));\ } while (0) #define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REMOVE_PMEM_MAPPING((addr), (len));\ } while (0) #define 
VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_CHECK_IS_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_PRINT_PMEM_MAPPINGS do {\ if (On_valgrind)\ VALGRIND_PMC_PRINT_PMEM_MAPPINGS;\ } while (0) #define VALGRIND_DO_FLUSH(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_DO_FLUSH((addr), (len));\ } while (0) #define VALGRIND_DO_FENCE do {\ if (On_valgrind)\ VALGRIND_PMC_DO_FENCE;\ } while (0) #define VALGRIND_DO_PERSIST(addr, len) do {\ if (On_valgrind) {\ VALGRIND_PMC_DO_FLUSH((addr), (len));\ VALGRIND_PMC_DO_FENCE;\ }\ } while (0) #define VALGRIND_SET_CLEAN(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_SET_CLEAN(addr, len);\ } while (0) #define VALGRIND_WRITE_STATS do {\ if (On_valgrind)\ VALGRIND_PMC_WRITE_STATS;\ } while (0) #define VALGRIND_LOG_STORES do {\ if (On_valgrind)\ VALGRIND_PMC_LOG_STORES;\ } while (0) #define VALGRIND_NO_LOG_STORES do {\ if (On_valgrind)\ VALGRIND_PMC_NO_LOG_STORES;\ } while (0) #define VALGRIND_ADD_LOG_REGION(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_LOG_REGION((addr), (len));\ } while (0) #define VALGRIND_REMOVE_LOG_REGION(addr, len) do {\ if (On_valgrind)\ \ VALGRIND_PMC_REMOVE_LOG_REGION((addr), (len));\ } while (0) #define VALGRIND_EMIT_LOG(emit_log) do {\ if (On_valgrind)\ VALGRIND_PMC_EMIT_LOG((emit_log));\ } while (0) #define VALGRIND_START_TX do {\ if (On_valgrind)\ VALGRIND_PMC_START_TX;\ } while (0) #define VALGRIND_START_TX_N(txn) do {\ if (On_valgrind)\ VALGRIND_PMC_START_TX_N(txn);\ } while (0) #define VALGRIND_END_TX do {\ if (On_valgrind)\ VALGRIND_PMC_END_TX;\ } while (0) #define VALGRIND_END_TX_N(txn) do {\ if (On_valgrind)\ VALGRIND_PMC_END_TX_N(txn);\ } while (0) #define VALGRIND_ADD_TO_TX(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_TO_TX(addr, len);\ } while (0) #define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_TO_TX_N(txn, addr, len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REMOVE_FROM_TX(addr, len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REMOVE_FROM_TX_N(txn, addr, len);\ } while (0) #define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(addr, len);\ } while (0) /* * Logs library and function name with proper suffix * to pmemcheck store log file. 
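 *
 * A hedged sketch of the intended bracketing (the function below is
 * illustrative, not a real libpmemobj entry point):
 *
 *    void
 *    pmemobj_do_work(void)
 *    {
 *        PMEMOBJ_API_START();
 *        ... stores to be captured in the pmreorder log ...
 *        PMEMOBJ_API_END();
 *    }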
*/ #define PMEMOBJ_API_START()\ if (Pmreorder_emit)\ pobj_emit_log(__func__, 0); #define PMEMOBJ_API_END()\ if (Pmreorder_emit)\ pobj_emit_log(__func__, 1); #define PMEM_API_START()\ if (Pmreorder_emit)\ pmem_emit_log(__func__, 0); #define PMEM_API_END()\ if (Pmreorder_emit)\ pmem_emit_log(__func__, 1); #else #define Pmreorder_emit (0) #define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\ (void) (desc);\ (void) (base_addr);\ (void) (size);\ (void) (offset);\ } while (0) #define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_PRINT_PMEM_MAPPINGS do {} while (0) #define VALGRIND_DO_FLUSH(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_DO_FENCE do {} while (0) #define VALGRIND_DO_PERSIST(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_SET_CLEAN(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_WRITE_STATS do {} while (0) #define VALGRIND_LOG_STORES do {} while (0) #define VALGRIND_NO_LOG_STORES do {} while (0) #define VALGRIND_ADD_LOG_REGION(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_LOG_REGION(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_EMIT_LOG(emit_log) do {\ (void) (emit_log);\ } while (0) #define VALGRIND_START_TX do {} while (0) #define VALGRIND_START_TX_N(txn) do { (void) (txn); } while (0) #define VALGRIND_END_TX do {} while (0) #define VALGRIND_END_TX_N(txn) do {\ (void) (txn);\ } while (0) #define VALGRIND_ADD_TO_TX(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\ (void) (txn);\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\ (void) (txn);\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define PMEMOBJ_API_START() do {} while (0) #define PMEMOBJ_API_END() do {} while (0) #define PMEM_API_START() do {} while (0) #define PMEM_API_END() do {} while (0) #endif #if VG_MEMCHECK_ENABLED #include "valgrind/memcheck.h" #define VALGRIND_DO_DISABLE_ERROR_REPORTING do {\ if (On_valgrind)\ VALGRIND_DISABLE_ERROR_REPORTING;\ } while (0) #define VALGRIND_DO_ENABLE_ERROR_REPORTING do {\ if (On_valgrind)\ VALGRIND_ENABLE_ERROR_REPORTING;\ } while (0) #define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed) do {\ if (On_valgrind)\ VALGRIND_CREATE_MEMPOOL(heap, rzB, is_zeroed);\ } while (0) #define VALGRIND_DO_DESTROY_MEMPOOL(heap) do {\ if (On_valgrind)\ VALGRIND_DESTROY_MEMPOOL(heap);\ } while (0) #define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size) do {\ if (On_valgrind)\ VALGRIND_MEMPOOL_ALLOC(heap, addr, size);\ } while (0) #define VALGRIND_DO_MEMPOOL_FREE(heap, addr) do {\ if (On_valgrind)\ VALGRIND_MEMPOOL_FREE(heap, addr);\ } while (0) #define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size) do {\ if (On_valgrind)\ VALGRIND_MEMPOOL_CHANGE(heap, addrA, addrB, size);\ } while (0) #define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len) do {\ if (On_valgrind)\ VALGRIND_MAKE_MEM_DEFINED(addr, len);\ } while (0) #define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len) do {\ if 
(On_valgrind)\ VALGRIND_MAKE_MEM_UNDEFINED(addr, len);\ } while (0) #define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len) do {\ if (On_valgrind)\ VALGRIND_MAKE_MEM_NOACCESS(addr, len);\ } while (0) #define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len) do {\ if (On_valgrind)\ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len);\ } while (0) #else #define VALGRIND_DO_DISABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_DO_ENABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed)\ do { (void) (heap); (void) (rzB); (void) (is_zeroed); } while (0) #define VALGRIND_DO_DESTROY_MEMPOOL(heap)\ do { (void) (heap); } while (0) #define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size)\ do { (void) (heap); (void) (addr); (void) (size); } while (0) #define VALGRIND_DO_MEMPOOL_FREE(heap, addr)\ do { (void) (heap); (void) (addr); } while (0) #define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size)\ do {\ (void) (heap); (void) (addrA); (void) (addrB); (void) (size);\ } while (0) #define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len)\ do { (void) (addr); (void) (len); } while (0) #endif #endif vmemcache-0.8.1/src/vmemcache.c000066400000000000000000000415341374403322600163460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache.c -- vmemcache implementation */ #include #include #include "out.h" #include "file.h" #include "mmap.h" #include "libvmemcache.h" #include "vmemcache.h" #include "vmemcache_heap.h" #include "vmemcache_index.h" #include "vmemcache_repl.h" #include "valgrind_internal.h" /* * Arguments to currently running get request, during a callback. */ static __thread struct { const char *key; size_t ksize; void *vbuf; size_t vbufsize; size_t offset; size_t *vsize; } get_req = { 0 }; /* * vmemcache_new -- create a vmemcache */ VMEMcache * vmemcache_new() { LOG(3, NULL); VMEMcache *cache = Zalloc(sizeof(VMEMcache)); if (cache == NULL) { ERR("!Zalloc"); return NULL; } cache->repl_p = VMEMCACHE_REPLACEMENT_LRU; cache->extent_size = VMEMCACHE_MIN_EXTENT; return cache; } /* * vmemcache_set_eviction_policy */ int vmemcache_set_eviction_policy(VMEMcache *cache, enum vmemcache_repl_p repl_p) { LOG(3, "cache %p eviction policy %d", cache, repl_p); if (cache->ready) { ERR("cache already in use"); errno = EALREADY; return -1; } cache->repl_p = repl_p; return 0; } /* * vmemcache_set_size */ int vmemcache_set_size(VMEMcache *cache, size_t size) { LOG(3, "cache %p size %zu", cache, size); /* TODO: allow growing this way */ if (cache->ready) { ERR("cache already in use"); errno = EALREADY; return -1; } if (size < VMEMCACHE_MIN_POOL) { ERR("size %zu smaller than %zu", size, VMEMCACHE_MIN_POOL); errno = EINVAL; return -1; } if (size >= 1ULL << ((sizeof(void *) > 4) ? 
56 : 31)) { ERR("implausible large size %zu", size); errno = EINVAL; return -1; } cache->size = size; return 0; } /* * vmemcache_set_extent_size */ int vmemcache_set_extent_size(VMEMcache *cache, size_t extent_size) { LOG(3, "cache %p extent_size %zu", cache, extent_size); if (cache->ready) { ERR("cache already in use"); errno = EALREADY; return -1; } if (extent_size < VMEMCACHE_MIN_EXTENT) { ERR("extent size %zu smaller than %zu bytes", extent_size, VMEMCACHE_MIN_EXTENT); errno = EINVAL; return -1; } cache->extent_size = extent_size; return 0; } /* * vmemcache_addU -- (internal) open the backing file */ #ifndef _WIN32 static inline #endif int vmemcache_addU(VMEMcache *cache, const char *dir) { LOG(3, "cache %p dir %s", cache, dir); if (cache->ready) { ERR("the cache is already initialized"); errno = EBUSY; return -1; } size_t size = cache->size; if (size && cache->extent_size > size) { ERR( "extent size %zu larger than cache size: %zu bytes", cache->extent_size, size); errno = EINVAL; return -1; } if (size && size < VMEMCACHE_MIN_POOL) { ERR("cache size %zu smaller than %zu", size, VMEMCACHE_MIN_POOL); errno = EINVAL; return -1; } enum file_type type = util_file_get_type(dir); if (type == OTHER_ERROR) { LOG(1, "checking file type failed"); return -1; } if (type == TYPE_DEVDAX) { const char *devdax = dir; ssize_t dax_size = util_file_get_size(devdax); if (dax_size < 0) { LOG(1, "cannot determine file length \"%s\"", devdax); return -1; } if (size != 0 && size > (size_t)dax_size) { ERR( "error: maximum cache size (%zu) is bigger than the size of the DAX device (%zi)", size, dax_size); errno = EINVAL; return -1; } if (size == 0) { cache->size = (size_t)dax_size; } else { cache->size = roundup(size, Mmap_align); if (cache->size > (size_t)dax_size) cache->size = (size_t)dax_size; } cache->addr = util_file_map_whole(devdax); if (cache->addr == NULL) { LOG(1, "mapping of whole DAX device failed"); return -1; } } else { /* silently enforce multiple of mapping alignment */ cache->size = roundup(cache->size, Mmap_align); /* if not set, start with the default */ if (!cache->size) cache->size = VMEMCACHE_MIN_POOL; /* * XXX: file should be mapped on-demand during allocation, * up to cache->size */ cache->addr = util_map_tmpfile(dir, cache->size, 4 * MEGABYTE); if (cache->addr == NULL) { LOG(1, "mapping of a temporary file failed"); return -1; } } cache->heap = vmcache_heap_create(cache->addr, cache->size, cache->extent_size); if (cache->heap == NULL) { LOG(1, "heap initialization failed"); goto error_unmap; } cache->index = vmcache_index_new(); if (cache->index == NULL) { LOG(1, "indexing structure initialization failed"); goto error_destroy_heap; } cache->repl = repl_p_init(cache->repl_p); if (cache->repl == NULL) { LOG(1, "replacement policy initialization failed"); goto error_destroy_index; } cache->ready = 1; return 0; error_destroy_index: vmcache_index_delete(cache->index, vmemcache_delete_entry_cb); cache->index = NULL; error_destroy_heap: vmcache_heap_destroy(cache->heap); cache->heap = NULL; error_unmap: util_unmap(cache->addr, cache->size); cache->addr = NULL; return -1; } /* * vmemcache_delete_entry_cb -- callback deleting a vmemcache entry * for vmemcache_delete() */ void vmemcache_delete_entry_cb(struct cache_entry *entry) { Free(entry); } /* * vmemcache_delete -- destroy a vmemcache */ void vmemcache_delete(VMEMcache *cache) { LOG(3, "cache %p", cache); if (cache->ready) { repl_p_destroy(cache->repl); vmcache_index_delete(cache->index, vmemcache_delete_entry_cb); 
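	/*
	 * Tear-down mirrors vmemcache_addU() in reverse: the index
	 * callback above has already freed the remaining entries, so
	 * the heap and the file mapping can be released safely now.
	 */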
vmcache_heap_destroy(cache->heap); util_unmap(cache->addr, cache->size); } Free(cache); } /* * vmemcache_populate_extents -- (internal) copies content of value * to heap entries */ static void vmemcache_populate_extents(struct cache_entry *entry, const void *value, size_t value_size) { struct extent ext; size_t size_left = value_size; EXTENTS_FOREACH(ext, entry->value.extents) { ASSERT(size_left > 0); size_t len = (ext.size < size_left) ? ext.size : size_left; memcpy(ext.ptr, value, len); value = (char *)value + len; size_left -= len; } entry->value.vsize = value_size; } static void vmemcache_put_satisfy_get(const void *key, size_t ksize, const void *value, size_t value_size) { if (get_req.ksize != ksize || memcmp(get_req.key, key, ksize)) return; /* not our key */ get_req.key = NULL; /* mark request as satisfied */ if (get_req.offset >= value_size) { get_req.vbufsize = 0; } else { if (get_req.vbufsize > value_size - get_req.offset) get_req.vbufsize = value_size - get_req.offset; if (get_req.vbuf) memcpy(get_req.vbuf, value, get_req.vbufsize); } if (get_req.vsize) *get_req.vsize = value_size; } /* * vmemcache_put -- put an element into the vmemcache */ int vmemcache_put(VMEMcache *cache, const void *key, size_t ksize, const void *value, size_t value_size) { LOG(3, "cache %p key %p ksize %zu value %p value_size %zu", cache, key, ksize, value, value_size); if (get_req.key) vmemcache_put_satisfy_get(key, ksize, value, value_size); if (value_size > cache->size) { ERR("value larger than entire cache"); errno = ENOSPC; return -1; } struct cache_entry *entry; entry = Zalloc(sizeof(struct cache_entry) + ksize); if (entry == NULL) { ERR("!Zalloc"); return -1; } entry->key.ksize = ksize; memcpy(entry->key.key, key, ksize); if (cache->index_only || cache->no_alloc) goto put_index; ptr_ext_t *small_extent = NULL; /* required by vmcache_alloc() */ size_t left_to_allocate = value_size; while (left_to_allocate != 0) { ssize_t allocated = vmcache_alloc(cache->heap, left_to_allocate, &entry->value.extents, &small_extent); if (allocated < 0) goto error_exit; if (allocated == 0 && vmemcache_evict(cache, NULL, 0)) { LOG(1, "vmemcache_evict() failed"); if (errno == ESRCH) errno = ENOSPC; goto error_exit; } left_to_allocate -= MIN((size_t)allocated, left_to_allocate); } if (cache->no_memcpy) entry->value.vsize = value_size; else vmemcache_populate_extents(entry, value, value_size); put_index: if (vmcache_index_insert(cache->index, entry)) { LOG(1, "inserting to the index failed"); goto error_exit; } if (!cache->index_only) { cache->repl->ops->repl_p_insert(cache->repl->head, entry, &entry->value.p_entry); } return 0; error_exit: vmcache_free(cache->heap, entry->value.extents); Free(entry); return -1; } /* * vmemcache_populate_value -- (internal) copies content of heap entries * to the output value's buffer 'vbuf' starting * from the 'offset' */ static size_t vmemcache_populate_value(void *vbuf, size_t vbufsize, size_t offset, struct cache_entry *entry, int no_memcpy) { if (!vbuf || offset >= entry->value.vsize) return 0; size_t left_to_copy = entry->value.vsize - offset; struct extent ext; size_t copied = 0; EXTENTS_FOREACH(ext, entry->value.extents) { char *ptr = (char *)ext.ptr; size_t len = ext.size; if (offset) { if (offset > ext.size) { offset -= ext.size; continue; } ptr += offset; len -= offset; offset = 0; } size_t max_len = MIN(left_to_copy, vbufsize); if (len > max_len) len = max_len; if (!no_memcpy) memcpy(vbuf, ptr, len); vbufsize -= len; vbuf = (char *)vbuf + len; copied += len; left_to_copy -= 
len; if (vbufsize == 0 || left_to_copy == 0) return copied; } return copied; } /* * vmemcache_entry_acquire -- acquire pointer to the vmemcache entry */ void vmemcache_entry_acquire(struct cache_entry *entry) { uint64_t ret = util_fetch_and_add32(&entry->value.refcount, 1); ASSERTne(ret, 0); } /* * vmemcache_entry_release -- release or delete the vmemcache entry */ void vmemcache_entry_release(VMEMcache *cache, struct cache_entry *entry) { if (util_fetch_and_sub32(&entry->value.refcount, 1) != 1) { VALGRIND_ANNOTATE_HAPPENS_BEFORE(&entry->value.refcount); return; } /* 'refcount' equals 0 now - it means that the entry should be freed */ VALGRIND_ANNOTATE_HAPPENS_AFTER(&entry->value.refcount); VALGRIND_ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&entry->value.refcount); vmcache_free(cache->heap, entry->value.extents); Free(entry); } /* * vmemcache_get - get an element from the vmemcache, * returns the number of bytes read */ ssize_t vmemcache_get(VMEMcache *cache, const void *key, size_t ksize, void *vbuf, size_t vbufsize, size_t offset, size_t *vsize) { LOG(3, "cache %p key %p ksize %zu vbuf %p vbufsize %zu offset %zu vsize %p", cache, key, ksize, vbuf, vbufsize, offset, vsize); struct cache_entry *entry; size_t read = 0; int ret = vmcache_index_get(cache->index, key, ksize, &entry, 1); if (ret < 0) return -1; if (entry == NULL) { /* cache miss */ if (cache->on_miss) { get_req.key = key; get_req.ksize = ksize; get_req.vbuf = vbuf; get_req.vbufsize = vbufsize; get_req.offset = offset; get_req.vsize = vsize; (*cache->on_miss)(cache, key, ksize, cache->arg_miss); if (!get_req.key) return (ssize_t)get_req.vbufsize; get_req.key = NULL; } errno = ENOENT; /* * Needed for errormsg but wastes 13% of time. FIXME. * ERR("cache entry not found"); */ return -1; } if (cache->index_only) goto get_index; cache->repl->ops->repl_p_use(cache->repl->head, &entry->value.p_entry); if (cache->no_alloc) goto get_index; read = vmemcache_populate_value(vbuf, vbufsize, offset, entry, cache->no_memcpy); if (vsize) *vsize = entry->value.vsize; get_index: vmemcache_entry_release(cache, entry); return (ssize_t)read; } /* * vmemcache_exists -- checks, without side-effects, if a key exists */ int vmemcache_exists(VMEMcache *cache, const void *key, size_t key_size, size_t *vsize) { LOG(3, "cache %p key %p key_size %zu vsize %p", cache, key, key_size, vsize); struct cache_entry *entry; int ret = vmcache_index_get(cache->index, key, key_size, &entry, 0); if (ret < 0) return -1; if (entry == NULL) return 0; if (vsize) *vsize = entry->value.vsize; vmemcache_entry_release(cache, entry); return 1; } /* * vmemcache_evict -- evict an element from the vmemcache */ int vmemcache_evict(VMEMcache *cache, const void *key, size_t ksize) { LOG(3, "cache %p key %p ksize %zu", cache, key, ksize); struct cache_entry *entry = NULL; int evicted_from_repl_p = 0; if (key == NULL) { do { entry = cache->repl->ops->repl_p_evict( cache->repl->head, NULL); if (entry == NULL) { ERR("no element to evict"); return -1; } evicted_from_repl_p = 1; key = entry->key.key; ksize = entry->key.ksize; } while (!__sync_bool_compare_and_swap(&entry->value.evicting, 0, 1)); } else { int ret = vmcache_index_get(cache->index, key, ksize, &entry, 0); if (ret < 0) return -1; if (entry == NULL) { ERR( "vmemcache_evict: cannot find an element with the given key"); errno = ENOENT; return -1; } if (!__sync_bool_compare_and_swap(&entry->value.evicting, 0, 1)) { /* * Element with the given key is being evicted just now. * Release the reference from vmcache_index_get(). 
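			 * Returning 0 treats the eviction as already
			 * done; the concurrent evictor owns the actual
			 * teardown.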
			 */
			vmemcache_entry_release(cache, entry);
			return 0;
		}
	}

	if (cache->on_evict != NULL)
		(*cache->on_evict)(cache, key, ksize, cache->arg_evict);

	if (!evicted_from_repl_p) {
		if (cache->repl->ops->repl_p_evict(cache->repl->head,
				&entry->value.p_entry) == NULL) {
			/*
			 * The given entry is busy
			 * and cannot be evicted right now.
			 * Release the reference from vmcache_index_get().
			 */
			vmemcache_entry_release(cache, entry);
			/* reset 'evicting' flag */
			__sync_bool_compare_and_swap(&entry->value.evicting,
					1, 0);
			return -1;
		}
		/* release the reference from the replacement policy */
		vmemcache_entry_release(cache, entry);
	}

	/* release the element */
	vmemcache_entry_release(cache, entry);

	if (vmcache_index_remove(cache, entry)) {
		LOG(1, "removing from the index failed");
		goto exit_release;
	}

	return 0;

exit_release:
	/* release the element */
	vmemcache_entry_release(cache, entry);
	return -1;
}

/*
 * vmemcache_callback_on_evict -- install the 'on evict' callback
 */
void
vmemcache_callback_on_evict(VMEMcache *cache, vmemcache_on_evict *evict,
				void *arg)
{
	LOG(3, "cache %p evict %p arg %p", cache, evict, arg);

	cache->on_evict = evict;
	cache->arg_evict = arg;
}

/*
 * vmemcache_callback_on_miss -- install the 'on miss' callback
 */
void
vmemcache_callback_on_miss(VMEMcache *cache, vmemcache_on_miss *miss,
				void *arg)
{
	LOG(3, "cache %p miss %p arg %p", cache, miss, arg);

	cache->on_miss = miss;
	cache->arg_miss = arg;
}

/*
 * vmemcache_get_stat -- get the value of a single statistic
 */
int
vmemcache_get_stat(VMEMcache *cache, enum vmemcache_statistic stat,
			void *value, size_t value_size)
{
	LOG(3, "cache %p stat %d value %p value_size %zu",
		cache, stat, value, value_size);

	if (value_size != sizeof(stat_t)) {
		ERR("wrong size of the value: %zu (should be: %zu)",
			value_size, sizeof(stat_t));
		errno = EINVAL;
		return -1;
	}

	stat_t *val = value;

	switch (stat) {
	case VMEMCACHE_STAT_PUT:
	case VMEMCACHE_STAT_HIT:
	case VMEMCACHE_STAT_MISS:
	case VMEMCACHE_STAT_EVICT:
	case VMEMCACHE_STAT_ENTRIES:
		*val = vmemcache_index_get_stat(cache->index, stat);
		break;
	case VMEMCACHE_STAT_GET:
		*val = vmemcache_index_get_stat(cache->index,
				VMEMCACHE_STAT_HIT) +
			vmemcache_index_get_stat(cache->index,
				VMEMCACHE_STAT_MISS);
		break;
	case VMEMCACHE_STAT_DRAM_SIZE_USED:
		*val = vmemcache_index_get_stat(cache->index,
				VMEMCACHE_STAT_DRAM_SIZE_USED) +
			cache->repl->ops->dram_per_entry *
			vmemcache_index_get_stat(cache->index,
				VMEMCACHE_STAT_ENTRIES);
		break;
	case VMEMCACHE_STAT_POOL_SIZE_USED:
		*val = vmcache_get_heap_used_size(cache->heap);
		break;
	case VMEMCACHE_STAT_HEAP_ENTRIES:
		*val = vmcache_get_heap_entries_count(cache->heap);
		break;
	default:
		ERR("unknown value of statistic: %u", stat);
		errno = EINVAL;
		return -1;
	}

	return 0;
}

/*
 * prefault -- (internal) touch every page of the pool so that it is
 * mapped in before a benchmark starts measuring
 */
static void
prefault(VMEMcache *cache)
{
	char *p = cache->addr;
	char *limit = (char *)cache->addr + cache->size;
	while (p < limit) {
		*(volatile char *)p = *p;
		p += 4096; /* once per page is enough */
	}
}

/*
 * vmemcache_bench_set -- alter a benchmark parameter
 */
void
vmemcache_bench_set(VMEMcache *cache, enum vmemcache_bench_cfg cfg, size_t val)
{
	LOG(3, "cache %p cfg %d val %zu", cache, cfg, val);

	switch (cfg) {
	case VMEMCACHE_BENCH_INDEX_ONLY:
		cache->index_only = !!val;
		break;
	case VMEMCACHE_BENCH_NO_MEMCPY:
		cache->no_memcpy = !!val;
		break;
	case VMEMCACHE_BENCH_PREFAULT:
		prefault(cache);
		break;
	default:
		ERR("invalid config parameter: %u", cfg);
	}
}

#ifndef _WIN32
/*
 * vmemcache_add -- add a backing file to vmemcache
 */
int
vmemcache_add(VMEMcache *cache, const char *path)
{
	return vmemcache_addU(cache, path);
}
#else
/*
 * vmemcache_addW -- add a backing
file to vmemcache, wchar version */ int vmemcache_addW(VMEMcache *cache, const wchar_t *path) { char *upath = util_toUTF8(path); if (upath == NULL) return -1; int ret = vmemcache_addU(cache, upath); util_free_UTF8(upath); return ret; } #endif vmemcache-0.8.1/src/vmemcache.h000066400000000000000000000035521374403322600163510ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache.h -- internal definitions for vmemcache */ #ifndef VMEMCACHE_H #define VMEMCACHE_H 1 #include #include #include "libvmemcache.h" #include "vmemcache_heap.h" #ifdef __cplusplus extern "C" { #endif #define VMEMCACHE_PREFIX "libvmemcache" #define VMEMCACHE_LEVEL_VAR "VMEMCACHE_LEVEL" #define VMEMCACHE_FILE_VAR "VMEMCACHE_FILE" struct index; struct repl_p; struct vmemcache { void *addr; /* mapping address */ size_t size; /* mapping size */ size_t extent_size; /* heap granularity */ struct heap *heap; /* heap address */ struct index *index; /* indexing structure */ enum vmemcache_repl_p repl_p; /* replacement policy */ struct repl_p *repl; /* replacement policy abstraction */ vmemcache_on_evict *on_evict; /* callback on evict */ void *arg_evict; /* argument for callback on evict */ vmemcache_on_miss *on_miss; /* callback on miss */ void *arg_miss; /* argument for callback on miss */ unsigned ready:1; /* is the cache ready for use? */ unsigned index_only:1; /* bench: disable repl+alloc */ unsigned no_alloc:1; /* bench: disable allocations */ unsigned no_memcpy:1; /* bench: don't copy actual data */ }; struct cache_entry { struct value { uint32_t refcount; int evicting; struct repl_p_entry *p_entry; size_t vsize; ptr_ext_t *extents; } value; struct key { size_t ksize; char key[]; } key; }; /* type of callback deleting a cache entry */ typedef void (*delete_entry_t)(struct cache_entry *entry); /* callback deleting a cache entry (of the above type 'delete_entry_t') */ void vmemcache_delete_entry_cb(struct cache_entry *entry); void vmemcache_entry_acquire(struct cache_entry *entry); void vmemcache_entry_release(VMEMcache *cache, struct cache_entry *entry); #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/vmemcache_heap.c000066400000000000000000000315661374403322600173470ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_heap.c -- implementation of simple vmemcache linear allocator */ #include "vmemcache_heap.h" #include "sys_util.h" #define GUARD_SIZE ((uintptr_t)0x1000) /* 4096 bytes */ #define IS_ALLOCATED 1 #define IS_FREE 0 /* flag: this extent is allocated */ #define FLAG_ALLOCATED ((sizeof(void *) > 4) ? (1ULL << 63) : (1UL << 31)) /* mask of all flags */ #define MASK_FLAGS (~FLAG_ALLOCATED) #define SIZE_FLAGS(size, is_allocated) \ (is_allocated) ? 
((size) | FLAG_ALLOCATED) : (size) struct heap { os_mutex_t lock; size_t extent_size; ptr_ext_t *first_extent; /* statistics */ stat_t size_used; /* current size of memory pool used for values */ stat_t entries; /* current number of heap entries */ }; struct header { ptr_ext_t *next; ptr_ext_t *prev; uint64_t size_flags; }; struct footer { uint64_t size_flags; }; /* heap entry ('struct extent' with header and footer ) */ struct heap_entry { struct header *ptr; size_t size; }; #define HEADER_SIZE (sizeof(struct header)) #define FOOTER_SIZE (sizeof(struct footer)) /* size of a header and a footer */ #define HFER_SIZE (HEADER_SIZE + FOOTER_SIZE) static void vmcache_heap_merge(struct heap *heap, struct extent *ext, struct heap_entry *he); /* * vmcache_new_heap_entry -- create a new heap entry */ static inline struct header * vmcache_new_heap_entry(struct header *ptr, size_t alloc_size) { return (struct header *)((uintptr_t)ptr + alloc_size); } /* * vmcache_create_footer_address -- create a footer address for the heap entry */ static inline struct footer * vmcache_create_footer_address(struct heap_entry *he) { return (struct footer *)((uintptr_t)he->ptr + he->size - FOOTER_SIZE); } /* * vmcache_create_ext_ptr -- create an extent pointer for the heap entry */ static inline ptr_ext_t * vmcache_create_ext_ptr(struct heap_entry *he) { return (ptr_ext_t *)((uintptr_t)he->ptr + HEADER_SIZE); } /* * vmcache_extent_get_header -- get the header of the extent */ static inline struct header * vmcache_extent_get_header(ptr_ext_t *ptr) { return (struct header *)((uintptr_t)ptr - HEADER_SIZE); } /* * vmcache_extent_get_footer -- get the footer of the extent */ static inline struct footer * vmcache_extent_get_footer(ptr_ext_t *ptr) { struct header *header = vmcache_extent_get_header(ptr); size_t size = header->size_flags & MASK_FLAGS; return (struct footer *)((uintptr_t)ptr + size); } /* * vmcache_extent_get_next -- get the pointer to the next extent */ ptr_ext_t * vmcache_extent_get_next(ptr_ext_t *ptr) { if (ptr == NULL) return NULL; return vmcache_extent_get_header(ptr)->next; } /* * vmcache_extent_get_size -- get size of the extent */ size_t vmcache_extent_get_size(ptr_ext_t *ptr) { if (ptr == NULL) return 0; return vmcache_extent_get_header(ptr)->size_flags & MASK_FLAGS; } /* * vmcache_get_prev_footer -- get the address of the footer * of the previous extent */ static inline struct footer * vmcache_get_prev_footer(struct extent *ext) { return (struct footer *)((uintptr_t)ext->ptr - HFER_SIZE); } /* * vmcache_get_next_ptr_ext -- get the pointer to the next extent */ static inline ptr_ext_t * vmcache_get_next_ptr_ext(struct extent *ext) { return (ptr_ext_t *)((uintptr_t)ext->ptr + ext->size + HFER_SIZE); } /* * vmcache_get_prev_ptr_ext -- get the pointer to the previous extent */ static inline ptr_ext_t * vmcache_get_prev_ptr_ext(struct footer *footer, size_t size) { return (ptr_ext_t *)((uintptr_t)footer - size); } /* * vmcache_insert_heap_entry -- insert the 'he' entry into the list of extents */ static inline int vmcache_insert_heap_entry(struct heap *heap, struct heap_entry *he, ptr_ext_t **first_extent, int is_allocated) { struct header *header = he->ptr; struct footer *footer = vmcache_create_footer_address(he); /* pointer and size of a new extent */ ptr_ext_t *new_extent = vmcache_create_ext_ptr(he); size_t size_flags = SIZE_FLAGS((he->size - HFER_SIZE), is_allocated); /* save the header */ header->next = *first_extent; header->prev = NULL; header->size_flags = size_flags; /* save the footer */ 
footer->size_flags = size_flags; if (*first_extent) { struct header *first_header = vmcache_extent_get_header(*first_extent); ASSERTeq(first_header->prev, NULL); first_header->prev = new_extent; } *first_extent = new_extent; #ifdef STATS_ENABLED if (!is_allocated) heap->entries++; #endif return 0; } /* * vmcache_pop_heap_entry -- pop the free entry from the heap */ static inline int vmcache_pop_heap_entry(struct heap *heap, struct heap_entry *he) { if (heap->first_extent == NULL) return -1; struct header *header = vmcache_extent_get_header(heap->first_extent); struct footer *footer = vmcache_extent_get_footer(heap->first_extent); ASSERTeq(header->prev, NULL); ASSERTeq((header->size_flags & FLAG_ALLOCATED), 0); /* is free */ ASSERTeq(header->size_flags, footer->size_flags); he->ptr = header; he->size = header->size_flags + HFER_SIZE; if (header->next) { struct header *next_header = vmcache_extent_get_header(header->next); ASSERTne(next_header->prev, NULL); next_header->prev = NULL; } heap->first_extent = header->next; #ifdef STATS_ENABLED heap->entries--; #endif return 0; } /* * vmcache_heap_add_mapping -- add new memory mapping to vmemcache heap */ static void vmcache_heap_add_mapping(struct heap *heap, void *addr, size_t size) { LOG(3, "heap %p addr %p size %zu", heap, addr, size); void *new_addr; size_t new_size; /* reserve 64 bytes for a guard header */ new_addr = (void *)ALIGN_UP((uintptr_t)addr + GUARD_SIZE, GUARD_SIZE); if (new_addr > addr) size -= ((uintptr_t)new_addr - (uintptr_t)addr); /* reserve 64 bytes for a guard footer */ new_size = ALIGN_DOWN(size - GUARD_SIZE, GUARD_SIZE); util_mutex_lock(&heap->lock); /* add new memory chunk to the heap */ struct heap_entry new_mem = {new_addr, new_size}; vmcache_insert_heap_entry(heap, &new_mem, &heap->first_extent, IS_FREE); /* read the added extent */ struct extent ext; ext.ptr = heap->first_extent; ext.size = vmcache_extent_get_size(ext.ptr); /* mark the guard header as allocated */ struct footer *prev_footer = vmcache_get_prev_footer(&ext); uint64_t *size_flags = &prev_footer->size_flags; *size_flags |= FLAG_ALLOCATED; /* mark the guard footer as allocated */ ptr_ext_t *next_ptr = vmcache_get_next_ptr_ext(&ext); struct header *header_next = vmcache_extent_get_header(next_ptr); size_flags = &header_next->size_flags; *size_flags |= FLAG_ALLOCATED; util_mutex_unlock(&heap->lock); } /* * vmcache_heap_create -- create vmemcache heap */ struct heap * vmcache_heap_create(void *addr, size_t size, size_t extent_size) { LOG(3, "addr %p size %zu extent_size %zu", addr, size, extent_size); struct heap *heap; heap = Zalloc(sizeof(struct heap)); if (heap == NULL) { ERR("!Zalloc"); return NULL; } util_mutex_init(&heap->lock); heap->extent_size = extent_size; vmcache_heap_add_mapping(heap, addr, size); return heap; } /* * vmcache_heap_destroy -- destroy vmemcache heap */ void vmcache_heap_destroy(struct heap *heap) { LOG(3, "heap %p", heap); util_mutex_destroy(&heap->lock); Free(heap); } /* * vmcache_free_extent -- (internal) free the smallest extent */ static int vmcache_free_extent(struct heap *heap, ptr_ext_t *small_extent) { ASSERTne(small_extent, NULL); struct header *header = vmcache_extent_get_header(small_extent); /* remove the extent from the list */ if (header->prev) { struct header *prev_header = vmcache_extent_get_header(header->prev); ASSERTeq(prev_header->next, small_extent); prev_header->next = header->next; } if (header->next) { struct header *next_header = vmcache_extent_get_header(header->next); ASSERTeq(next_header->prev, 
small_extent); next_header->prev = header->prev; } struct extent ext; ext.ptr = small_extent; ext.size = heap->extent_size - HFER_SIZE; /* free the extent */ struct heap_entry he; vmcache_heap_merge(heap, &ext, &he); vmcache_insert_heap_entry(heap, &he, &heap->first_extent, IS_FREE); #ifdef STATS_ENABLED heap->size_used -= ext.size; #endif return 0; } /* * vmcache_alloc -- allocate memory (take it from the queue) * * It returns the number of allocated bytes if successful, otherwise -1. * The last extent of doubly-linked list of allocated extents is returned * in 'first_extent'. * 'small_extent' has to be zeroed in the beginning of a new allocation * (e.g. when *first_extent == NULL). */ ssize_t vmcache_alloc(struct heap *heap, size_t size, ptr_ext_t **first_extent, ptr_ext_t **small_extent) { ASSERTne(first_extent, NULL); ASSERTne(small_extent, NULL); ASSERT((*first_extent == NULL) ? (*small_extent == NULL) : 1); LOG(3, "heap %p size %zu first_extent %p *small_extent %p", heap, size, *first_extent, *small_extent); struct heap_entry he, new; size_t extent_size = heap->extent_size; size_t to_allocate = size; size_t allocated = 0; util_mutex_lock(&heap->lock); do { if (vmcache_pop_heap_entry(heap, &he)) break; size_t alloc_size = roundup(to_allocate + HFER_SIZE, extent_size); if (he.size >= alloc_size + extent_size) { new.ptr = vmcache_new_heap_entry(he.ptr, alloc_size); new.size = he.size - alloc_size; he.size = alloc_size; vmcache_insert_heap_entry(heap, &new, &heap->first_extent, IS_FREE); } if (vmcache_insert_heap_entry(heap, &he, first_extent, IS_ALLOCATED)) { util_mutex_unlock(&heap->lock); return -1; } if (*small_extent == NULL && he.size == extent_size) *small_extent = *first_extent; /* allocated size without headers */ size_t allocated_size = he.size - HFER_SIZE; allocated += allocated_size; if (allocated_size > to_allocate && allocated_size - to_allocate >= extent_size - HFER_SIZE && *small_extent != NULL) { vmcache_free_extent(heap, *small_extent); } to_allocate -= MIN(allocated_size, to_allocate); } while (to_allocate > 0); #ifdef STATS_ENABLED heap->size_used += allocated; #endif util_mutex_unlock(&heap->lock); return (ssize_t)(size - to_allocate); } /* * vmcache_heap_remove -- (internal) remove an extent from the heap */ static void vmcache_heap_remove(struct heap *heap, struct extent *ext) { LOG(3, "heap %p ext %p", heap, ext); struct header *header = vmcache_extent_get_header(ext->ptr); ASSERT(header->next || header->prev || (heap->first_extent == ext->ptr)); if (header->next) { struct header *header_next = vmcache_extent_get_header(header->next); ASSERTeq(header_next->prev, ext->ptr); header_next->prev = header->prev; } if (header->prev) { struct header *header_prev = vmcache_extent_get_header(header->prev); ASSERTeq(header_prev->next, ext->ptr); header_prev->next = header->next; } if (heap->first_extent == ext->ptr) heap->first_extent = header->next; #ifdef STATS_ENABLED heap->entries--; #endif } /* * vmcache_heap_merge -- (internal) merge memory extents */ static void vmcache_heap_merge(struct heap *heap, struct extent *ext, struct heap_entry *he) { LOG(3, "heap %p ext %p", heap, ext); struct extent prev, next; he->ptr = vmcache_extent_get_header(ext->ptr); he->size = ext->size + HFER_SIZE; /* merge with the previous one (lower address) */ struct footer *prev_footer = vmcache_get_prev_footer(ext); prev.size = prev_footer->size_flags; if ((prev.size & FLAG_ALLOCATED) == 0) { prev.ptr = vmcache_get_prev_ptr_ext(prev_footer, prev.size); he->ptr = 
vmcache_extent_get_header(prev.ptr); he->size += prev.size + HFER_SIZE; vmcache_heap_remove(heap, &prev); } /* merge with the next one (higher address) */ next.ptr = vmcache_get_next_ptr_ext(ext); struct header *header_next = vmcache_extent_get_header(next.ptr); next.size = header_next->size_flags; if ((next.size & FLAG_ALLOCATED) == 0) { he->size += next.size + HFER_SIZE; vmcache_heap_remove(heap, &next); } } /* * vmcache_free -- free memory (give it back to the queue) */ void vmcache_free(struct heap *heap, ptr_ext_t *first_extent) { LOG(3, "heap %p first_extent %p", heap, first_extent); util_mutex_lock(&heap->lock); size_t freed = 0; /* * EXTENTS_FOREACH_SAFE variant is required here, * because vmcache_insert_heap_entry() can modify * the headers of the 'ext' extent. */ ptr_ext_t *__next; struct extent ext; EXTENTS_FOREACH_SAFE(ext, first_extent, __next) { /* size without headers */ freed += ext.size; struct heap_entry he; vmcache_heap_merge(heap, &ext, &he); vmcache_insert_heap_entry(heap, &he, &heap->first_extent, IS_FREE); } #ifdef STATS_ENABLED heap->size_used -= freed; #endif util_mutex_unlock(&heap->lock); } /* * vmcache_get_heap_used_size -- get the 'size_used' statistic */ stat_t vmcache_get_heap_used_size(struct heap *heap) { return heap->size_used; } /* * vmcache_get_heap_entries_count -- get the 'heap_entries_count' statistic */ stat_t vmcache_get_heap_entries_count(struct heap *heap) { return heap->entries; } vmemcache-0.8.1/src/vmemcache_heap.h000066400000000000000000000036621374403322600173500ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_heap.h -- internal definitions for vmemcache allocator module */ #ifndef VMEMCACHE_HEAP_H #define VMEMCACHE_HEAP_H 1 #include #include /* type of the statistics */ typedef unsigned long long stat_t; #ifdef __cplusplus extern "C" { #endif #define HEAP_ENTRY_IS_NULL(he) ((he.ptr) == NULL) /* just for type safety - see 'ptr' field in 'struct extent' below */ struct ptr_ext; typedef struct ptr_ext ptr_ext_t; /* extent structure ('struct heap_entry' without header and footer ) */ struct extent { ptr_ext_t *ptr; size_t size; }; struct heap; struct heap *vmcache_heap_create(void *addr, size_t size, size_t extent_size); void vmcache_heap_destroy(struct heap *heap); ssize_t vmcache_alloc(struct heap *heap, size_t size, ptr_ext_t **first_extent, ptr_ext_t **small_extent); void vmcache_free(struct heap *heap, ptr_ext_t *first_extent); stat_t vmcache_get_heap_used_size(struct heap *heap); stat_t vmcache_get_heap_entries_count(struct heap *heap); ptr_ext_t *vmcache_extent_get_next(ptr_ext_t *ptr); size_t vmcache_extent_get_size(ptr_ext_t *ptr); /* unsafe variant - the headers of extents cannot be modified */ #define EXTENTS_FOREACH(ext, extents) \ for ((ext).ptr = (extents), \ (ext).size = vmcache_extent_get_size((ext).ptr); \ (ext).ptr != NULL; \ (ext).ptr = vmcache_extent_get_next((ext).ptr), \ (ext).size = vmcache_extent_get_size((ext).ptr)) /* safe variant - the headers of extents can be modified (freed for example) */ #define EXTENTS_FOREACH_SAFE(ext, extents, __next) \ for ((ext).ptr = (extents), \ (ext).size = vmcache_extent_get_size((ext).ptr), \ (__next) = vmcache_extent_get_next((ext).ptr); \ (ext).ptr != NULL; \ (ext).ptr = (__next), \ (ext).size = vmcache_extent_get_size((ext).ptr), \ (__next) = vmcache_extent_get_next((__next))) #ifdef __cplusplus } #endif #endif 
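/*
 * Usage sketch for the allocator API declared above -- it mirrors how
 * vmemcache_put() drives vmcache_alloc() and how entry teardown calls
 * vmcache_free().  This is an illustrative fragment only, not part of the
 * library: it assumes 'addr' points to an already-mapped region of 'size'
 * bytes, reuses VMEMCACHE_MIN_EXTENT (from libvmemcache.h) as the heap
 * granularity and the MIN() helper used elsewhere in this codebase, and
 * trims error handling down to early returns.
 *
 *	struct heap *heap = vmcache_heap_create(addr, size,
 *			VMEMCACHE_MIN_EXTENT);
 *	if (heap == NULL)
 *		return -1;
 *
 *	ptr_ext_t *extents = NULL;	// extent list of this allocation
 *	ptr_ext_t *small = NULL;	// must start as NULL for a new alloc
 *	size_t left = 10000;		// bytes still to be allocated
 *	while (left != 0) {
 *		ssize_t n = vmcache_alloc(heap, left, &extents, &small);
 *		if (n < 0)
 *			return -1;	// allocation error
 *		if (n == 0)
 *			break;		// heap full - vmemcache evicts here
 *		left -= MIN((size_t)n, left);
 *	}
 *
 *	struct extent ext;		// walk the extents, e.g. to fill them
 *	EXTENTS_FOREACH(ext, extents)
 *		memset(ext.ptr, 0, ext.size);
 *
 *	vmcache_free(heap, extents);	// give all extents back to the heap
 *	vmcache_heap_destroy(heap);
 */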
vmemcache-0.8.1/src/vmemcache_index.c000066400000000000000000000124101374403322600175240ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_index.c -- abstraction layer for vmemcache indexing API */ #include #include #include #include #include "vmemcache.h" #include "vmemcache_index.h" #include "critnib.h" #include "fast-hash.h" #include "sys_util.h" #ifdef STATS_ENABLED #define STAT_ADD(ptr, add) util_fetch_and_add64(ptr, add) #else #define STAT_ADD(ptr, add) do {} while (0) #endif /* must be a power of 2 */ #define NSHARDS 256 struct index { struct critnib *bucket[NSHARDS]; int sharding; }; /* * shard_id -- (internal) hash the key and pick a shard bucket id */ static int shard_id(size_t key_size, const char *key) { return (int)hash(key_size, key) & (NSHARDS - 1); } /* * shard -- (internal) pick a shard bucket */ static struct critnib * shard(struct index *index, size_t key_size, const char *key) { if (index->sharding) return index->bucket[shard_id(key_size, key)]; return index->bucket[0]; } /* * vmcache_index_new -- initialize vmemcache indexing structure */ struct index * vmcache_index_new(void) { struct index *index = Malloc(sizeof(struct index)); if (!index) return NULL; index->sharding = env_yesno10("VMEMCACHE_SHARDING", 1); for (int i = 0; i < NSHARDS; i++) { struct critnib *c = critnib_new(); if (!c) { for (i--; i >= 0; i--) { util_rwlock_destroy(&index->bucket[i]->lock); critnib_delete(index->bucket[i], NULL); } Free(index); return NULL; } util_rwlock_init(&c->lock); index->bucket[i] = c; } return index; } /* * vmcache_index_delete -- destroy vmemcache indexing structure */ void vmcache_index_delete(struct index *index, delete_entry_t del_entry) { for (int i = 0; i < NSHARDS; i++) { util_rwlock_destroy(&index->bucket[i]->lock); critnib_delete(index->bucket[i], del_entry); } Free(index); } /* * vmcache_index_insert -- insert data into the vmemcache indexing structure */ int vmcache_index_insert(struct index *index, struct cache_entry *entry) { struct critnib *c = shard(index, entry->key.ksize, entry->key.key); util_rwlock_wrlock(&c->lock); int err = critnib_set(c, entry); if (err) { errno = err; util_rwlock_unlock(&c->lock); ERR("inserting to the index failed"); return -1; } #ifdef STATS_ENABLED c->leaf_count++; c->put_count++; c->DRAM_usage += malloc_usable_size(entry); #endif /* this is the first and the only one reference now (in the index) */ entry->value.refcount = 1; util_rwlock_unlock(&c->lock); return 0; } /* * vmcache_index_get -- get data from the vmemcache indexing structure */ int vmcache_index_get(struct index *index, const void *key, size_t ksize, struct cache_entry **entry, int bump_stat) { #define SIZE_1K 1024 struct critnib *c = shard(index, ksize, key); struct cache_entry *e; *entry = NULL; if (ksize > SIZE_1K) { e = Malloc(sizeof(struct cache_entry) + ksize); if (e == NULL) { ERR("!Zalloc"); return -1; } } else { e = alloca(sizeof(struct cache_entry) + ksize); } e->key.ksize = ksize; memcpy(e->key.key, key, ksize); util_rwlock_rdlock(&c->lock); struct cache_entry *v = critnib_get(c, e); if (ksize > SIZE_1K) Free(e); if (v == NULL) { util_rwlock_unlock(&c->lock); if (bump_stat) STAT_ADD(&c->miss_count, 1); LOG(1, "vmcache_index_get: cannot find an element with the given key in the index"); return 0; } if (bump_stat) STAT_ADD(&c->hit_count, 1); vmemcache_entry_acquire(v); *entry = v; util_rwlock_unlock(&c->lock); return 0; } /* * vmcache_index_remove -- remove data from the vmemcache 
indexing structure */ int vmcache_index_remove(VMEMcache *cache, struct cache_entry *entry) { struct critnib *c = shard(cache->index, entry->key.ksize, entry->key.key); util_rwlock_wrlock(&c->lock); struct cache_entry *v = critnib_remove(c, entry); if (v == NULL) { util_rwlock_unlock(&c->lock); ERR( "vmcache_index_remove: cannot find an element with the given key in the index"); errno = EINVAL; return -1; } #ifdef STATS_ENABLED c->leaf_count--; c->evict_count++; c->DRAM_usage -= malloc_usable_size(entry); #endif vmemcache_entry_release(cache, entry); util_rwlock_unlock(&c->lock); return 0; } /* * vmemcache_index_get_stat -- query an index-held stat */ size_t vmemcache_index_get_stat(struct index *index, enum vmemcache_statistic stat) { size_t total = 0; switch (stat) { case VMEMCACHE_STAT_DRAM_SIZE_USED: { size_t nodes = 0; for (int i = 0; i < NSHARDS; i++) { nodes += index->bucket[i]->node_count; total += index->bucket[i]->DRAM_usage; } return total + nodes * sizeof(struct critnib_node); } case VMEMCACHE_STAT_PUT: for (int i = 0; i < NSHARDS; i++) total += index->bucket[i]->put_count; break; case VMEMCACHE_STAT_EVICT: for (int i = 0; i < NSHARDS; i++) total += index->bucket[i]->evict_count; break; case VMEMCACHE_STAT_HIT: for (int i = 0; i < NSHARDS; i++) total += index->bucket[i]->hit_count; break; case VMEMCACHE_STAT_MISS: for (int i = 0; i < NSHARDS; i++) total += index->bucket[i]->miss_count; break; case VMEMCACHE_STAT_ENTRIES: for (int i = 0; i < NSHARDS; i++) total += index->bucket[i]->leaf_count; break; default: FATAL("wrong stat type"); /* not callable from outside */ } return total; } vmemcache-0.8.1/src/vmemcache_index.h000066400000000000000000000015041374403322600175330ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_index.h -- internal definitions for vmemcache indexing API */ #ifndef VMEMCACHE_INDEX_H #define VMEMCACHE_INDEX_H 1 #include "libvmemcache.h" #include "critnib.h" #ifdef __cplusplus extern "C" { #endif struct cache_entry; struct index *vmcache_index_new(void); void vmcache_index_delete(struct index *index, delete_entry_t del_entry); int vmcache_index_insert(struct index *index, struct cache_entry *entry); int vmcache_index_get(struct index *index, const void *key, size_t ksize, struct cache_entry **entry, int bump_stat); int vmcache_index_remove(VMEMcache *cache, struct cache_entry *entry); size_t vmemcache_index_get_stat(struct index *index, enum vmemcache_statistic stat); #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/src/vmemcache_repl.c000066400000000000000000000222051374403322600173620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_repl.c -- replacement policies for vmemcache */ #include #include "vmemcache.h" #include "vmemcache_repl.h" #include "util.h" #include "out.h" #include "sys/queue.h" #include "sys_util.h" #include "ringbuf.h" #define LEN_RING_BUF (1 << 12) struct repl_p_entry { TAILQ_ENTRY(repl_p_entry) node; void *data; struct repl_p_entry **ptr_entry; /* pointer to be zeroed when evicted */ }; struct repl_p_head { os_mutex_t lock; TAILQ_HEAD(head, repl_p_entry) first; struct ringbuf *ringbuf; }; /* forward declarations of replacement policy operations */ static int repl_p_none_new(struct repl_p_head **head); static void repl_p_none_delete(struct repl_p_head *head); static struct repl_p_entry * repl_p_none_insert(struct repl_p_head *head, void *element, struct repl_p_entry 
**ptr_entry); static void repl_p_none_use(struct repl_p_head *head, struct repl_p_entry **ptr_entry); static void * repl_p_none_evict(struct repl_p_head *head, struct repl_p_entry **ptr_entry); static int repl_p_lru_new(struct repl_p_head **head); static void repl_p_lru_delete(struct repl_p_head *head); static struct repl_p_entry * repl_p_lru_insert(struct repl_p_head *head, void *element, struct repl_p_entry **ptr_entry); static void repl_p_lru_use(struct repl_p_head *head, struct repl_p_entry **ptr_entry); static void * repl_p_lru_evict(struct repl_p_head *head, struct repl_p_entry **ptr_entry); /* replacement policy operations */ static const struct repl_p_ops repl_p_ops[VMEMCACHE_REPLACEMENT_NUM] = { { .repl_p_new = repl_p_none_new, .repl_p_delete = repl_p_none_delete, .repl_p_insert = repl_p_none_insert, .repl_p_use = repl_p_none_use, .repl_p_evict = repl_p_none_evict, .dram_per_entry = 0, }, { .repl_p_new = repl_p_lru_new, .repl_p_delete = repl_p_lru_delete, .repl_p_insert = repl_p_lru_insert, .repl_p_use = repl_p_lru_use, .repl_p_evict = repl_p_lru_evict, .dram_per_entry = sizeof(struct repl_p_entry), } }; /* * repl_p_init -- allocate and initialize the replacement policy structure */ struct repl_p * repl_p_init(enum vmemcache_repl_p rp) { struct repl_p *repl_p = Malloc(sizeof(struct repl_p)); if (repl_p == NULL) return NULL; repl_p->ops = &repl_p_ops[rp]; if (repl_p->ops->repl_p_new(&repl_p->head)) { Free(repl_p); return NULL; } return repl_p; } /* * repl_p_destroy -- destroy the replacement policy structure */ void repl_p_destroy(struct repl_p *repl_p) { ASSERTne(repl_p, NULL); repl_p->ops->repl_p_delete(repl_p->head); Free(repl_p); } /* * repl_p_none_new -- (internal) create a new "none" replacement policy */ static int repl_p_none_new(struct repl_p_head **head) { *head = NULL; return 0; } /* * repl_p_none_delete -- (internal) destroy the "none" replacement policy */ static void repl_p_none_delete(struct repl_p_head *head) { } /* * repl_p_none_insert -- (internal) insert a new element */ static struct repl_p_entry * repl_p_none_insert(struct repl_p_head *head, void *element, struct repl_p_entry **ptr_entry) { vmemcache_entry_acquire(element); return NULL; } /* * repl_p_none_use -- (internal) use the element */ static void repl_p_none_use(struct repl_p_head *head, struct repl_p_entry **ptr_entry) { } /* * repl_p_none_evict -- (internal) evict the element */ static void * repl_p_none_evict(struct repl_p_head *head, struct repl_p_entry **ptr_entry) { return ptr_entry; } /* * repl_p_lru_new -- (internal) create a new LRU replacement policy */ static int repl_p_lru_new(struct repl_p_head **head) { struct repl_p_head *h = Zalloc(sizeof(struct repl_p_head)); if (h == NULL) return -1; util_mutex_init(&h->lock); TAILQ_INIT(&h->first); h->ringbuf = ringbuf_new(LEN_RING_BUF); *head = h; return 0; } /* * dequeue_all -- (internal) dequeue all repl_p entries, * it MUST be run under a lock */ static void dequeue_all(struct repl_p_head *head) { struct repl_p_entry *e; int counter = 0; do { e = ringbuf_trydequeue_s(head->ringbuf, sizeof(struct repl_p_entry)); if (e == NULL) break; TAILQ_MOVE_TO_TAIL(&head->first, e, node); /* unlock the entry, so that it can be used again */ util_atomic_store_explicit64(e->ptr_entry, e, memory_order_relaxed); /* * We are limiting the number of iterations, * so that this loop ends for sure, because other thread * can insert new elements to the ring buffer in the same time. 
*/ } while (++counter < LEN_RING_BUF); } /* * repl_p_lru_delete -- (internal) destroy the LRU replacement policy */ static void repl_p_lru_delete(struct repl_p_head *head) { dequeue_all(head); ringbuf_delete(head->ringbuf); while (!TAILQ_EMPTY(&head->first)) { struct repl_p_entry *entry = TAILQ_FIRST(&head->first); TAILQ_REMOVE(&head->first, entry, node); Free(entry); } util_mutex_destroy(&head->lock); Free(head); } /* * repl_p_lru_insert -- (internal) insert a new element */ static struct repl_p_entry * repl_p_lru_insert(struct repl_p_head *head, void *element, struct repl_p_entry **ptr_entry) { struct repl_p_entry *entry = Zalloc(sizeof(struct repl_p_entry)); if (entry == NULL) return NULL; entry->data = element; ASSERTne(ptr_entry, NULL); entry->ptr_entry = ptr_entry; /* * 'util_bool_compare_and_swap64' must always succeed here, * because this entry with ptr_entry=NULL has been considered as busy * so it has never been used so far. This is the first time we set * the 'entry->ptr_entry' to 'entry'. */ int rv = util_bool_compare_and_swap64(entry->ptr_entry, NULL, entry); if (rv == 0) { FATAL( "repl_p_lru_insert(): failed to initialize pointer to the LRU list"); } util_mutex_lock(&head->lock); vmemcache_entry_acquire(element); TAILQ_INSERT_TAIL(&head->first, entry, node); util_mutex_unlock(&head->lock); return entry; } /* * repl_p_lru_use -- (internal) use the element */ static void repl_p_lru_use(struct repl_p_head *head, struct repl_p_entry **ptr_entry) { struct repl_p_entry *entry; ASSERTne(ptr_entry, NULL); entry = *ptr_entry; if (entry == NULL) return; /* * Try to lock the entry by setting 'ptr_entry' to NULL * and enqueue it to the ring buffer, * so that it cannot be used nor evicted. */ if (!util_bool_compare_and_swap64(ptr_entry, entry, NULL)) return; /* * This the "in the middle of being used" state. * In this state - after bool_compare_and_swap() * and before ringbuf_tryenqueue() - the entry cannot be evicted. */ while (ringbuf_tryenqueue(head->ringbuf, entry) != 0) { util_mutex_lock(&head->lock); dequeue_all(head); util_mutex_unlock(&head->lock); } } /* * repl_p_lru_evict -- (internal) evict the element */ static void * repl_p_lru_evict(struct repl_p_head *head, struct repl_p_entry **ptr_entry) { struct repl_p_entry *entry; void *data = NULL; int is_LRU = (ptr_entry == NULL); util_mutex_lock(&head->lock); if (TAILQ_EMPTY(&head->first)) { errno = ESRCH; ERR("LRU queue is empty"); goto exit_unlock; } if (is_LRU) { entry = TAILQ_FIRST(&head->first); ptr_entry = entry->ptr_entry; } else { entry = *ptr_entry; } /* * Try to lock the entry by setting 'ptr_entry' to NULL, * so that it cannot be used nor evicted in other threads. */ if (entry != NULL && util_bool_compare_and_swap64(ptr_entry, entry, NULL)) goto evict_found_entry; /* * The first try failed. The entry could have been locked and enqueued * in the ring buffer, so let's flush the ring buffer and try again. */ dequeue_all(head); /* * If the entry was assigned as the LRU entry, let's assign it again, * because the LRU entry most likely has been changed in dequeue_all(). 
*/ if (is_LRU) { entry = TAILQ_FIRST(&head->first); ptr_entry = entry->ptr_entry; } else { entry = *ptr_entry; } /* try to lock the entry the second time */ if (entry != NULL && util_bool_compare_and_swap64(ptr_entry, entry, NULL)) goto evict_found_entry; /* the second try failed */ if (!is_LRU) { /* the given entry is busy, give up */ errno = EAGAIN; ERR("entry is busy and cannot be evicted"); goto exit_unlock; } if (entry == NULL) { /* no entries in the LRU queue, give up */ errno = ESRCH; ERR("LRU queue is empty"); goto exit_unlock; } /* try to lock the next entries (repl_p_lru_evict can hardly fail) */ do { entry = TAILQ_NEXT(entry, node); if (entry == NULL) break; ptr_entry = entry->ptr_entry; } while (!util_bool_compare_and_swap64(ptr_entry, entry, NULL)); if (entry != NULL) goto evict_found_entry; /* * All entries in the LRU queue are locked. * The last chance is to try to dequeue an entry. */ entry = ringbuf_trydequeue_s(head->ringbuf, sizeof(struct repl_p_entry)); if (entry == NULL) { /* * Cannot find any entry to evict. * It means that all entries are heavily used * and they have to be "in the middle of being used" state now * (see repl_p_lru_use()). * There is nothing we can do but fail. */ errno = ESRCH; ERR("no entry eligible for eviction found"); goto exit_unlock; } evict_found_entry: TAILQ_REMOVE(&head->first, entry, node); data = entry->data; Free(entry); exit_unlock: util_mutex_unlock(&head->lock); return data; } vmemcache-0.8.1/src/vmemcache_repl.h000066400000000000000000000022441374403322600173700ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_repl.h -- API of replacement policy for vmemcache */ #ifndef VMEMCACHE_REPL_H #define VMEMCACHE_REPL_H 1 #include "libvmemcache.h" #ifdef __cplusplus extern "C" { #endif struct repl_p_head; struct repl_p_entry; struct repl_p_ops { /* create a new replacement policy list */ int (*repl_p_new)(struct repl_p_head **head); /* destroy the replacement policy list */ void (*repl_p_delete)(struct repl_p_head *head); /* insert a new element */ struct repl_p_entry * (*repl_p_insert)(struct repl_p_head *head, void *element, struct repl_p_entry **ptr_entry); /* evict an/the element */ void * (*repl_p_evict)(struct repl_p_head *head, struct repl_p_entry **ptr_entry); /* use the element */ void (*repl_p_use)(struct repl_p_head *head, struct repl_p_entry **ptr_entry); /* memory overhead per element */ size_t dram_per_entry; }; struct repl_p { const struct repl_p_ops *ops; struct repl_p_head *head; }; struct repl_p *repl_p_init(enum vmemcache_repl_p rp); void repl_p_destroy(struct repl_p *repl_p); #ifdef __cplusplus } #endif #endif vmemcache-0.8.1/tests/000077500000000000000000000000001374403322600146165ustar00rootroot00000000000000vmemcache-0.8.1/tests/CMakeLists.txt000066400000000000000000000113731374403322600173630ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation set(DEFAULT_TEST_DIR ${CMAKE_CURRENT_BINARY_DIR}/test) set(TEST_DIR ${DEFAULT_TEST_DIR} CACHE STRING "directory for vmemcache memory pool used for tests") set(DEVICE_DAX_PATH "" CACHE STRING "raw DAX device without a file system for tests. 
Example: /dev/dax0.0") set(GLOBAL_TEST_ARGS -DPARENT_DIR=${TEST_DIR}/) if(TRACE_TESTS) set(GLOBAL_TEST_ARGS ${GLOBAL_TEST_ARGS} --trace-expand) endif() add_cstyle(tests) add_check_whitespace(tests) set(vg_tracers memcheck helgrind drd) # Configures test ${name} function(test name file tracer location) if (${tracer} IN_LIST vg_tracers) if (NOT VALGRIND_FOUND) message(WARNING "Valgrind not found, test skipped: ${name}") return() endif() if (COVERAGE_BUILD) message(STATUS "This is the Coverage build, skipping Valgrind test: ${name}") return() endif() endif() if(${location} STREQUAL fs) set(TEST_POOL_LOCATION ${TEST_DIR}) elseif(${location} STREQUAL ddax) set(TEST_POOL_LOCATION ${DEVICE_DAX_PATH}) else() message(FATAL_ERROR "Unknown pool's location: ${location}") return() endif() add_test(NAME ${name} COMMAND ${CMAKE_COMMAND} ${GLOBAL_TEST_ARGS} -DTEST_NAME=${name} -DSRC_DIR=${CMAKE_CURRENT_SOURCE_DIR} -DBIN_DIR=${CMAKE_CURRENT_BINARY_DIR}/${file}-${tracer}-${location} -DCONFIG=$ -DTRACER=${tracer} -DTEST_POOL_LOCATION=${TEST_POOL_LOCATION} -P ${CMAKE_CURRENT_SOURCE_DIR}/${file}.cmake) set_tests_properties(${name} PROPERTIES ENVIRONMENT "LC_ALL=C;PATH=$ENV{PATH}" TIMEOUT 300) endfunction() # add and link an executable function(add_link_executable name sources libs) add_executable(${name} ${sources}) target_include_directories(${name} PRIVATE ${CMAKE_SOURCE_DIR}/src) if(STATS_ENABLED) target_compile_definitions(${name} PRIVATE STATS_ENABLED=1) endif() target_link_libraries(${name} PRIVATE ${libs}) endfunction() set(SOURCES_BASIC vmemcache_test_basic.c) set(SOURCES_MT vmemcache_test_mt.c ${CMAKE_SOURCE_DIR}/src/os_posix.c ${CMAKE_SOURCE_DIR}/src/os_thread_posix.c) set(SOURCES_UTLIIZATION vmemcache_test_utilization.c) set(SOURCES_HEAP_USAGE vmemcache_test_heap_usage.c) set(SOURCES_EXAMPLE example.c) set(SOURCES_TWOLEVEL twolevel.c) set(LIBS_BASIC vmemcache) set(LIBS_MT vmemcache ${CMAKE_THREAD_LIBS_INIT}) set(LIBS_UTLIIZATION vmemcache m) set(LIBS_HEAP_USAGE vmemcache ${CMAKE_DL_LIBS}) set(LIBS_EXAMPLE vmemcache) set(LIBS_TWOLEVEL vmemcache) add_link_executable(vmemcache_test_basic "${SOURCES_BASIC}" "${LIBS_BASIC}") add_link_executable(vmemcache_test_mt "${SOURCES_MT}" "${LIBS_MT}") add_link_executable(vmemcache_test_utilization "${SOURCES_UTLIIZATION}" "${LIBS_UTLIIZATION}") add_link_executable(example "${SOURCES_EXAMPLE}" "${LIBS_EXAMPLE}") add_link_executable(vmemcache_test_heap_usage "${SOURCES_HEAP_USAGE}" "${LIBS_HEAP_USAGE}") add_link_executable(twolevel "${SOURCES_TWOLEVEL}" "${LIBS_TWOLEVEL}") if(NOT "${TEST_DIR}" STREQUAL "") test("FS-test-basic" test-basic none fs) test("FS-test-basic-memcheck" test-basic memcheck fs) test("FS-test-mt" test-mt none fs) test("FS-test-mt-memcheck" test-mt memcheck fs) test("FS-test-mt-helgrind" test-mt helgrind fs) test("FS-test-mt-drd" test-mt drd fs) test("FS-test-bench-mt" test-bench-mt none fs) test("FS-test-bench-simul" test-bench-simul none fs) test("FS-test-bench-simul-memcheck" test-bench-simul memcheck fs) test("FS-test-bench-simul-helgrind" test-bench-simul helgrind fs) test("FS-test-bench-simul-drd" test-bench-simul drd fs) test("FS-test-utilization" test-utilization none fs) test("FS-test-utilization-memcheck" test-utilization memcheck fs) test("FS-test-heap-usage" test-heap-usage none fs) test("FS-test-heap-usage-memcheck" test-heap-usage memcheck fs) test("FS-test-twolevel" test-twolevel none fs) endif() if(NOT "${DEVICE_DAX_PATH}" STREQUAL "") test("DDAX-test-basic" test-basic none ddax) test("DDAX-test-basic-memcheck" test-basic memcheck 
ddax) test("DDAX-test-mt" test-mt none ddax) test("DDAX-test-mt-memcheck" test-mt memcheck ddax) test("DDAX-test-mt-helgrind" test-mt helgrind ddax) test("DDAX-test-mt-drd" test-mt drd ddax) test("DDAX-test-bench-mt" test-bench-mt none ddax) test("DDAX-test-bench-simul" test-bench-simul none ddax) test("DDAX-test-bench-simul-memcheck" test-bench-simul memcheck ddax) test("DDAX-test-bench-simul-helgrind" test-bench-simul helgrind ddax) test("DDAX-test-bench-simul-drd" test-bench-simul drd ddax) test("DDAX-test-utilization" test-utilization none ddax) test("DDAX-test-utilization-memcheck" test-utilization memcheck ddax) test("DDAX-test-heap-usage" test-heap-usage none ddax) test("DDAX-test-heap-usage-memcheck" test-heap-usage memcheck ddax) endif() test("example" test-example none fs) vmemcache-0.8.1/tests/drd-log.supp000066400000000000000000000005511374403322600170600ustar00rootroot00000000000000{ drd:ConflictingAccess fun:*mempcpy ... fun:_IO_file_xsputn@@GLIBC* fun:fputs fun:out_print_func fun:out_common fun:out_log } { drd:ConflictingAccess fun:*memmove fun:_IO_file_xsputn@@GLIBC* fun:fputs fun:out_print_func fun:out_common fun:out_log } vmemcache-0.8.1/tests/example.c000066400000000000000000000020651374403322600164200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ #include #include #include #define STR_AND_LEN(x) (x), strlen(x) static VMEMcache *cache; static void on_miss(VMEMcache *cache, const void *key, size_t key_size, void *arg) { vmemcache_put(cache, STR_AND_LEN("meow"), STR_AND_LEN("Cthulhu fthagn")); } static void get(const char *key) { char buf[128]; ssize_t len = vmemcache_get(cache, STR_AND_LEN(key), buf, sizeof(buf), 0, NULL); if (len >= 0) printf("%.*s\n", (int)len, buf); else printf("(key not found: %s)\n", key); } int main() { cache = vmemcache_new(); if (vmemcache_add(cache, "/tmp")) { fprintf(stderr, "error: vmemcache_add: %s\n", vmemcache_errormsg()); return 1; } /* Query a non-existent key. */ get("meow"); /* Insert then query. */ vmemcache_put(cache, STR_AND_LEN("bark"), STR_AND_LEN("Lorem ipsum")); get("bark"); /* Install an on-miss handler. */ vmemcache_callback_on_miss(cache, on_miss, 0); get("meow"); vmemcache_delete(cache); return 0; } vmemcache-0.8.1/tests/helgrind-log.supp000066400000000000000000000005311374403322600201010ustar00rootroot00000000000000{ Helgrind:Race fun:*mempcpy ... 
fun:_IO_file_xsputn@@GLIBC* fun:fputs fun:out_print_func fun:out_common fun:out_log } { Helgrind:Race fun:*memmove fun:_IO_file_xsputn@@GLIBC* fun:fputs fun:out_print_func fun:out_common fun:out_log } vmemcache-0.8.1/tests/helpers.cmake000066400000000000000000000062461374403322600172720ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2019, Intel Corporation cmake_minimum_required(VERSION 3.3) set(DIR ${PARENT_DIR}/${TEST_NAME}) if (WIN32) set(EXE_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${CONFIG}) set(TEST_DIR ${CMAKE_CURRENT_BINARY_DIR}/../tests/${CONFIG}) else() set(EXE_DIR ${CMAKE_CURRENT_BINARY_DIR}/../) set(TEST_DIR ${CMAKE_CURRENT_BINARY_DIR}/../tests/) endif() function(setup) execute_process(COMMAND ${CMAKE_COMMAND} -E remove_directory ${DIR}) execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${DIR}) execute_process(COMMAND ${CMAKE_COMMAND} -E remove_directory ${BIN_DIR}) execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${BIN_DIR}) endfunction() function(cleanup) execute_process(COMMAND ${CMAKE_COMMAND} -E remove_directory ${DIR}) endfunction() # Executes test command ${name} and verifies its status matches ${expectation}. # Optional function arguments are passed as consecutive arguments to # the command. function(execute_arg input expectation name) message(STATUS "Executing: ${name} ${ARGN}") if("${input}" STREQUAL "") execute_process(COMMAND ${name} ${ARGN} RESULT_VARIABLE RET OUTPUT_FILE ${BIN_DIR}/out ERROR_FILE ${BIN_DIR}/err) else() execute_process(COMMAND ${name} ${ARGN} RESULT_VARIABLE RET INPUT_FILE ${input} OUTPUT_FILE ${BIN_DIR}/out ERROR_FILE ${BIN_DIR}/err) endif() message(STATUS "Test ${name}:") file(READ ${BIN_DIR}/out OUT) message(STATUS "Stdout:\n${OUT}") file(READ ${BIN_DIR}/err ERR) message(STATUS "Stderr:\n${ERR}") if(NOT RET EQUAL expectation) message(FATAL_ERROR "${name} ${ARGN} exit code ${RET} doesn't match expectation ${expectation}") endif() endfunction() function(run_under_valgrind vg_opt name) message(STATUS "Executing: valgrind ${vg_opt} ${name} ${ARGN}") execute_process(COMMAND valgrind ${vg_opt} ${name} ${ARGN} RESULT_VARIABLE RET OUTPUT_FILE ${BIN_DIR}/out ERROR_FILE ${BIN_DIR}/err) message(STATUS "Test ${name}:") file(READ ${BIN_DIR}/out OUT) message(STATUS "Stdout:\n${OUT}") file(READ ${BIN_DIR}/err ERR) message(STATUS "Stderr:\n${ERR}") if(NOT RET EQUAL 0) message(FATAL_ERROR "command 'valgrind ${name} ${ARGN}' failed:\n${ERR}") endif() set(text_passed "ERROR SUMMARY: 0 errors from 0 contexts") string(FIND "${ERR}" "${text_passed}" RET) if(RET EQUAL -1) message(FATAL_ERROR "command 'valgrind ${name} ${ARGN}' failed:\n${ERR}") endif() endfunction() function(execute expectation name) set(ENV{VMEMCACHE_FILE} "${BIN_DIR}/out.log") set(ENV{VMEMCACHE_LEVEL} "3") if (${TRACER} STREQUAL "none") execute_arg("" ${expectation} ${name} ${ARGN}) elseif (${TRACER} STREQUAL memcheck) set(VG_OPT "--leak-check=full") run_under_valgrind("${VG_OPT}" ${name} ${ARGN}) elseif (${TRACER} STREQUAL helgrind) set(HEL_SUPP "${SRC_DIR}/helgrind-log.supp") set(VG_OPT "--tool=helgrind" "--suppressions=${HEL_SUPP}") run_under_valgrind("${VG_OPT}" ${name} ${ARGN}) elseif (${TRACER} STREQUAL drd) set(DRD_SUPP "${SRC_DIR}/drd-log.supp") set(VG_OPT "--tool=drd" "--suppressions=${DRD_SUPP}") run_under_valgrind("${VG_OPT}" ${name} ${ARGN}) else () message(FATAL_ERROR "unknown tracer: ${TRACER}") endif () endfunction() vmemcache-0.8.1/tests/test-basic.cmake000066400000000000000000000003131374403322600176530ustar00rootroot00000000000000# 
SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() execute(0 ${TEST_DIR}/vmemcache_test_basic ${TEST_POOL_LOCATION}) cleanup() vmemcache-0.8.1/tests/test-bench-mt.cmake000066400000000000000000000003221374403322600202670ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() execute(0 ${TEST_DIR}/../benchmarks/bench_micro "${TEST_POOL_LOCATION}") cleanup() vmemcache-0.8.1/tests/test-bench-simul.cmake000066400000000000000000000003611374403322600210030ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() execute(0 ${TEST_DIR}/../benchmarks/bench_simul "${TEST_POOL_LOCATION}" n_threads=4 ops_count=100 warm_up=0) cleanup() vmemcache-0.8.1/tests/test-example.cmake000066400000000000000000000002431374403322600202270ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() execute(0 ${TEST_DIR}/example) cleanup() vmemcache-0.8.1/tests/test-heap-usage.cmake000066400000000000000000000003131374403322600206110ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() execute(0 ${TEST_DIR}/vmemcache_test_heap_usage ${TEST_POOL_LOCATION}) cleanup() vmemcache-0.8.1/tests/test-mt.cmake000066400000000000000000000015201374403322600172130ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() set(SEED 0) # set seed from time set(vg_thread_tracers helgrind drd) set(valgrind memcheck helgrind drd) if (${TRACER} IN_LIST vg_thread_tracers) set(N_THREADS 4) set(N_OPS 400) else() set(N_THREADS 10) set(N_OPS 10000) endif() if (${TRACER} IN_LIST valgrind) # skip tests that last very long under Valgrind execute(0 ${TEST_DIR}/vmemcache_test_mt ${TEST_POOL_LOCATION} ${N_THREADS} ${N_OPS} ${SEED} "skip") else() execute(0 ${TEST_DIR}/vmemcache_test_mt ${TEST_POOL_LOCATION} ${N_THREADS} ${N_OPS} ${SEED}) # additional tests for number of threads == 1 and 2 execute(0 ${TEST_DIR}/vmemcache_test_mt ${TEST_POOL_LOCATION} 1) execute(0 ${TEST_DIR}/vmemcache_test_mt ${TEST_POOL_LOCATION} 2) endif() cleanup() vmemcache-0.8.1/tests/test-twolevel.cmake000066400000000000000000000003201374403322600204310ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) setup() execute(0 ${TEST_DIR}/twolevel ${TEST_POOL_LOCATION} ${TEST_POOL_LOCATION}) cleanup() vmemcache-0.8.1/tests/test-utilization.cmake000066400000000000000000000003721374403322600211520ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation include(${SRC_DIR}/helpers.cmake) set(TIMEOUT_SEC 2) setup() execute(0 ${TEST_DIR}/vmemcache_test_utilization -d ${TEST_POOL_LOCATION} -t ${TIMEOUT_SEC} -n) cleanup() vmemcache-0.8.1/tests/test_helpers.h000066400000000000000000000053151374403322600174740ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * test_helpers.h -- header with helpers */ #ifndef TEST_HELPERS_H #define TEST_HELPERS_H 1 #include #include #include #include #include #include #include #define UT_ERR(...) 
do {\ fprintf(stderr, "ERROR: " __VA_ARGS__);\ fprintf(stderr, "\n");\ } while (/*CONSTCOND*/0) #define UT_FATAL(...) do {\ fprintf(stderr, "FATAL ERROR at %s:%i in %s(): ",\ __FILE__, __LINE__, __func__);\ fprintf(stderr, __VA_ARGS__);\ fprintf(stderr, "\n");\ abort();\ } while (/*CONSTCOND*/0) #define UT_ASSERTeq(x, y) do if ((x) != (y)) {\ UT_FATAL("ASSERT FAILED : " #x " (%llu) ≠ %llu",\ (unsigned long long)(x), (unsigned long long)(y));\ } while (/*CONSTCOND*/0) #define UT_ASSERTin(x, min, max) do if ((x) < (min) || (x) > (max)) {\ UT_FATAL("ASSERT FAILED : " #x " = %llu not in [%llu,%llu]",\ (unsigned long long)(x),\ (unsigned long long)(min), (unsigned long long)(max));\ } while (/*CONSTCOND*/0) /* * str_to_unsigned -- (internal) convert string argument to unsigned int */ static inline int str_to_unsigned(const char *str, unsigned *value) { char *endptr = NULL; errno = 0; /* to distinguish success/failure after call */ unsigned long val = strtoul(str, &endptr, 10); if ((errno == ERANGE && val == ULONG_MAX) || (errno != 0 && val == 0) || (endptr == str) || (*endptr != '\0')) { UT_ERR("strtoul() failed to convert the string %s", str); return -1; } if (val > UINT_MAX) { UT_ERR("value %s is bigger than UINT_MAX (%u)", str, UINT_MAX); return -1; } *value = (unsigned)val; return 0; } /* * str_to_ull -- (internal) convert string argument to unsigned long long */ static inline int str_to_ull(const char *str, unsigned long long *value) { char *endptr = NULL; errno = 0; /* to distinguish success/failure after call */ unsigned long long val = strtoull(str, &endptr, 10); if ((errno == ERANGE && val == ULLONG_MAX) || (errno != 0 && val == 0) || (endptr == str) || (*endptr != '\0')) { UT_ERR("strtoull() failed to convert the string %s", str); return -1; } *value = (unsigned long long)val; return 0; } /* * get_granular_rand_size - (internal) generate random size value * with specified granularity */ static inline size_t get_granular_rand_size(size_t val_max, size_t granularity) { size_t val_size = (1 + (size_t) rand() / (RAND_MAX / (val_max / granularity) + 1)) * granularity; assert(val_size <= val_max); assert(val_size >= granularity); assert(val_size % granularity == 0 && "put value size must be a multiple of granularity"); return val_size; } #endif /* TEST_HELPERS_H */ vmemcache-0.8.1/tests/twolevel.c000066400000000000000000000070521374403322600166270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ #include #include #include #include #include #define ERR(...) do { fprintf(stderr, __VA_ARGS__); exit(1); } while (0) #define SIZE_MB (1024 * 1024ULL) #define L1_CAPACITY (1 * SIZE_MB) #define L2_CAPACITY (10 * SIZE_MB) #define ZSIZE (SIZE_MB / 2) static void evict_demote(VMEMcache *cache, const void *key, size_t key_size, void *arg) { VMEMcache *colder = (VMEMcache *)arg; size_t vsize; /* First, obtain the value's size. */ if (vmemcache_get(cache, key, key_size, NULL, 0, 0, &vsize)) return; /* Somehow got deleted? -- can't happen. */ void *buf = malloc(vsize); if (!buf) return; /* Then, fetch the value. */ if (vmemcache_get(cache, key, key_size, buf, vsize, 0, NULL) == (ssize_t)vsize) { /* Again, it's not supposed to be missing. */ vmemcache_put(colder, key, key_size, buf, vsize); } free(buf); } static void miss_promote(VMEMcache *cache, const void *key, size_t key_size, void *arg) { VMEMcache *colder = (VMEMcache *)arg; size_t vsize; if (vmemcache_get(colder, key, key_size, NULL, 0, 0, &vsize)) { /* * Second-level cache miss. 
	 *
	 * You may want to handle it somehow here.
	 */
		return;
	}

	void *buf = malloc(vsize);
	if (!buf)
		return;

	if (vmemcache_get(colder, key, key_size, buf, vsize, 0, NULL)
			== (ssize_t)vsize) {
		/*
		 * Note that there's no lock, thus our entry may disappear
		 * between these two get() calls.
		 */
		if (!vmemcache_put(cache, key, key_size, buf, vsize)) {
			/*
			 * Put can legitimately fail: value too big for
			 * upper-level cache, no space because all evictable
			 * keys are busy, etc.
			 *
			 * The promotion likely cascades into one or more
			 * demotions to migrate cold keys downwards, to make
			 * space.
			 */

			/*
			 * You may or may not want to evict from cold cache
			 * here.
			 */
			vmemcache_evict(colder, key, key_size);
		}
	}

	free(buf);
}

static void
get(VMEMcache *cache, const char *x, int expfail)
{
	ssize_t ret = vmemcache_get(cache, x, strlen(x) + 1, NULL, 0, 0,
			NULL);
	if ((!ret) == expfail) {
		ERR("get(“%s”) %s when it shouldn't\n", x,
			expfail ? "succeeded" : "failed");
	}
}

int
main(int argc, const char **argv)
{
	if (argc != 3)
		ERR("Usage: twolevel <pmem_dir> <dram_dir>\n");

	VMEMcache *pmem = vmemcache_new();
	VMEMcache *dram = vmemcache_new();
	if (!pmem || !dram)
		ERR("VMEMcache_new failed\n");

	vmemcache_set_size(pmem, L2_CAPACITY);
	vmemcache_set_size(dram, L1_CAPACITY);

	if (vmemcache_add(pmem, argv[1]))
		ERR("vmemcache_add(“%s”) failed: %m\n", argv[1]);
	if (vmemcache_add(dram, argv[2]))
		ERR("vmemcache_add(“%s”) failed: %m\n", argv[2]);

	vmemcache_callback_on_evict(dram, evict_demote, pmem);
	vmemcache_callback_on_miss(dram, miss_promote, pmem);

	void *lotta_zeroes = mmap(NULL, ZSIZE, PROT_READ,
		MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (lotta_zeroes == MAP_FAILED)
		ERR("can't mmap zeroes: %m\n");

#define PUT(x) vmemcache_put(dram, x, strlen(x) + 1, lotta_zeroes, ZSIZE)
#define GET(x) get(dram, x, 0)
#define GETF(x) get(dram, x, 1)

	PUT("first");
	PUT("second");
	PUT("third");
	GET("first");
	GET("first");
	GET("second");
	GET("third");
	GETF("nonexistent");

	const int cap = (L1_CAPACITY / ZSIZE - 1) + (L2_CAPACITY / ZSIZE - 1)
		- 1;
	for (int i = 0; i < cap; i++) {
		char buf[12];
		snprintf(buf, sizeof(buf), "%d", i);
		PUT(buf);
	}

	/* "first" and "second" should have been dropped, "third" is still in */
	GETF("first");
	GETF("second");
	GET("third");

	return 0;
}
vmemcache-0.8.1/tests/vmemcache_test_basic.c000066400000000000000000001052371374403322600211170ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */

/*
 * vmemcache_test_basic.c -- basic unit test for libvmemcache
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "libvmemcache.h"
#include "test_helpers.h"

#define VMEMCACHE_EXTENT ((int)VMEMCACHE_MIN_EXTENT)

#define LEN (VMEMCACHE_EXTENT)
#define KSIZE LEN /* key size */
#define VSIZE LEN /* value size */
#define DNUM 10 /* number of data */

#define SIZE_1K 1024

/* type of statistics */
typedef unsigned long long stat_t;

#ifdef STATS_ENABLED
/* names of statistics */
static const char *stat_str[VMEMCACHE_STATS_NUM] = {
	"PUTs",
	"GETs",
	"HITs",
	"MISSes",
	"EVICTs",
	"CACHE_ENTRIES",
	"DRAM_SIZE_USED",
	"POOL_SIZE_USED",
	"HEAP_ENTRIES",
};
#endif /* STATS_ENABLED */

/* context of callbacks */
struct ctx_cb {
	char vbuf[VSIZE];
	size_t vbufsize;
	size_t vsize;
	stat_t miss_count;
	stat_t evict_count;
};

/* test_put_in_evict callback context */
struct put_evict_cb {
	char *vbuf;
	size_t vsize;
	stat_t n_puts;
	stat_t n_evicts_stack;
	stat_t cb_key;
	stat_t max_evicts_stack;
	stat_t max_puts;
};

/* key bigger than 1kB */
struct big_key {
	char buf[SIZE_1K];
	stat_t n_puts;
};

/*
 * verify_stats -- (internal) verify statistics
 */
static void
verify_stats(VMEMcache *cache, stat_t put, stat_t get, stat_t hit, stat_t miss, stat_t evict, stat_t entries, stat_t dram, stat_t pool) { #ifdef STATS_ENABLED stat_t stat; int ret; ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_PUT, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != put) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_PUT], stat, put); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_GET, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != get) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_GET], stat, get); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_HIT, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != hit) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_HIT], stat, hit); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_MISS, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != miss) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_MISS], stat, miss); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_EVICT, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != evict) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_EVICT], stat, evict); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_ENTRIES, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != entries) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_ENTRIES], stat, entries); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_DRAM_SIZE_USED, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != dram) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_DRAM_SIZE_USED], stat, dram); ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_POOL_SIZE_USED, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != pool) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_POOL_SIZE_USED], stat, pool); ret = vmemcache_get_stat(cache, VMEMCACHE_STATS_NUM, &stat, sizeof(stat)); if (ret != -1) UT_FATAL( "vmemcache_get_stat() succeeded for incorrect statistic (-1)"); #endif /* STATS_ENABLED */ } /* * verify_stat_entries -- (internal) verify the statistic * 'current number of cache entries' */ static void verify_stat_entries(VMEMcache *cache, stat_t entries) { #ifdef STATS_ENABLED stat_t stat; int ret; ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_ENTRIES, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != entries) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_ENTRIES], stat, entries); #endif /* STATS_ENABLED */ } /* * verify_heap_entries -- (internal) verify the statistic * 'current number of heap entries' */ static void verify_heap_entries(VMEMcache *cache, stat_t entries) { #ifdef STATS_ENABLED stat_t stat; int ret; ret = 
vmemcache_get_stat(cache, VMEMCACHE_STAT_HEAP_ENTRIES, &stat, sizeof(stat)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); if (stat != entries) UT_FATAL( "vmemcache_get_stat: wrong statistic's (%s) value: %llu (should be %llu)", stat_str[VMEMCACHE_STAT_HEAP_ENTRIES], stat, entries); #endif } /* * test_new_delete -- (internal) test _new() and _delete() */ static void test_new_delete(const char *dir, const char *file, enum vmemcache_repl_p repl_p) { VMEMcache *cache; /* TEST #1 - minimum values of max_size and extent_size */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); vmemcache_delete(cache); /* TEST #2 - extent_size = max_size = VMEMCACHE_MIN_POOL */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); vmemcache_delete(cache); /* TEST #3 - extent_size == 1 */ cache = vmemcache_new(); if (!vmemcache_set_extent_size(cache, 1)) UT_FATAL( "vmemcache_set_extent_size did not fail with extent_size == 1"); vmemcache_delete(cache); /* TEST #4 - extent_size == -1 */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); if (!vmemcache_set_extent_size(cache, (size_t)-1) && !vmemcache_add(cache, dir)) { UT_FATAL("vmemcache_new did not fail with extent_size == -1"); } vmemcache_delete(cache); /* TEST #5 - extent_size == VMEMCACHE_MIN_EXTENT - 1 */ cache = vmemcache_new(); if (!vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT - 1)) UT_FATAL( "vmemcache_new did not fail with extent_size == VMEMCACHE_MIN_EXTENT - 1"); vmemcache_delete(cache); /* TEST #6 - extent_size == max_size + 1 */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); if (!vmemcache_set_extent_size(cache, VMEMCACHE_MIN_POOL + 1) && !vmemcache_add(cache, dir)) { UT_FATAL( "vmemcache_new did not fail with extent_size == max_size + 1"); } vmemcache_delete(cache); /* TEST #7 - size == VMEMCACHE_MIN_POOL - 1 */ cache = vmemcache_new(); if (!vmemcache_set_size(cache, VMEMCACHE_MIN_POOL - 1)) UT_FATAL( "vmemcache_new did not fail with size == VMEMCACHE_MIN_POOL - 1"); vmemcache_delete(cache); /* TEST #8 - size == 1 */ cache = vmemcache_new(); if (!vmemcache_set_size(cache, 1)) UT_FATAL( "vmemcache_new did not fail with size == 1"); vmemcache_delete(cache); /* TEST #9 - size == -1 */ cache = vmemcache_new(); if (!vmemcache_set_size(cache, (size_t)-1)) UT_FATAL( "vmemcache_new did not fail with size == -1"); vmemcache_delete(cache); /* TEST #10 - not a directory, but a file */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if (!vmemcache_add(cache, file)) UT_FATAL( "vmemcache_new did not fail with a file instead of a directory"); #define ERR_MSG_1 "open: Not a directory" if (strcmp(vmemcache_errormsg(), ERR_MSG_1)) UT_FATAL("wrong error message: '%s' (should be '"ERR_MSG_1"')", vmemcache_errormsg()); vmemcache_delete(cache); /* TEST #11 - NULL directory path */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if 
(!vmemcache_add(cache, NULL)) UT_FATAL( "vmemcache_new did not fail with a NULL directory path"); #define ERR_MSG_2 "invalid (NULL) path" if (strcmp(vmemcache_errormsg(), ERR_MSG_2)) UT_FATAL("wrong error message: '%s' (should be '"ERR_MSG_2"')", vmemcache_errormsg()); vmemcache_delete(cache); /* TEST #12 - nonexistent directory path */ cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); char nonexistent[PATH_MAX]; strcpy(nonexistent, dir); strcat(nonexistent, "/nonexistent_dir"); if (!vmemcache_add(cache, nonexistent)) UT_FATAL( "vmemcache_new did not fail with a nonexistent directory path"); vmemcache_delete(cache); } /* * test_put_get_evict -- (internal) test _put(), _get() and _evict() */ static void test_put_get_evict(const char *dir, enum vmemcache_repl_p repl_p) { VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); const char *key = "KEY"; const char *value = "VALUE"; size_t key_size = strlen(key) + 1; size_t val_size = strlen(value) + 1; if (vmemcache_put(cache, key, key_size, value, val_size)) UT_FATAL("vmemcache_put: %s", vmemcache_errormsg()); verify_stat_entries(cache, 1); char vbuf[VMEMCACHE_EXTENT]; /* user-provided buffer */ size_t vbufsize = VMEMCACHE_EXTENT; /* size of vbuf */ size_t vsize = 0; /* real size of the object */ ssize_t ret; size_t exists_vsize; ret = vmemcache_exists(cache, key, key_size, &exists_vsize); UT_ASSERTeq(ret, 1); /* get the only one element */ ret = vmemcache_get(cache, key, key_size, vbuf, vbufsize, 0, &vsize); if (ret < 0) UT_FATAL("vmemcache_get: %s", vmemcache_errormsg()); UT_ASSERTeq(exists_vsize, vsize); if ((size_t)ret != val_size) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %zu)", ret, val_size); if (vsize != val_size) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %zu)", vsize, val_size); if (strncmp(vbuf, value, vsize)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", vbuf, value); /* evict the only one element */ switch (repl_p) { case VMEMCACHE_REPLACEMENT_NONE: ret = vmemcache_evict(cache, key, key_size); break; case VMEMCACHE_REPLACEMENT_LRU: ret = vmemcache_evict(cache, NULL, 0); break; default: UT_FATAL("unknown policy: %u", repl_p); break; } if (ret == -1) UT_FATAL("vmemcache_evict: %s", vmemcache_errormsg()); /* getting the evicted element should return -1 (no such element) */ ret = vmemcache_get(cache, key, key_size, vbuf, vbufsize, 0, &vsize); if (ret != -1 || errno != ENOENT) UT_FATAL("vmemcache_get did not return -1 (no such element)"); ret = vmemcache_exists(cache, key, key_size, NULL); UT_ASSERTeq(ret, 0); vmemcache_delete(cache); } /* * on_evict_test_evict_cb -- (internal) 'on evict' callback for test_evict */ static void on_evict_test_evict_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { struct ctx_cb *ctx = arg; ssize_t ret; ctx->evict_count++; ret = vmemcache_get(cache, key, key_size, ctx->vbuf, ctx->vbufsize, 0, &ctx->vsize); if (ret < 0) UT_FATAL("vmemcache_get"); if ((size_t)ret != VSIZE) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %i)", ret, VSIZE); } /* * on_miss_test_evict_cb -- (internal) 'on miss' callback for test_evict */ static void on_miss_test_evict_cb(VMEMcache *cache, const void *key, size_t key_size, 
void *arg) { struct ctx_cb *ctx = arg; ctx->miss_count++; size_t size = (key_size <= ctx->vbufsize) ? key_size : ctx->vbufsize; memcpy(ctx->vbuf, key, size); ctx->vsize = size; } /* * test_evict -- (internal) test _evict() */ static void test_evict(const char *dir, enum vmemcache_repl_p repl_p) { VMEMcache *cache; char vbuf[VSIZE]; size_t vsize = 0; ssize_t ret; struct ctx_cb ctx = {"", VSIZE, 0, 0, 0}; struct kv { char key[KSIZE]; char value[VSIZE]; } data[DNUM]; cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); vmemcache_callback_on_evict(cache, on_evict_test_evict_cb, &ctx); vmemcache_callback_on_miss(cache, on_miss_test_evict_cb, &ctx); for (int i = 0; i < DNUM; i++) { data[i].key[0] = 'k'; memset(&data[i].key[1], '0' + i, KSIZE - 2); data[i].key[KSIZE - 1] = 0; data[i].value[0] = 'v'; memset(&data[i].value[1], '0' + i, VSIZE - 2); data[i].value[VSIZE - 1] = 0; if (vmemcache_put(cache, data[i].key, KSIZE, data[i].value, VSIZE)) UT_FATAL("vmemcache_put: %s", vmemcache_errormsg()); } verify_stat_entries(cache, DNUM); /* TEST #1 - evict the element with index #5 */ /* stats: evict:1 (get:1 hit:1) */ ret = vmemcache_evict(cache, data[5].key, KSIZE); if (ret == -1) UT_FATAL("vmemcache_evict: %s", vmemcache_errormsg()); if (ctx.vsize != VSIZE) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %i)", ctx.vsize, VSIZE); /* check if the evicted element is #5 */ if (strncmp(ctx.vbuf, data[5].value, ctx.vsize)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", ctx.vbuf, data[5].value); /* TEST #2 - evict the LRU element */ /* stats: evict:1 (get:1 hit:1) */ ret = vmemcache_evict(cache, NULL, 0); if (ret == -1) UT_FATAL("vmemcache_evict: %s", vmemcache_errormsg()); if (ctx.vsize != VSIZE) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %i)", ctx.vsize, VSIZE); /* check if the evicted LRU element is #0 */ if (strncmp(ctx.vbuf, data[0].value, ctx.vsize)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", ctx.vbuf, data[0].value); /* TEST #3 - get the element with index #1 (to change LRU one to #2) */ /* stats: get:1 hit:1 */ ret = vmemcache_get(cache, data[1].key, KSIZE, vbuf, VSIZE, 0, &vsize); if (ret < 0) UT_FATAL("vmemcache_get"); if ((size_t)ret != VSIZE) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %i)", ret, VSIZE); if (vsize != VSIZE) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %i)", ctx.vsize, VSIZE); /* check if the got element is #1 */ if (strncmp(vbuf, data[1].value, vsize)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", vbuf, data[1].value); /* TEST #4 - evict the LRU element (it should be #2 now) */ /* stats: evict:1 (get:1 hit:1) */ ret = vmemcache_evict(cache, NULL, 0); if (ret == -1) UT_FATAL("vmemcache_evict: %s", vmemcache_errormsg()); if (ctx.vsize != VSIZE) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %i)", ctx.vsize, VSIZE); /* check if the evicted LRU element is #2 */ if (strncmp(ctx.vbuf, data[2].value, ctx.vsize)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", ctx.vbuf, data[2].value); /* TEST #5 - get the evicted element with index #2 */ /* stats: get:1 miss:1 */ ret = vmemcache_get(cache, data[2].key, KSIZE, vbuf, VSIZE, 0, &vsize); if (ret != -1) UT_FATAL("vmemcache_get succeeded when it shouldn't"); if (errno != ENOENT) UT_FATAL("vmemcache_get: errno %d should 
be ENOENT", errno); /* check if the 'on_miss' callback got key #2 */ if (strncmp(ctx.vbuf, data[2].key, ctx.vsize)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", ctx.vbuf, data[2].key); /* TEST #6 - null output arguments */ /* stats: get:1 hit:1 */ vmemcache_get(cache, data[2].key, KSIZE, NULL, VSIZE, 0, NULL); /* TEST #7 - too large put */ if (!vmemcache_put(cache, data[2].key, KSIZE, vbuf, VMEMCACHE_MIN_POOL + 1)) { UT_FATAL("vmemcache_put: too large put didn't fail"); } if (errno != ENOSPC) { UT_FATAL( "vmemcache_put: too large put returned \"%s\" \"%s\" instead of ENOSPC", strerror(errno), vmemcache_errormsg()); } /* TEST #8 - evict nonexistent key */ const char *non_existent_key = "non_existent"; ret = vmemcache_evict(cache, non_existent_key, strlen(non_existent_key)); if (ret == 0) UT_FATAL( "vmemcache_evict: return value for nonexistent key equals 0"); else if (errno != ENOENT) UT_FATAL( "vmemcache_evict: nonexistent key: errno %d (should be %d)", errno, ENOENT); /* free all the memory */ /* stats: evict:DNUM+1 -3 already evicted, miss:1 */ while (vmemcache_evict(cache, NULL, 0) == 0) ; /* check statistics */ verify_stats(cache, DNUM, /* put */ 3 + ctx.evict_count, /* get */ 1 + ctx.evict_count, /* hit */ ctx.miss_count, ctx.evict_count, 0, 0, 0); UT_ASSERTeq(ctx.miss_count, 2); UT_ASSERTeq(ctx.evict_count, DNUM); vmemcache_delete(cache); } /* * on_evict_test_memory_leaks_cb -- (internal) 'on evict' callback for * test_memory_leaks */ static void on_evict_test_memory_leaks_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { stat_t *counter = arg; (*counter)++; } /* * test_memory_leaks -- (internal) test if there are any memory leaks */ static void test_memory_leaks(const char *dir, int key_gt_1K, enum vmemcache_repl_p repl_p, unsigned seed) { char *vbuf; char *get_buf; size_t size; size_t vsize; int ret; ssize_t get_ret; struct big_key bk; srand(seed); stat_t n_puts = 0; stat_t n_evicts = 0; stat_t n_gets = 0; size_t min_size = VMEMCACHE_MIN_EXTENT / 2; size_t max_size = VMEMCACHE_MIN_POOL / 16; VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); vmemcache_callback_on_evict(cache, on_evict_test_memory_leaks_cb, &n_evicts); while (n_evicts < 1000) { size = min_size + (size_t)rand() % (max_size - min_size + 1); vbuf = malloc(size); if (vbuf == NULL) UT_FATAL("out of memory"); memset(vbuf, 42, size - 1); vbuf[size - 1] = '\0'; if (key_gt_1K) { memset(bk.buf, 42 /* arbitrary */, sizeof(bk.buf)); bk.n_puts = n_puts; ret = vmemcache_put(cache, &bk, sizeof(bk), vbuf, size); } else { ret = vmemcache_put(cache, &n_puts, sizeof(n_puts), vbuf, size); } if (ret) UT_FATAL( "vmemcache_put(n_puts: %llu n_evicts: %llu): %s", n_puts, n_evicts, vmemcache_errormsg()); get_buf = malloc(size); if (get_buf == NULL) UT_FATAL("out of memory"); if (key_gt_1K) get_ret = vmemcache_get(cache, &bk, sizeof(bk), get_buf, size, 0, &vsize); else get_ret = vmemcache_get(cache, &n_puts, sizeof(n_puts), get_buf, size, 0, &vsize); if (get_ret < 0) UT_FATAL("vmemcache_get: %s", vmemcache_errormsg()); if ((size_t)get_ret != size) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %zu)", get_ret, size); if (size != vsize) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %zu)", vsize, size); if (strcmp(vbuf, get_buf)) UT_FATAL( "vmemcache_get: wrong value: %s 
(should be %s)", get_buf, vbuf); free(vbuf); free(get_buf); n_gets++; n_puts++; } verify_stat_entries(cache, n_puts - n_evicts); /* free all the memory */ while (vmemcache_evict(cache, NULL, 0) == 0) ; /* check statistics */ verify_stats(cache, n_puts, n_gets, n_gets, 0, n_evicts, 0, 0, 0); vmemcache_delete(cache); if (n_evicts != n_puts) UT_FATAL("memory leak detected"); } /* * test_merge_allocations -- (internal) test merging allocations */ static void test_merge_allocations(const char *dir, enum vmemcache_repl_p repl_p) { ssize_t ret; #define N_KEYS 5 const char *key[N_KEYS] = { "KEY_1", "KEY_2", "KEY_3", "KEY_4", "KEY_5", }; const char *value = "VALUE"; size_t key_size = strlen(key[0]) + 1; size_t val_size = strlen(value) + 1; VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_EXTENT); vmemcache_set_eviction_policy(cache, repl_p); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); verify_stat_entries(cache, 0); verify_heap_entries(cache, 1); for (int i = 0; i < N_KEYS; i++) if (vmemcache_put(cache, key[i], key_size, value, val_size)) UT_FATAL("vmemcache_put: %s", vmemcache_errormsg()); verify_stat_entries(cache, N_KEYS); verify_heap_entries(cache, 1); /* order of evicting the keys */ const unsigned i_key[N_KEYS] = {1, 3, 0, 4, 2}; for (int i = 0; i < N_KEYS; i++) { ret = vmemcache_evict(cache, key[i_key[i]], key_size); if (ret == -1) UT_FATAL("vmemcache_evict: %s", vmemcache_errormsg()); } verify_stat_entries(cache, 0); verify_heap_entries(cache, 1); if (vmemcache_put(cache, key[0], key_size, value, val_size)) UT_FATAL("vmemcache_put: %s", vmemcache_errormsg()); verify_stat_entries(cache, 1); verify_heap_entries(cache, 1); vmemcache_delete(cache); } /* * on_evict_test_put_in_evict_cb -- (internal) 'on evict' callback * for test_put_in_evict */ static void on_evict_test_put_in_evict_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { struct put_evict_cb *ctx = arg; /* * restrict the 'on evict' callbacks stack size to mitigate the risk * of stack overflow */ if (++ctx->n_evicts_stack > ctx->max_evicts_stack) return; /* * keys provided by callback should not overlap with keys provided * in main loop */ ctx->cb_key++; int ret = vmemcache_put(cache, &ctx->cb_key, sizeof(ctx->cb_key), ctx->vbuf, ctx->vsize); if (ret && errno != ENOSPC) UT_FATAL("vmemcache_put: %s, key: %d, errno: %d (should be %d)", vmemcache_errormsg(), *(int *)key, errno, ENOSPC); } /* * test_put_in_evict -- (internal) test valid library behaviour for making * a put in 'on evict' callback function */ static void test_put_in_evict(const char *dir, enum vmemcache_repl_p policy, unsigned seed) { size_t min_size = VMEMCACHE_MIN_EXTENT / 2; size_t max_size = VMEMCACHE_MIN_POOL / 16; stat_t max_puts = 1000; stat_t max_evicts_stack = 500; srand(seed); VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, policy); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); struct put_evict_cb ctx = {NULL, 0, 0, 0, max_puts, max_evicts_stack, max_puts}; vmemcache_callback_on_evict(cache, on_evict_test_put_in_evict_cb, &ctx); while (ctx.n_puts < ctx.max_puts) { ctx.n_puts++; ctx.n_evicts_stack = 0; ctx.vsize = get_granular_rand_size(max_size, min_size); ctx.vbuf = malloc(ctx.vsize); if (ctx.vbuf == NULL) UT_FATAL("out of memory"); int ret = vmemcache_put(cache, 
&ctx.n_puts, sizeof(ctx.n_puts), ctx.vbuf, ctx.vsize); if (ret) UT_FATAL("vmemcache_put(n_puts: %llu): %s", ctx.n_puts, vmemcache_errormsg()); free(ctx.vbuf); } vmemcache_delete(cache); } /* * test_vmemcache_get_stat -- (internal) vmemcache_get_stat tests */ static void test_vmemcache_get_stat(const char *dir) { VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); /* TEST #1 - stat with invalid size */ size_t invalid_size = sizeof(stat_t) + 1; char buf[invalid_size]; int ret = vmemcache_get_stat(cache, VMEMCACHE_STAT_PUT, buf, invalid_size); if (ret == 0) UT_FATAL( "vmemcache_get_stat: unexpected success in case of invalid size of a statistic"); else if (errno != EINVAL) UT_FATAL("vmemcache_get_stat: errno equals %d (should be %d)", errno, EINVAL); vmemcache_delete(cache); } /* context of data integrity callback */ struct ctx_di_cb { char *values_buffer; char *get_buffer; size_t size_get_buffer; stat_t evict_count; }; struct value { struct header { size_t offset; size_t size; } header; char buffer[]; /* of size 'size' */ }; #define HEADER_SIZE offsetof(struct value, buffer) /* * on_evict_test_data_integrity -- (internal) 'on evict' callback * for test_data_integrity */ static void on_evict_test_data_integrity(VMEMcache *cache, const void *key, size_t key_size, void *arg) { struct ctx_di_cb *ctx = arg; size_t vsize, size; ssize_t ret; ctx->evict_count++; /* * First get - read the whole entry (offset == 0) */ ret = vmemcache_get(cache, key, key_size, ctx->get_buffer, ctx->size_get_buffer, 0, &vsize); if (ret < 0) UT_FATAL("vmemcache_get: %s", vmemcache_errormsg()); struct value *value = (struct value *)ctx->get_buffer; size = value->header.size + HEADER_SIZE; if ((size_t)ret != size) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %zu)", ret, size); if (vsize != size) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %zu)", vsize, size); int cmp_val = memcmp(value->buffer, ctx->values_buffer + value->header.offset, value->header.size); if (cmp_val) UT_FATAL( "vmemcache_get: wrong value for offset = 0 and size = %zu", value->header.size); /* * Second get - read a part of the entry (offset != 0) * * Offset and length are set to random values from the ranges: * - length: from VMEMCACHE_MIN_EXTENT to (value->header.size - 1) * - offset: from 1 to (value->header.size - length) */ size_t length; size_t offset; if (value->header.size > VMEMCACHE_MIN_EXTENT) length = VMEMCACHE_MIN_EXTENT + (size_t)rand() % (value->header.size - VMEMCACHE_MIN_EXTENT); else length = value->header.size - 1; offset = 1 + (size_t)rand() % (value->header.size - length); ret = vmemcache_get(cache, key, key_size, value->buffer, length, offset + HEADER_SIZE, &vsize); if (ret < 0) UT_FATAL("vmemcache_get: %s", vmemcache_errormsg()); if ((size_t)ret != length) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %zu)", ret, length); if (vsize != size) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %zu)", vsize, size); cmp_val = memcmp(value->buffer, ctx->values_buffer + value->header.offset + offset, length); if (cmp_val) UT_FATAL( "vmemcache_get: wrong value for offset = %zu and size = %zu", offset, length); } /* * test_data_integrity -- (internal) test data integrity */ static void test_data_integrity(const char *dir, unsigned seed) { size_t size; size_t offset; int ret; srand(seed); stat_t n_puts = 0; size_t 
buff_size = VMEMCACHE_MIN_POOL; size_t min_size = VMEMCACHE_MIN_EXTENT; size_t max_size = VMEMCACHE_MIN_POOL / 16; /* create and fill the buffer of values */ char *values_buffer = malloc(buff_size); if (values_buffer == NULL) UT_FATAL("out of memory"); for (int i = 0; i < (int)buff_size; i++) values_buffer[i] = (char)rand(); /* create the put buffer */ char *put_buffer = malloc(max_size); if (put_buffer == NULL) UT_FATAL("out of memory"); /* create the get buffer */ char *get_buffer = malloc(max_size); if (get_buffer == NULL) UT_FATAL("out of memory"); struct ctx_di_cb ctx = {values_buffer, get_buffer, max_size, 0}; VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); vmemcache_callback_on_evict(cache, on_evict_test_data_integrity, &ctx); while (ctx.evict_count < 1000) { size = min_size + (size_t)rand() % (max_size - min_size + 1); offset = (size_t)rand() % (buff_size - size + 1); struct value *value = (struct value *)put_buffer; value->header.offset = offset; value->header.size = size - HEADER_SIZE; memcpy(value->buffer, values_buffer + value->header.offset, value->header.size); ret = vmemcache_put(cache, &n_puts, sizeof(n_puts), value, size); if (ret) UT_FATAL( "vmemcache_put(n_puts: %llu n_evicts: %llu): %s", n_puts, ctx.evict_count, vmemcache_errormsg()); n_puts++; } verify_stat_entries(cache, n_puts - ctx.evict_count); /* free all the memory */ while (vmemcache_evict(cache, NULL, 0) == 0) ; /* check statistics */ verify_stats(cache, n_puts, /* puts */ 2 * ctx.evict_count, /* gets */ 2 * ctx.evict_count, /* hits */ 0, /* misses */ ctx.evict_count, /* evicts */ 0, /* cache entries */ 0, /* DRAM memory used */ 0); /* pool memory used */ vmemcache_delete(cache); free(values_buffer); free(put_buffer); free(get_buffer); if (ctx.evict_count != n_puts) UT_FATAL("memory leak detected"); } /* * test_get_with_offset -- (internal) test _get() with offset != 0 */ static void test_get_with_offset(const char *dir) { VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); char key[KSIZE]; char value[VSIZE + 1]; memset(key, 0, KSIZE); for (int i = 0; i < VSIZE; i++) value[i] = (char)i; if (vmemcache_put(cache, key, KSIZE, value, VSIZE)) UT_FATAL("vmemcache_put: %s", vmemcache_errormsg()); verify_stat_entries(cache, 1); char vbuf[VSIZE + 1]; /* user-provided buffer */ size_t vsize = 0; /* real size of the object */ ssize_t read; /* * An arbitrary chosen offset (integer value == 33) * in the 'value' buffer of consecutive characters. 
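* * Worked example: value[i] == (char)i, so the byte at index 33 is the * character '!' itself; the get() below must therefore return exactly * VSIZE - 33 bytes, and the first byte copied into vbuf is value[offset].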
*/ size_t offset = '!'; /* get the only one element */ read = vmemcache_get(cache, key, KSIZE, vbuf, VSIZE, offset, &vsize); if (read < 0) UT_FATAL("vmemcache_get: %s", vmemcache_errormsg()); if ((size_t)read != VSIZE - offset) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %zu)", read, VSIZE - offset); if (vsize != VSIZE) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %i)", vsize, VSIZE); /* insert '\0' after the last read byte */ vbuf[read] = 0; value[offset + (size_t)read] = 0; if (memcmp(vbuf, value + offset, (unsigned long)read)) UT_FATAL("vmemcache_get: wrong value: %s (should be %s)", vbuf, value + offset); vmemcache_delete(cache); } /* test_offsets test case parameters */ struct offset_tc { char *vbuf; size_t vbuf_size; size_t offset; ssize_t expected_ret; }; /* * run_offset_tc -- (internal) run single get with offset test case */ static void run_offset_tc(struct offset_tc *tc, VMEMcache *cache, const char *key, size_t ksize, const char *val, size_t val_size) { size_t vsize; ssize_t ret = vmemcache_get(cache, key, ksize, tc->vbuf, tc->vbuf_size, tc->offset, &vsize); if (ret != tc->expected_ret) UT_FATAL( "vmemcache_get: wrong return value: %zi (should be %zi)", ret, tc->expected_ret); if (vsize != val_size) UT_FATAL( "vmemcache_get: wrong size of value: %zi (should be %zu)", vsize, val_size); if (tc->expected_ret > 0) { const char *retval = val + tc->offset; if (strncmp(tc->vbuf, retval, (size_t)tc->expected_ret)) UT_FATAL( "vmemcache_get: wrong value: %s (should be %s)", tc->vbuf, retval); } } /* * test_offsets -- (internal) test vmemcache_get() with * different offsets and value buffer sizes */ static void test_offsets(const char *dir, enum vmemcache_repl_p policy) { size_t val_size = 32; char vbuf_eq[val_size]; size_t twice_size = val_size * 2; char vbuf_twice[twice_size]; size_t half_size = val_size / 2; char vbuf_half[half_size]; struct offset_tc tcs[] = { /* vbuf, vbuf_size, offset, expected_ret */ {vbuf_eq, val_size, 0, (ssize_t)val_size}, {vbuf_eq, val_size, 2, (ssize_t)val_size - 2}, {vbuf_eq, val_size, val_size - 1, 1}, {vbuf_eq, val_size, val_size, 0}, {vbuf_eq, val_size, val_size + 1, 0}, {vbuf_twice, twice_size, 0, (ssize_t)val_size}, {vbuf_twice, twice_size, 4, (ssize_t)val_size - 4}, {vbuf_twice, twice_size, val_size - 1, 1}, {vbuf_twice, twice_size, val_size, 0}, {vbuf_twice, twice_size, val_size + 1, 0}, {vbuf_half, half_size, 0, (ssize_t)half_size}, {vbuf_half, half_size, 2, (ssize_t)half_size}, {vbuf_half, half_size, half_size, (ssize_t)half_size}, {vbuf_half, half_size, half_size + 2, (ssize_t)val_size - (ssize_t)half_size - 2}, {vbuf_half, half_size, val_size, 0}, {vbuf_half, half_size, val_size + 1, 0}, }; size_t n_tcs = sizeof(tcs) / sizeof(struct offset_tc); VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); vmemcache_set_eviction_policy(cache, policy); if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_add: %s", vmemcache_errormsg()); const char *key = "KEY"; size_t ksize = strlen(key) + 1; char val[val_size]; for (unsigned i = 0; i < val_size - 1; i++) val[i] = (char)('a' + i); val[val_size - 1] = '\0'; if (vmemcache_put(cache, key, ksize, val, val_size)) UT_FATAL("vmemcache_put: %s", vmemcache_errormsg()); for (unsigned i = 0; i < n_tcs; ++i) { run_offset_tc(&tcs[i], cache, key, ksize, val, val_size); } vmemcache_delete(cache); } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "usage: %s dir-name\n", argv[0]); exit(-1); } 
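/* * The optional second argument seeds the pseudo-random tests below * (test_memory_leaks, test_put_in_evict, test_data_integrity); when it is * absent, the driver picks a seed from time() and prints it, so a failing * randomized run can be reproduced by passing that seed back in. */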
const char *dir = argv[1]; unsigned seed; if (argc == 3) { if (str_to_unsigned(argv[2], &seed) || seed < 1) UT_FATAL("incorrect value of seed: %s", argv[2]); } else { seed = (unsigned)time(NULL); printf("seed: %u\n", seed); } test_new_delete(dir, argv[0], VMEMCACHE_REPLACEMENT_NONE); test_new_delete(dir, argv[0], VMEMCACHE_REPLACEMENT_LRU); test_put_get_evict(dir, VMEMCACHE_REPLACEMENT_NONE); test_put_get_evict(dir, VMEMCACHE_REPLACEMENT_LRU); test_get_with_offset(dir); test_evict(dir, VMEMCACHE_REPLACEMENT_LRU); /* '0' means: key size < 1kB */ test_memory_leaks(dir, 0, VMEMCACHE_REPLACEMENT_LRU, seed); /* '1' means: key size > 1kB */ test_memory_leaks(dir, 1, VMEMCACHE_REPLACEMENT_LRU, seed); test_merge_allocations(dir, VMEMCACHE_REPLACEMENT_NONE); test_merge_allocations(dir, VMEMCACHE_REPLACEMENT_LRU); test_put_in_evict(dir, VMEMCACHE_REPLACEMENT_LRU, seed); test_offsets(dir, VMEMCACHE_REPLACEMENT_LRU); test_offsets(dir, VMEMCACHE_REPLACEMENT_NONE); test_vmemcache_get_stat(dir); test_data_integrity(dir, seed); return 0; } vmemcache-0.8.1/tests/vmemcache_test_heap_usage.c000066400000000000000000000117331374403322600221370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * vmemcache_test_heap_usage.c -- libvmemcache heap usage tracing test. * The test passes if measured unit usage (usage per entry) is lower than * MAX_BYTES_PER_ENTRY */ /* enable RTLD_NEXT not defined by POSIX */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include "test_helpers.h" #define MAX_BYTES_PER_ENTRY 180 static __thread int trace = 0; /* * TRACE_HEAP - heap usage is traced only for expressions wrapped inside * this macro */ #define TRACE_HEAP(fn) do { trace = 1; fn; trace = 0; } while (0) static void *(*actual_malloc)(size_t); static void *(*actual_realloc)(void *, size_t); static void (*actual_free)(void *); typedef struct { ssize_t usage; size_t entries; ssize_t unit_usage; int evicted; } heap_usage; static __thread heap_usage usage = {0, 0, 0, 0}; static int verbose = 0; /* * on_evict_cb -- (internal) 'on evict' callback */ static void on_evict_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { heap_usage *usage = arg; usage->entries--; usage->evicted = 1; } /* * log_line -- (internal) print log line with current heap status */ static void log_line(heap_usage *usage, size_t size, void *ptr, const char *prefix) { printf("%s %zu bytes\t(%p)\theap usage: %zd bytes\n", prefix, size, ptr, usage->usage); } /* * malloc -- (internal) 'malloc' wrapper */ void *malloc(size_t size) { int tmp_trace = trace; trace = 0; if (actual_malloc == NULL) { actual_malloc = dlsym(RTLD_NEXT, "malloc"); if (actual_malloc == NULL) UT_FATAL("dlsym: could not load 'malloc' symbol"); } void *p = actual_malloc(size); if (p == NULL) goto end; if (tmp_trace) { size_t s = malloc_usable_size(p); usage.usage += (ssize_t)s; if (verbose) log_line(&usage, s, p, "allocating"); } end: trace = tmp_trace; return p; } /* * realloc -- (internal) 'realloc' wrapper */ void *realloc(void *ptr, size_t size) { int tmp_trace = trace; trace = 0; if (actual_realloc == NULL) { actual_realloc = dlsym(RTLD_NEXT, "realloc"); if (actual_realloc == NULL) UT_FATAL("dlsym: could not load 'realloc' symbol"); } if (tmp_trace) { size_t old_size = malloc_usable_size(ptr); usage.usage -= (ssize_t)old_size; } void *p = actual_realloc(ptr, size); if (p == NULL) goto end; if (tmp_trace) { size_t new_size = malloc_usable_size(p); usage.usage += (ssize_t)new_size; 
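/* * The usage counter is charged with malloc_usable_size() rather than the * requested size, so it reflects what the allocator actually reserves; * a realloc() is accounted as a free of the old usable size (subtracted * above, before calling the real realloc) plus an allocation of the new one. */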
if (verbose) log_line(&usage, new_size, p, "allocating"); } end: trace = tmp_trace; return p; } /* * free -- (internal) 'free' wrapper */ void free(void *ptr) { int tmp_trace = trace; trace = 0; if (actual_free == NULL) { actual_free = dlsym(RTLD_NEXT, "free"); if (actual_free == NULL) UT_FATAL("dlsym: could not load 'free' symbol"); } if (tmp_trace) { size_t size = malloc_usable_size(ptr); usage.usage -= (ssize_t)size; if (verbose) log_line(&usage, size, ptr, "freeing"); } actual_free(ptr); trace = tmp_trace; } /* * test_heap_usage -- (internal) test heap usage */ static int test_heap_usage(const char *dir, heap_usage *usage) { int ret = 0; VMEMcache *cache; TRACE_HEAP(cache = vmemcache_new()); if (cache == NULL) UT_FATAL("vmemcache_new: %s", vmemcache_errormsg()); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT); TRACE_HEAP(ret = vmemcache_add(cache, dir)); if (ret) UT_FATAL("vmemcache_add: %s", vmemcache_errormsg()); TRACE_HEAP(vmemcache_callback_on_evict(cache, on_evict_cb, usage)); size_t key = 0; size_t vsize = 32; char *value = malloc(vsize); if (value == NULL) UT_FATAL("out of memory"); memset(value, 'a', vsize - 1); value[vsize - 1] = '\0'; int putret; while (!usage->evicted) { TRACE_HEAP(putret = vmemcache_put(cache, &key, sizeof(key), value, vsize)); if (putret) UT_FATAL("vmemcache put: %s. errno: %s", vmemcache_errormsg(), strerror(errno)); usage->entries++; key++; usage->unit_usage = usage->usage / (ssize_t)usage->entries; if (verbose) printf( "bytes per entry: %zu, (number of entries: %zu)\n", usage->unit_usage, usage->entries); } free(value); ssize_t unit_usage_full_cache = usage->unit_usage; TRACE_HEAP(vmemcache_delete(cache)); printf("heap usage per entry: %zd bytes\n", unit_usage_full_cache); if (unit_usage_full_cache > MAX_BYTES_PER_ENTRY) { UT_ERR( "heap usage per entry equals %zd bytes, should be lower than %d bytes", unit_usage_full_cache, MAX_BYTES_PER_ENTRY); ret = 1; } if (usage->usage != 0) UT_FATAL( "Final heap usage is different than 0 (%zd): possible memory leak", usage->usage); return ret; } int main(int argc, char **argv) { if (argc < 2) UT_FATAL("%s <dir> [verbose]\n", argv[0]); if (argc == 3) { if (strcmp("verbose", argv[2]) == 0) verbose = 1; else UT_FATAL("Unknown argument: %s", argv[2]); } return test_heap_usage(argv[1], &usage); } vmemcache-0.8.1/tests/vmemcache_test_mt.c000066400000000000000000000377621374403322600204660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vmemcache_test_mt.c -- multi-threaded test for libvmemcache */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <assert.h> #include <time.h> #include "libvmemcache.h" #include "test_helpers.h" #include "os_thread.h" #define EVICT_BY_LRU 0 #define EVICT_BY_KEY 1 #define BUF_SIZE 256 /* type of statistics */ typedef unsigned long long stat_t; struct buffers { size_t size; char *buff; }; struct context { unsigned thread_number; unsigned n_threads; VMEMcache *cache; struct buffers *buffs; unsigned nbuffs; unsigned ops_count; void *(*worker)(void *); }; #ifdef STATS_ENABLED /* * get_stat -- (internal) get one statistic */ static void get_stat(VMEMcache *cache, stat_t *stat_val, enum vmemcache_statistic i_stat) { int ret = vmemcache_get_stat(cache, i_stat, stat_val, sizeof(*stat_val)); if (ret == -1) UT_FATAL("vmemcache_get_stat: %s", vmemcache_errormsg()); } #endif /* STATS_ENABLED */ /* * free_cache -- (internal) free the cache */ static void free_cache(VMEMcache *cache) { /* evict all entries from the cache */ while
(vmemcache_evict(cache, NULL, 0) == 0) ; #ifdef STATS_ENABLED /* verify that all memory is freed */ stat_t entries, heap_entries, dram, pool_ued; get_stat(cache, &entries, VMEMCACHE_STAT_ENTRIES); get_stat(cache, &heap_entries, VMEMCACHE_STAT_HEAP_ENTRIES); get_stat(cache, &dram, VMEMCACHE_STAT_DRAM_SIZE_USED); get_stat(cache, &pool_ued, VMEMCACHE_STAT_POOL_SIZE_USED); if (entries != 0) UT_FATAL("%llu entries were not freed", entries); if (dram != 0) UT_FATAL("%llu bytes of DRAM memory were not freed", dram); if (pool_ued != 0) UT_FATAL("%llu bytes of pool memory were not freed", pool_ued); if (heap_entries != 1) UT_FATAL("%llu heap entries were not merged", heap_entries - 1); #endif /* STATS_ENABLED */ } /* * run_threads -- (internal) create and join threads */ static void run_threads(unsigned n_threads, os_thread_t *threads, struct context *ctx) { for (unsigned i = 0; i < n_threads; ++i) os_thread_create(&threads[i], NULL, ctx[i].worker, &ctx[i]); for (unsigned i = 0; i < n_threads; ++i) os_thread_join(&threads[i], NULL); } /* * worker_thread_put -- (internal) worker testing vmemcache_put() */ static void * worker_thread_put(void *arg) { struct context *ctx = arg; unsigned long long i; unsigned long long shift = ctx->thread_number * ctx->ops_count; for (i = shift; i < (shift + ctx->ops_count); i++) { if (vmemcache_put(ctx->cache, &i, sizeof(i), ctx->buffs[i % ctx->nbuffs].buff, ctx->buffs[i % ctx->nbuffs].size)) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); } return NULL; } /* * worker_thread_get -- (internal) worker testing vmemcache_get() */ static void * worker_thread_get(void *arg) { struct context *ctx = arg; unsigned long long i; char vbuf[BUF_SIZE]; /* user-provided buffer */ size_t vbufsize = BUF_SIZE; /* size of vbuf */ size_t vsize = 0; /* real size of the object */ /* starting from 1, because the entry #0 has been evicted */ for (i = 1; i < ctx->ops_count; i++) { if (vmemcache_get(ctx->cache, &i, sizeof(i), vbuf, vbufsize, 0, &vsize) == -1) UT_FATAL("ERROR: vmemcache_get: %s", vmemcache_errormsg()); } return NULL; } /* * worker_thread_put_in_gets -- (internal) worker testing vmemcache_put() */ static void * worker_thread_put_in_gets(void *arg) { struct context *ctx = arg; unsigned long long i; unsigned long long start = ctx->ops_count + (ctx->thread_number & 0x1); /* * There is '3' here - in order to have the same number (ctx->ops_count) * of operations per each thread. 
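* Each putting thread starts at ops_count + (thread_number & 0x1) and steps * by 2 up to end == 3 * ops_count, which gives (2 * ops_count) / 2, i.e. * exactly ops_count puts per thread.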
*/ unsigned long long end = 3 * ctx->ops_count; for (i = start; i < end; i += 2) { if (vmemcache_put(ctx->cache, &i, sizeof(i), ctx->buffs[i % ctx->nbuffs].buff, ctx->buffs[i % ctx->nbuffs].size)) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); } return NULL; } /* * run_test_put -- (internal) run test for vmemcache_put() */ static void run_test_put(VMEMcache *cache, unsigned n_threads, os_thread_t *threads, unsigned ops_per_thread, struct context *ctx) { free_cache(cache); for (unsigned i = 0; i < n_threads; ++i) { ctx[i].worker = worker_thread_put; ctx[i].ops_count = ops_per_thread; } printf("%s: STARTED\n", __func__); run_threads(n_threads, threads, ctx); printf("%s: PASSED\n", __func__); } /* * on_evict_cb -- (internal) 'on evict' callback for run_test_get */ static void on_evict_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { int *cache_is_full = arg; *cache_is_full = 1; } /* * init_test_get -- (internal) initialize test for vmemcache_get() */ static void init_test_get(VMEMcache *cache, unsigned n_threads, os_thread_t *threads, unsigned ops_per_thread, struct context *ctx) { free_cache(cache); int cache_is_full = 0; vmemcache_callback_on_evict(cache, on_evict_cb, &cache_is_full); printf("%s: filling the pool...", __func__); fflush(stdout); unsigned n = 0; /* number of elements put into the cache */ while (!cache_is_full && n < ops_per_thread) { unsigned long long n_key = n; if (vmemcache_put(cache, &n_key, sizeof(n_key), ctx->buffs[n % ctx->nbuffs].buff, ctx->buffs[n % ctx->nbuffs].size)) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); n++; } printf(" done (inserted %u elements)\n", n); vmemcache_callback_on_evict(cache, NULL, NULL); if (ops_per_thread > n) { /* we cannot get more than we have put */ ops_per_thread = n; printf("%s: decreasing ops_count to: %u\n", __func__, n_threads * ops_per_thread); } for (unsigned i = 0; i < n_threads; ++i) { ctx[i].worker = worker_thread_get; ctx[i].ops_count = ops_per_thread; } } /* * run_test_get -- (internal) run test for vmemcache_get() */ static void run_test_get(VMEMcache *cache, unsigned n_threads, os_thread_t *threads, unsigned ops_per_thread, struct context *ctx) { init_test_get(cache, n_threads, threads, ops_per_thread, ctx); printf("%s: STARTED\n", __func__); run_threads(n_threads, threads, ctx); printf("%s: PASSED\n", __func__); } static void on_miss_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg); /* * run_test_get_put -- (internal) run test for vmemcache_get() * and vmemcache_put() */ static void run_test_get_put(VMEMcache *cache, unsigned n_threads, os_thread_t *threads, unsigned ops_per_thread, struct context *ctx) { init_test_get(cache, n_threads, threads, ops_per_thread, ctx); if (n_threads < 10) { ctx[n_threads >> 1].worker = worker_thread_put_in_gets; } else { /* 20% of threads (in the middle of their array) are puts */ unsigned n_puts = (2 * n_threads) / 10; /* 20% of threads */ unsigned start = (n_threads / 2) - (n_puts / 2); for (unsigned i = start; i < start + n_puts; i++) ctx[i].worker = worker_thread_put_in_gets; } vmemcache_callback_on_miss(cache, on_miss_cb, ctx); printf("%s: STARTED\n", __func__); run_threads(n_threads, threads, ctx); printf("%s: PASSED\n", __func__); } /* * on_miss_cb -- (internal) 'on miss' callback for run_test_get_on_miss * and run_test_get_put */ static void on_miss_cb(VMEMcache *cache, const void *key, size_t key_size, void *arg) { struct context *ctx = arg; typedef unsigned long long key_t; assert(key_size == sizeof(key_t)); key_t n = 
*(key_t *)key; int ret = vmemcache_put(ctx->cache, key, key_size, ctx->buffs[n % ctx->nbuffs].buff, ctx->buffs[n % ctx->nbuffs].size); if (ret && errno != EEXIST) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); } /* * worker_thread_get_unique_keys -- (internal) worker testing vmemcache_get() * with unique keys */ static void * worker_thread_get_unique_keys(void *arg) { struct context *ctx = arg; unsigned long long key; char vbuf[BUF_SIZE]; /* user-provided buffer */ size_t vbufsize = BUF_SIZE; /* size of vbuf */ size_t vsize = 0; /* real size of the object */ for (unsigned i = 0; i < ctx->ops_count; i++) { key = ((unsigned long long)ctx->thread_number << 48) | i; if (vmemcache_get(ctx->cache, &key, sizeof(key), vbuf, vbufsize, 0, &vsize) == -1) UT_FATAL("ERROR: vmemcache_get: %s", vmemcache_errormsg()); } return NULL; } /* * run_test_get_on_miss -- (internal) run test for vmemcache_get() with * vmemcache_put() called in the 'on miss' callback */ static void run_test_get_on_miss(VMEMcache *cache, unsigned n_threads, os_thread_t *threads, unsigned ops_per_thread, struct context *ctx) { free_cache(cache); vmemcache_callback_on_miss(cache, on_miss_cb, ctx); for (unsigned i = 0; i < n_threads; ++i) { ctx[i].worker = worker_thread_get_unique_keys; ctx[i].ops_count = ops_per_thread; } printf("%s: STARTED\n", __func__); run_threads(n_threads, threads, ctx); vmemcache_callback_on_miss(cache, NULL, NULL); #ifdef STATS_ENABLED stat_t puts, gets, misses; get_stat(cache, &puts, VMEMCACHE_STAT_PUT); get_stat(cache, &gets, VMEMCACHE_STAT_GET); get_stat(cache, &misses, VMEMCACHE_STAT_MISS); stat_t nops = n_threads * ops_per_thread; if (puts != nops) UT_FATAL("wrong number of puts: %llu (should be: %llu", puts, nops); if (gets != nops) UT_FATAL("wrong number of gets: %llu (should be: %llu", gets, nops); if (misses != nops) UT_FATAL("wrong number of misses: %llu (should be: %llu", misses, nops); #endif /* STATS_ENABLED */ printf("%s: PASSED\n", __func__); } static uint32_t keep_running; /* * worker_thread_test_evict_get -- (internal) worker testing vmemcache_get() */ static void * worker_thread_test_evict_get(void *arg) { struct context *ctx = arg; unsigned long long n = ctx->thread_number; char vbuf; while (__atomic_load_n(&keep_running, __ATOMIC_SEQ_CST) && vmemcache_get(ctx->cache, &n, sizeof(n), &vbuf, sizeof(vbuf), 0, NULL) == sizeof(vbuf)) ; return NULL; } /* * worker_thread_test_evict_by_LRU -- (internal) worker evicting by LRU */ static void * worker_thread_test_evict_by_LRU(void *arg) { struct context *ctx = arg; /* at least one entry has to be evicted successfully */ if (vmemcache_evict(ctx->cache, NULL, 0)) UT_FATAL("vmemcache_evict: %s", vmemcache_errormsg()); /* try to evict all other entries */ while (vmemcache_evict(ctx->cache, NULL, 0) == 0) ; __atomic_store_n(&keep_running, 0, __ATOMIC_SEQ_CST); return NULL; } /* * worker_thread_test_evict_by_key -- (internal) worker evicting by key */ static void * worker_thread_test_evict_by_key(void *arg) { struct context *ctx = arg; unsigned n_threads = ctx->n_threads; /* * Try to evict all entries by key first. * It is very likely that even all of these vmemcache_evict() calls * will fail, because the conditions are extremely difficult (all cache * entries are being constantly read (used) by separate threads), * but this is acceptable, because this test is dedicated * to test the failure path of vmemcache_evict() * and the success criteria of this test are checks done in free_cache() * at the end of the test. 
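* (Each reader thread keeps the entry whose key equals its thread number * referenced by getting it in a tight loop, so the evict-by-key calls below * are likely to fail; their return values are deliberately ignored.)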
*/ for (unsigned long long n = 0; n < n_threads; ++n) vmemcache_evict(ctx->cache, &n, sizeof(n)); /* try to evict by LRU all entries that were not evicted above */ while (vmemcache_evict(ctx->cache, NULL, 0) == 0) ; __atomic_store_n(&keep_running, 0, __ATOMIC_SEQ_CST); return NULL; } /* * run_test_evict -- (internal) run test for vmemcache_evict() * * This test is dedicated to test the failure path of vmemcache_evict(). * It simulates extremely difficult conditions for an eviction: * all cache entries are being constantly read (used) by separate threads * (only one thread tries to evict entries by key or by LRU), * so it is very likely that most of vmemcache_evict() calls in this test * will fail. * The main success criteria of this test are checks done in free_cache() * at the end of the test. */ static void run_test_evict(VMEMcache *cache, unsigned n_threads, os_thread_t *threads, unsigned ops_per_thread, struct context *ctx, int by_key) { free_cache(cache); for (unsigned long long n = 0; n < n_threads; ++n) { if (vmemcache_put(ctx->cache, &n, sizeof(n), &n, sizeof(n))) UT_FATAL("ERROR: vmemcache_put: %s", vmemcache_errormsg()); } for (unsigned i = 0; i < n_threads; ++i) { ctx[i].worker = worker_thread_test_evict_get; ctx[i].ops_count = ops_per_thread; } /* overwrite the last routine */ if (by_key) ctx[n_threads - 1].worker = worker_thread_test_evict_by_key; else ctx[n_threads - 1].worker = worker_thread_test_evict_by_LRU; printf("%s%s: STARTED\n", __func__, by_key ? "_by_key" : "_by_LRU"); __atomic_store_n(&keep_running, 1, __ATOMIC_SEQ_CST); run_threads(n_threads, threads, ctx); /* success of this function is the main success criteria of this test */ free_cache(cache); printf("%s%s: PASSED\n", __func__, by_key ? "_by_key" : "_by_LRU"); } int main(int argc, char *argv[]) { unsigned seed = 0; int skip = 0; int ret = -1; if (argc < 2 || argc > 6) { fprintf(stderr, "usage: %s dir-name [threads] [ops_count] [seed] ['skip']\n" "\t seed == 0 - set seed from time()\n" "\t 'skip' - skip tests that last very long under Valgrind\n", argv[0]); exit(-1); } const char *dir = argv[1]; /* default values of parameters */ unsigned n_threads = 10; unsigned ops_count = 10000; unsigned nbuffs = 10; size_t min_size = 8; size_t max_size = 64; if (argc >= 3 && (str_to_unsigned(argv[2], &n_threads) || n_threads < 1)) UT_FATAL("incorrect value of n_threads: %s", argv[2]); if (argc >= 4 && (str_to_unsigned(argv[3], &ops_count) || ops_count < 1)) UT_FATAL("incorrect value of ops_count: %s", argv[3]); if (argc >= 5 && (str_to_unsigned(argv[4], &seed))) UT_FATAL("incorrect value of seed: %s", argv[4]); if (argc == 6) { if (strcmp(argv[5], "skip")) UT_FATAL("incorrect value of the 'skip' option: %s", argv[5]); skip = 1; } if (seed == 0) seed = (unsigned)time(NULL); printf("Multi-threaded test parameters:\n"); printf(" directory : %s\n", dir); printf(" n_threads : %u\n", n_threads); printf(" ops_count : %u\n", ops_count); printf(" nbuffs : %u\n", nbuffs); printf(" min_size : %zu\n", min_size); printf(" max_size : %zu\n", max_size); printf(" seed : %u\n\n", seed); srand(seed); VMEMcache *cache = vmemcache_new(); vmemcache_set_size(cache, VMEMCACHE_MIN_POOL); /* limit the size */ if (vmemcache_add(cache, dir)) UT_FATAL("vmemcache_new: %s (%s)", vmemcache_errormsg(), dir); struct buffers *buffs = calloc(nbuffs, sizeof(*buffs)); if (buffs == NULL) UT_FATAL("out of memory"); for (unsigned i = 0; i < nbuffs; ++i) { /* generate N random sizes (between A – B bytes) */ buffs[i].size = min_size + (size_t)rand() % (max_size - 
min_size + 1); /* allocate a buffer and fill it for every generated size */ buffs[i].buff = malloc(buffs[i].size); if (buffs[i].buff == NULL) UT_FATAL("out of memory"); memset(buffs[i].buff, 0xCC, buffs[i].size); } os_thread_t *threads = calloc(n_threads, sizeof(*threads)); if (threads == NULL) UT_FATAL("out of memory"); struct context *ctx = calloc(n_threads, sizeof(*ctx)); if (ctx == NULL) UT_FATAL("out of memory"); for (unsigned i = 0; i < n_threads; ++i) { ctx[i].n_threads = n_threads; ctx[i].thread_number = i; ctx[i].cache = cache; ctx[i].buffs = buffs; ctx[i].nbuffs = nbuffs; } unsigned ops_per_thread = ops_count / n_threads; /* run all tests */ run_test_get_on_miss(cache, n_threads, threads, ops_per_thread, ctx); run_test_put(cache, n_threads, threads, ops_per_thread, ctx); run_test_get(cache, n_threads, threads, ops_per_thread, ctx); run_test_get_put(cache, n_threads, threads, ops_per_thread, ctx); if (!skip) { run_test_evict(cache, n_threads, threads, ops_per_thread, ctx, EVICT_BY_LRU); run_test_evict(cache, n_threads, threads, ops_per_thread, ctx, EVICT_BY_KEY); } ret = 0; free(ctx); free(threads); for (unsigned i = 0; i < nbuffs; ++i) free(buffs[i].buff); free(buffs); vmemcache_delete(cache); return ret; } vmemcache-0.8.1/tests/vmemcache_test_utilization.c000066400000000000000000000147651374403322600224150ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * vmemcache_test_utilization.c -- space utilization test source */ #include "test_helpers.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <limits.h> #include <unistd.h> #include <time.h> #include <math.h> #include <libvmemcache.h> #define ALLOWED_RATIO 0.87 #define MAX_KEYSIZE 30 typedef struct { bool evicted; } on_evict_info; typedef struct { size_t pool_size; size_t extent_size; size_t val_max; char dir[PATH_MAX]; long seconds; unsigned seed; int print_output; } test_params; static const char *usage_str = "usage: %s " "-d <dir> " "[-p <pool_size>] " "[-e <extent_size>] " "[-v <val_max_factor>] " "[-t <timeout_seconds>] " "[-m <timeout_minutes>] " "[-o <timeout_hours>] " "[-s <seed_for_rand>] " "[-n] " "[-h]\n" "\t n - do not print out csv output (it is printed by default)\n"; /* * on_evict - (internal) on evict callback function */ static void on_evict(VMEMcache *cache, const void *key, size_t key_size, void *arg) { on_evict_info *info = (on_evict_info *)arg; info->evicted = true; } /* * parse_ull - (internal) try parsing unsigned long long command line argument */ static unsigned long long parse_ull(const char *valname, const char *prog) { unsigned long long val; if (str_to_ull(optarg, &val) != 0) { fprintf(stderr, "invalid %s value\n", valname); printf(usage_str, prog); exit(1); } return val; } /* * parse_unsigned - (internal) try parsing unsigned command line argument */ static unsigned parse_unsigned(const char *valname, const char *prog) { unsigned val; if (str_to_unsigned(optarg, &val) != 0) { fprintf(stderr, "invalid %s value\n", valname); printf(usage_str, prog); exit(1); } return val; } /* * argerror - (internal) exit with message on command line argument error */ static void argerror(const char *msg, const char *prog) { fprintf(stderr, "%s", msg); printf(usage_str, prog); exit(1); } /* * parse_args - (internal) parse command line arguments */ static test_params parse_args(int argc, char **argv) { test_params p = { .pool_size = VMEMCACHE_MIN_POOL, .extent_size = VMEMCACHE_MIN_EXTENT, .val_max = 0, .dir = "", .seconds = 0, .seed = 0, .print_output = 1, }; size_t val_max_factor = 70; const char *optstr = "hp:e:v:t:m:o:d:s:n"; int opt; long seconds = 0; long minutes = 0; long hours = 0; while ((opt =
getopt(argc, argv, optstr)) != -1) { switch (opt) { case 'h': printf(usage_str, argv[0]); exit(0); case 'p': p.pool_size = (size_t)parse_ull("pool size", argv[0]); break; case 'e': p.extent_size = (size_t)parse_ull("extent size", argv[0]); break; case 'v': val_max_factor = (size_t)parse_ull("val max factor", argv[0]); break; case 't': seconds = parse_unsigned("seconds", argv[0]); break; case 'm': minutes = parse_unsigned("minutes", argv[0]); break; case 'o': hours = parse_unsigned("hours", argv[0]); break; case 's': p.seed = parse_unsigned("seed for rand()", argv[0]); break; case 'd': if (*optarg == 0) argerror("invalid dir argument\n", argv[0]); strcpy(p.dir, optarg); break; case 'n': p.print_output = 0; break; default: argerror("", argv[0]); break; } } if (*p.dir == 0) argerror("missing required dir argument\n", argv[0]); p.seconds = seconds + 60 * minutes + 3600 * hours; if (p.seconds <= 0) argerror("timeout must be greater than 0\n", argv[0]); p.val_max = val_max_factor * p.extent_size; if (p.seed == 0) p.seed = (unsigned)time(NULL); srand(p.seed); printf("seed = %u\n", p.seed); return p; } /* * put_until_timeout - (internal) put random-sized values into cache, * print utilization ratio as a csv */ static int put_until_timeout(VMEMcache *vc, const test_params *p) { int ret = 1; on_evict_info info = { false }; vmemcache_callback_on_evict(vc, on_evict, &info); /* print csv header */ if (p->print_output) printf("keynum,ratio\n"); float prev_ratio; float ratio = 0.0f; bool print_ratio = false; char *val = malloc(p->val_max); if (val == NULL) { fprintf(stderr, "malloc: cannot allocate memory (%zu bytes)\n", p->val_max); return ret; } size_t val_size; unsigned long long used_size; char key[MAX_KEYSIZE]; int len; size_t keynum = 0; long endtime = time(NULL) + p->seconds; while (endtime > time(NULL)) { /* create key */ len = sprintf(key, "%zu", keynum); if (len < 0) { fprintf(stderr, "sprintf return value: %d\n", len); goto exit_free; } /* generate value */ val_size = get_granular_rand_size(p->val_max, p->extent_size); /* put */ int ret = vmemcache_put(vc, key, (size_t)len, val, val_size); if (ret != 0) { fprintf(stderr, "vmemcache_put: %s\n", vmemcache_errormsg()); goto exit_free; } #ifdef STATS_ENABLED if (vmemcache_get_stat(vc, VMEMCACHE_STAT_POOL_SIZE_USED, &used_size, sizeof(used_size)) != 0) { fprintf(stderr, "vmemcache_get_stat: %s\n", vmemcache_errormsg()); goto exit_free; } #else /* * This test will always pass and show 100% utilization, * if statistics are disabled. */ used_size = p->pool_size; #endif /* STATS_ENABLED */ /* * Do not print the csv line if current ratio value is the same * (taking precision into account) as the previous one. The * intent is to avoid unnecessary bloating of the csv output. */ ratio = (float)used_size / (float)p->pool_size; if (p->print_output) { print_ratio = keynum == 0 || lroundf(ratio * 100) != lroundf(prev_ratio * 100); if (print_ratio) { printf("%zu,%.3f\n", keynum, ratio); prev_ratio = ratio; } } if (info.evicted && ratio < ALLOWED_RATIO) { fprintf(stderr, "insufficient space utilization. 
ratio: %.3f: seed %u\n", ratio, p->seed); goto exit_free; } ++keynum; } ret = 0; /* print the last csv line if not already printed */ if (p->print_output) { if (!print_ratio) printf("%zu,%.3f\n", keynum - 1, ratio); } else { printf("Passed\n"); } exit_free: free(val); return ret; } int main(int argc, char **argv) { test_params p = parse_args(argc, argv); VMEMcache *vc = vmemcache_new(); vmemcache_set_size(vc, p.pool_size); vmemcache_set_extent_size(vc, p.extent_size); vmemcache_set_eviction_policy(vc, VMEMCACHE_REPLACEMENT_LRU); if (vmemcache_add(vc, p.dir)) UT_FATAL("vmemcache_add: %s (%s)", vmemcache_errormsg(), p.dir); int ret = put_until_timeout(vc, &p); vmemcache_delete(vc); return ret; } vmemcache-0.8.1/travis.yml000066400000000000000000000013161374403322600155100ustar00rootroot00000000000000dist: trusty sudo: required language: c services: - docker env: matrix: - TYPE=normal OS=ubuntu OS_VER=18.04 PUSH_IMAGE=1 COVERAGE=1 - TYPE=normal OS=fedora OS_VER=28 PUSH_IMAGE=1 before_install: - echo $TRAVIS_COMMIT_RANGE - export HOST_WORKDIR=`pwd` - export GITHUB_REPO=pmem/vmemcache - export DOCKERHUB_REPO=pmem/vmemcache - cd utils/docker - ./pull-or-rebuild-image.sh - if [[ -f push_image_to_repo_flag ]]; then PUSH_THE_IMAGE=1; fi - if [[ -f skip_build_package_check ]]; then export SKIP_CHECK=1; fi - rm -f push_image_to_repo_flag skip_build_package_check script: - ./build.sh after_success: - if [[ $PUSH_THE_IMAGE -eq 1 ]]; then images/push-image.sh $OS-$OS_VER; fi vmemcache-0.8.1/utils/000077500000000000000000000000001374403322600146145ustar00rootroot00000000000000vmemcache-0.8.1/utils/check_license/000077500000000000000000000000001374403322600173735ustar00rootroot00000000000000vmemcache-0.8.1/utils/check_license/check-headers.sh000077500000000000000000000110641374403322600224220ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # check-headers.sh - check copyright and license in source files SELF=$0 function usage() { echo "Usage: $SELF [-h|-v|-a]" echo " -h, --help this help message" echo " -v, --verbose verbose mode" echo " -a, --all check all files (only modified files are checked by default)" } if [ "$#" -lt 2 ]; then usage >&2 exit 2 fi SOURCE_ROOT=$1 shift LICENSE=$1 shift PATTERN=`mktemp` TMP=`mktemp` TMP2=`mktemp` TEMPFILE=`mktemp` rm -f $PATTERN $TMP $TMP2 if [ "$1" == "-h" -o "$1" == "--help" ]; then usage exit 0 fi export GIT="git -C ${SOURCE_ROOT}" $GIT rev-parse || exit 1 if [ -f $SOURCE_ROOT/.git/shallow ]; then SHALLOW_CLONE=1 echo echo "Warning: This is a shallow clone. Checking dates in copyright headers" echo " will be skipped in case of files that have no history." echo else SHALLOW_CLONE=0 fi VERBOSE=0 CHECK_ALL=0 while [ "$1" != "" ]; do case $1 in -v|--verbose) VERBOSE=1 ;; -a|--all) CHECK_ALL=1 ;; esac shift done if [ $CHECK_ALL -eq 0 ]; then CURRENT_COMMIT=$($GIT log --pretty=%H -1) MERGE_BASE=$($GIT merge-base HEAD origin/master 2>/dev/null) [ -z $MERGE_BASE ] && \ MERGE_BASE=$($GIT log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) [ -z $MERGE_BASE -o "$CURRENT_COMMIT" = "$MERGE_BASE" ] && \ CHECK_ALL=1 fi if [ $CHECK_ALL -eq 1 ]; then echo "Checking copyright headers of all files..."
GIT_COMMAND="ls-tree -r --name-only HEAD" else if [ $VERBOSE -eq 1 ]; then echo echo "Warning: will check copyright headers of modified files only," echo " in order to check all files issue the following command:" echo " $ $SELF -a" echo " (e.g.: $ $SELF $SOURCE_ROOT $LICENSE -a)" echo fi echo "Checking copyright headers of modified files only..." GIT_COMMAND="diff --name-only $MERGE_BASE $CURRENT_COMMIT" fi FILES=$($GIT $GIT_COMMAND | ${SOURCE_ROOT}/utils/check_license/file-exceptions.sh | \ grep -E -e '*\.[chs]$' -e '*\.[ch]pp$' -e '*\.sh$' \ -e '*\.link$' -e 'Makefile*' -e 'TEST*' \ -e '/common.inc$' -e '/match$' -e '/check_whitespace$' \ -e 'LICENSE$' -e 'CMakeLists.txt$' -e '*\.cmake$' | \ xargs) RV=0 for file in $FILES ; do # The src_path is a path which should be used in every command except git. # git is called with -C flag so filepaths should be relative to SOURCE_ROOT src_path="${SOURCE_ROOT}/$file" [ ! -f $src_path ] && continue # ensure that file is UTF-8 encoded ENCODING=`file -b --mime-encoding $src_path` iconv -f $ENCODING -t "UTF-8" $src_path > $TEMPFILE if ! grep -q "SPDX-License-Identifier: $LICENSE" $src_path; then echo >&2 "error: no $LICENSE SPDX tag in file: $src_path" RV=1 fi if [ $SHALLOW_CLONE -eq 0 ]; then $GIT log --no-merges --format="%ai %aE" -- $file | sort > $TMP else # mark the grafted commits (commits with no parents) $GIT log --no-merges --format="%ai %aE grafted-%p-commit" -- $file | sort > $TMP fi # skip checking dates for non-Intel commits [[ ! $(tail -n1 $TMP) =~ "@intel.com" ]] && continue # skip checking dates for new files [ $(cat $TMP | wc -l) -le 1 ] && continue # grep out the grafted commits (commits with no parents) # and skip checking dates for non-Intel commits grep -v -e "grafted--commit" $TMP | grep -e "@intel.com" > $TMP2 [ $(cat $TMP2 | wc -l) -eq 0 ] && continue FIRST=`head -n1 $TMP2` LAST=` tail -n1 $TMP2` YEARS=`sed ' /Copyright [0-9-]\+.*, Intel Corporation/!d s/.*Copyright \([0-9]\+\)-\([0-9]\+\),.*/\1-\2/ s/.*Copyright \([0-9]\+\),.*/\1-\1/' $src_path` if [ -z "$YEARS" ]; then echo >&2 "No copyright years in $src_path" RV=1 continue fi HEADER_FIRST=`echo $YEARS | cut -d"-" -f1` HEADER_LAST=` echo $YEARS | cut -d"-" -f2` COMMIT_FIRST=`echo $FIRST | cut -d"-" -f1` COMMIT_LAST=` echo $LAST | cut -d"-" -f1` if [ "$COMMIT_FIRST" != "" -a "$COMMIT_LAST" != "" ]; then if [ $HEADER_LAST -lt $COMMIT_LAST ]; then if [ $HEADER_FIRST -lt $COMMIT_FIRST ]; then COMMIT_FIRST=$HEADER_FIRST fi COMMIT_LAST=`date +%G` if [ $COMMIT_FIRST -eq $COMMIT_LAST ]; then NEW=$COMMIT_LAST else NEW=$COMMIT_FIRST-$COMMIT_LAST fi echo "$file:1: error: wrong copyright date: (is: $YEARS, should be: $NEW)" >&2 RV=1 fi else echo "error: unknown commit dates in file: $file" >&2 RV=1 fi done rm -f $TMP $TMP2 $TEMPFILE # check if error found if [ $RV -eq 0 ]; then echo "Copyright headers are OK." else echo "Error(s) in copyright headers found!" 
>&2 fi exit $RV vmemcache-0.8.1/utils/check_license/file-exceptions.sh000077500000000000000000000003241374403322600230270ustar00rootroot00000000000000#!/bin/sh -e # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2019, Intel Corporation # # file-exceptions.sh - filter out files not checked for copyright and license grep -v -E -e '/queue.h$|fast-hash' vmemcache-0.8.1/utils/check_whitespace000077500000000000000000000067101374403322600200370ustar00rootroot00000000000000#!/usr/bin/env perl # SPDX-License-Identifier: BSD-3-Clause # Copyright 2015-2018, Intel Corporation # # check_whitespace -- scrub source tree for whitespace errors # use strict; use warnings; use File::Basename; use File::Find; use Encode; use v5.16; my $Me = $0; $Me =~ s,.*/,,; $SIG{HUP} = $SIG{INT} = $SIG{TERM} = $SIG{__DIE__} = sub { die @_ if $^S; my $errstr = shift; die "$Me: ERROR: $errstr"; }; my $Errcount = 0; # # err -- emit error, keep total error count # sub err { warn @_, "\n"; $Errcount++; } # # decode_file_as_string -- slurp an entire file into memory and decode # sub decode_file_as_string { my ($full, $file) = @_; my $fh; open($fh, '<', $full) or die "$full $!\n"; local $/; $_ = <$fh>; close $fh; # check known encodings or die my $decoded; my @encodings = ("UTF-8", "UTF-16", "UTF-16LE", "UTF-16BE"); foreach my $enc (@encodings) { eval { $decoded = decode( $enc, $_, Encode::FB_CROAK ) }; if (!$@) { $decoded =~ s/\R/\n/g; return $decoded; } } die "$Me: ERROR: Unknown file encoding"; } # # check_whitespace -- run the checks on the given file # sub check_whitespace { my ($full, $file) = @_; my $line = 0; my $eol; my $nf = 0; my $fstr = decode_file_as_string($full, $file); for (split /^/, $fstr) { $line++; $eol = /[\n]/s; if (/^\.nf$/) { err("$full:$line: ERROR: nested .nf") if $nf; $nf = 1; } elsif (/^\.fi$/) { $nf = 0; } elsif ($nf == 0) { chomp; err("$full:$line: ERROR: trailing whitespace") if /\s$/; err("$full:$line: ERROR: spaces before tabs") if / \t/; } } err("$full:$line: .nf without .fi") if $nf; err("$full:$line: noeol") unless $eol; } sub check_whitespace_with_exc { my ($full) = @_; $_ = $full; return 0 if /^[.\/]*src\/jemalloc.*/; return 0 if /^[.\/]*src\/sys\/queue\.h/; return 0 if /^[.\/]*src\/common\/valgrind\/.*\.h/; $_ = basename($full); return 0 unless /^(README.*|LICENSE.*|Makefile.*|CMakeLists.txt|.gitignore|TEST.*|RUNTESTS|check_whitespace|.*\.([chp13s]|sh|map|cpp|hpp|inc|PS1|ps1|py|md|cmake))$/; return 0 if -z; check_whitespace($full, $_); return 1; } my $verbose = 0; my $force = 0; my $recursive = 0; sub check { my ($file) = @_; my $r; if ($force) { $r = check_whitespace($file, basename($file)); } else { $r = check_whitespace_with_exc($file); } if ($verbose) { if ($r == 0) { printf("skipped $file\n"); } else { printf("checked $file\n"); } } } my @files = (); foreach my $arg (@ARGV) { if ($arg eq '-v') { $verbose = 1; next; } if ($arg eq '-f') { $force = 1; next; } if ($arg eq '-r') { $recursive = 1; next; } if ($arg eq '-g') { @files = `git ls-tree -r --name-only HEAD`; chomp(@files); next; } if ($arg eq '-h') { printf "Options: -g - check all files tracked by git -r dir - recursively check all files in specified directory -v verbose - print whether file was checked or not -f force - disable blacklist\n"; exit 1; } if ($recursive == 1) { find(sub { my $full = $File::Find::name; if (!$force && ($full eq './.git' || $full eq './src/jemalloc' || $full eq './src/debug' || $full eq './src/nondebug' || $full eq './rpmbuild' || $full eq './dpkgbuild')) { $File::Find::prune = 1; return; } 
return unless -f; push @files, $full; }, $arg); $recursive = 0; next; } push @files, $arg; } if (!@files) { printf "Empty file list!\n"; } foreach (@files) { check($_); } exit $Errcount; vmemcache-0.8.1/utils/cstyle000077500000000000000000000656401374403322600160540ustar00rootroot00000000000000#!/usr/bin/env perl # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # Portions copyright 2017, Intel Corporation. # # @(#)cstyle 1.58 98/09/09 (from shannon) #ident "%Z%%M% %I% %E% SMI" # # cstyle - check for some common stylistic errors. # # cstyle is a sort of "lint" for C coding style. # It attempts to check for the style used in the # kernel, sometimes known as "Bill Joy Normal Form". # # There's a lot this can't check for, like proper indentation # of code blocks. There's also a lot more this could check for. # # A note to the non perl literate: # # perl regular expressions are pretty much like egrep # regular expressions, with the following special symbols # # \s any space character # \S any non-space character # \w any "word" character [a-zA-Z0-9_] # \W any non-word character # \d a digit [0-9] # \D a non-digit # \b word boundary (between \w and \W) # \B non-word boundary # require 5.0; use IO::File; use Getopt::Std; use strict; use warnings; my $usage = "usage: cstyle [-chpvCP] [-o constructs] file ... -c check continuation indentation inside functions -h perform heuristic checks that are sometimes wrong -p perform some of the more picky checks -v verbose -C don't check anything in header block comments -P check for use of non-POSIX types -o constructs allow a comma-separated list of optional constructs: doxygen allow doxygen-style block comments (/** /*!) splint allow splint-style lint comments (/*@ ... @*/) "; my %opts; if (!getopts("cho:pvCP", \%opts)) { print $usage; exit 2; } my $check_continuation = $opts{'c'}; my $heuristic = $opts{'h'}; my $picky = $opts{'p'}; my $verbose = $opts{'v'}; my $ignore_hdr_comment = $opts{'C'}; my $check_posix_types = $opts{'P'}; my $doxygen_comments = 0; my $splint_comments = 0; if (defined($opts{'o'})) { for my $x (split /,/, $opts{'o'}) { if ($x eq "doxygen") { $doxygen_comments = 1; } elsif ($x eq "splint") { $splint_comments = 1; } else { print "cstyle: unrecognized construct \"$x\"\n"; print $usage; exit 2; } } } my ($filename, $line, $prev); # shared globals my $fmt; my $hdr_comment_start; if ($verbose) { $fmt = "%s:%d: %s\n%s\n"; } else { $fmt = "%s:%d: %s\n"; } if ($doxygen_comments) { # doxygen comments look like "/*!" or "/**"; allow them. $hdr_comment_start = qr/^\s*\/\*[\!\*]?$/; } else { $hdr_comment_start = qr/^\s*\/\*$/; } # Note, following must be in single quotes so that \s and \w work right.
my $typename = '(int|char|short|long|unsigned|float|double' . '|\w+_t|struct\s+\w+|union\s+\w+|FILE|BOOL)'; # mapping of old types to POSIX compatible types my %old2posix = ( 'unchar' => 'uchar_t', 'ushort' => 'ushort_t', 'uint' => 'uint_t', 'ulong' => 'ulong_t', 'u_int' => 'uint_t', 'u_short' => 'ushort_t', 'u_long' => 'ulong_t', 'u_char' => 'uchar_t', 'quad' => 'quad_t' ); my $lint_re = qr/\/\*(?: ARGSUSED[0-9]*|NOTREACHED|LINTLIBRARY|VARARGS[0-9]*| CONSTCOND|CONSTANTCOND|CONSTANTCONDITION|EMPTY| FALLTHRU|FALLTHROUGH|LINTED.*?|PRINTFLIKE[0-9]*| PROTOLIB[0-9]*|SCANFLIKE[0-9]*|CSTYLED.*? )\*\//x; my $splint_re = qr/\/\*@.*?@\*\//x; my $warlock_re = qr/\/\*\s*(?: VARIABLES\ PROTECTED\ BY| MEMBERS\ PROTECTED\ BY| ALL\ MEMBERS\ PROTECTED\ BY| READ-ONLY\ VARIABLES:| READ-ONLY\ MEMBERS:| VARIABLES\ READABLE\ WITHOUT\ LOCK:| MEMBERS\ READABLE\ WITHOUT\ LOCK:| LOCKS\ COVERED\ BY| LOCK\ UNNEEDED\ BECAUSE| LOCK\ NEEDED:| LOCK\ HELD\ ON\ ENTRY:| READ\ LOCK\ HELD\ ON\ ENTRY:| WRITE\ LOCK\ HELD\ ON\ ENTRY:| LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:| READ\ LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:| WRITE\ LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:| LOCK\ RELEASED\ AS\ SIDE\ EFFECT:| LOCK\ UPGRADED\ AS\ SIDE\ EFFECT:| LOCK\ DOWNGRADED\ AS\ SIDE\ EFFECT:| FUNCTIONS\ CALLED\ THROUGH\ POINTER| FUNCTIONS\ CALLED\ THROUGH\ MEMBER| LOCK\ ORDER: )/x; my $err_stat = 0; # exit status if ($#ARGV >= 0) { foreach my $arg (@ARGV) { my $fh = new IO::File $arg, "r"; if (!defined($fh)) { printf "%s: can not open\n", $arg; } else { &cstyle($arg, $fh); close $fh; } } } else { &cstyle("", *STDIN); } exit $err_stat; my $no_errs = 0; # set for CSTYLED-protected lines sub err($) { my ($error) = @_; unless ($no_errs) { if ($verbose) { printf $fmt, $filename, $., $error, $line; } else { printf $fmt, $filename, $., $error; } $err_stat = 1; } } sub err_prefix($$) { my ($prevline, $error) = @_; my $out = $prevline."\n".$line; unless ($no_errs) { printf $fmt, $filename, $., $error, $out; $err_stat = 1; } } sub err_prev($) { my ($error) = @_; unless ($no_errs) { printf $fmt, $filename, $. - 1, $error, $prev; $err_stat = 1; } } sub cstyle($$) { my ($fn, $filehandle) = @_; $filename = $fn; # share it globally my $in_cpp = 0; my $next_in_cpp = 0; my $in_comment = 0; my $in_header_comment = 0; my $comment_done = 0; my $in_warlock_comment = 0; my $in_function = 0; my $in_function_header = 0; my $in_declaration = 0; my $note_level = 0; my $nextok = 0; my $nocheck = 0; my $in_string = 0; my ($okmsg, $comment_prefix); $line = ''; $prev = ''; reset_indent(); line: while (<$filehandle>) { s/\r?\n$//; # strip return and newline # save the original line, then remove all text from within # double or single quotes, we do not want to check such text. $line = $_; # # C allows strings to be continued with a backslash at the end of # the line. We translate that into a quoted string on the previous # line followed by an initial quote on the next line. # # (we assume that no-one will use backslash-continuation with character # constants) # $_ = '"' . $_ if ($in_string && !$nocheck && !$in_comment); # # normal strings and characters # s/'([^\\']|\\[^xX0]|\\0[0-9]*|\\[xX][0-9a-fA-F]*)'/''/g; s/"([^\\"]|\\.)*"/\"\"/g; # # detect string continuation # if ($nocheck || $in_comment) { $in_string = 0; } else { # # Now that all full strings are replaced with "", we check # for unfinished strings continuing onto the next line. 
# $in_string = (s/([^"](?:"")*)"([^\\"]|\\.)*\\$/$1""/ || s/^("")*"([^\\"]|\\.)*\\$/""/); } # # figure out if we are in a cpp directive # $in_cpp = $next_in_cpp || /^\s*#/; # continued or started $next_in_cpp = $in_cpp && /\\$/; # only if continued # strip off trailing backslashes, which appear in long macros s/\s*\\$//; # an /* END CSTYLED */ comment ends a no-check block. if ($nocheck) { if (/\/\* *END *CSTYLED *\*\//) { $nocheck = 0; } else { reset_indent(); next line; } } # a /*CSTYLED*/ comment indicates that the next line is ok. if ($nextok) { if ($okmsg) { err($okmsg); } $nextok = 0; $okmsg = 0; if (/\/\* *CSTYLED.*\*\//) { /^.*\/\* *CSTYLED *(.*) *\*\/.*$/; $okmsg = $1; $nextok = 1; } $no_errs = 1; } elsif ($no_errs) { $no_errs = 0; } # check length of line. # first, a quick check to see if there is any chance of being too long. if (($line =~ tr/\t/\t/) * 7 + length($line) > 80) { # yes, there is a chance. # replace tabs with spaces and check again. my $eline = $line; 1 while $eline =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e; if (length($eline) > 80) { # allow long line if it is user visible string # find if line start from " and ends # with " + 2 optional characters # (these characters can be i.e. '");' '" \' or '",' etc...) if($eline =~ /^ *".*"[^"]{0,2}$/) { # check if entire line is one string literal $eline =~ s/^ *"//; $eline =~ s/"[^"]{0,2}$//; if($eline =~ /[^\\]"|[^\\](\\\\)+"/) { err("line > 80 characters"); } } else { err("line > 80 characters"); } } } # ignore NOTE(...) annotations (assumes NOTE is on lines by itself). if ($note_level || /\b_?NOTE\s*\(/) { # if in NOTE or this is NOTE s/[^()]//g; # eliminate all non-parens $note_level += s/\(//g - length; # update paren nest level next; } # a /* BEGIN CSTYLED */ comment starts a no-check block. if (/\/\* *BEGIN *CSTYLED *\*\//) { $nocheck = 1; } # a /*CSTYLED*/ comment indicates that the next line is ok. if (/\/\* *CSTYLED.*\*\//) { /^.*\/\* *CSTYLED *(.*) *\*\/.*$/; $okmsg = $1; $nextok = 1; } if (/\/\/ *CSTYLED/) { /^.*\/\/ *CSTYLED *(.*)$/; $okmsg = $1; $nextok = 1; } # universal checks; apply to everything if (/\t +\t/) { err("spaces between tabs"); } if (/ \t+ /) { err("tabs between spaces"); } if (/\s$/) { err("space or tab at end of line"); } if (/[^ \t(]\/\*/ && !/\w\(\/\*.*\*\/\);/) { err("comment preceded by non-blank"); } # is this the beginning or ending of a function? # (not if "struct foo\n{\n") if (/^{$/ && $prev =~ /\)\s*(const\s*)?(\/\*.*\*\/\s*)?\\?$/) { $in_function = 1; $in_declaration = 1; $in_function_header = 0; $prev = $line; next line; } if (/^}\s*(\/\*.*\*\/\s*)*$/) { if ($prev =~ /^\s*return\s*;/) { err_prev("unneeded return at end of function"); } $in_function = 0; reset_indent(); # we don't check between functions $prev = $line; next line; } if (/^\w*\($/) { $in_function_header = 1; } if ($in_warlock_comment && /\*\//) { $in_warlock_comment = 0; $prev = $line; next line; } # a blank line terminates the declarations within a function. # XXX - but still a problem in sub-blocks. if ($in_declaration && /^$/) { $in_declaration = 0; } if ($comment_done) { $in_comment = 0; $in_header_comment = 0; $comment_done = 0; } # does this looks like the start of a block comment? if (/$hdr_comment_start/) { if (!/^\t*\/\*/) { err("block comment not indented by tabs"); } $in_comment = 1; /^(\s*)\//; $comment_prefix = $1; if ($comment_prefix eq "") { $in_header_comment = 1; } $prev = $line; next line; } # are we still in the block comment? 
if ($in_comment) { if (/^$comment_prefix \*\/$/) { $comment_done = 1; } elsif (/\*\//) { $comment_done = 1; err("improper block comment close") unless ($ignore_hdr_comment && $in_header_comment); } elsif (!/^$comment_prefix \*[ \t]/ && !/^$comment_prefix \*$/) { err("improper block comment") unless ($ignore_hdr_comment && $in_header_comment); } } if ($in_header_comment && $ignore_hdr_comment) { $prev = $line; next line; } # check for errors that might occur in comments and in code. # allow spaces to be used to draw pictures in header and block comments. if (/[^ ] / && !/".* .*"/ && !$in_header_comment && !$in_comment) { err("spaces instead of tabs"); } if (/^ / && !/^ \*[ \t\/]/ && !/^ \*$/ && (!/^ \w/ || $in_function != 0)) { err("indent by spaces instead of tabs"); } if (/^\t+ [^ \t\*]/ || /^\t+ \S/ || /^\t+ \S/) { err("continuation line not indented by 4 spaces"); } if (/$warlock_re/ && !/\*\//) { $in_warlock_comment = 1; $prev = $line; next line; } if (/^\s*\/\*./ && !/^\s*\/\*.*\*\// && !/$hdr_comment_start/) { err("improper first line of block comment"); } if ($in_comment) { # still in comment, don't do further checks $prev = $line; next line; } if ((/[^(]\/\*\S/ || /^\/\*\S/) && !(/$lint_re/ || ($splint_comments && /$splint_re/))) { err("missing blank after open comment"); } if (/\S\*\/[^)]|\S\*\/$/ && !(/$lint_re/ || ($splint_comments && /$splint_re/))) { err("missing blank before close comment"); } if (/\/\/\S/) { # C++ comments err("missing blank after start comment"); } # check for unterminated single line comments, but allow them when # they are used to comment out the argument list of a function # declaration. if (/\S.*\/\*/ && !/\S.*\/\*.*\*\// && !/\(\/\*/) { err("unterminated single line comment"); } if (/^(#else|#endif|#include)(.*)$/) { $prev = $line; if ($picky) { my $directive = $1; my $clause = $2; # Enforce ANSI rules for #else and #endif: no noncomment # identifiers are allowed after #endif or #else. Allow # C++ comments since they seem to be a fact of life. if ((($1 eq "#endif") || ($1 eq "#else")) && ($clause ne "") && (!($clause =~ /^\s+\/\*.*\*\/$/)) && (!($clause =~ /^\s+\/\/.*$/))) { err("non-comment text following " . "$directive (or malformed $directive " . "directive)"); } } next line; } # # delete any comments and check everything else. Note that # ".*?" is a non-greedy match, so that we don't get confused by # multiple comments on the same line. # s/\/\*.*?\*\//\x01/g; s/\/\/.*$/\x01/; # C++ comments # delete any trailing whitespace; we have already checked for that. s/\s*$//; # following checks do not apply to text in comments. 
if (/[^ \t\+]\+[^\+=]/ || /[^\+]\+[^ \+=]/) { err("missing space around + operator"); } if (/[^ \t]\+=/ || /\+=[^ ]/) { err("missing space around += operator"); } if (/[^ \t\-]\-[^\->]/ && !/\(\w+\)\-\w/ && !/[\(\[]\-[\w \t]+[\)\],]/) { err("missing space before - operator"); } if (/[^\-]\-[^ \-=>]/ && !/\(\-\w+\)/ && !/(return|case|=|>|<|\?|:|,|^[ \t]+)[ \t]+\-[\w\(]/ && !/(\([^\)]+\)|\[|\()\-[\w\(\]]/) { err("missing space after - operator"); } if (/(return|case|=|\?|:|,|\[)[ \t]+\-[ \t]/ || /[\(\[]\-[ \t]/) { err("extra space after - operator"); } if (/[^ \t]\-=/ || /\-=[^ ]/) { err("missing space around -= operator"); } if (/[^ \t][\%\/]/ || /[\%\/][^ =]/ || /[\%\/]=[^ ]/) { err("missing space around one of operators: % %= / /="); } if (/[^ \t]\*=/ || /\*=[^ ]/) { err("missing space around *= operator"); } if (/[^ \t\(\)\*\[]\*/) { err("missing space before * operator"); } if (/\*[^ =\*\w\(,]/ && !/\(.+ \*+\)/ && !/\*\[\]/ && !/\*\-\-\w/ && !/\*\+\+\w/ && !/\*\)/) { err("missing space after * operator"); } if (/[^<>\s][!<>=]=/ || /[^<>][!<>=]=[^\s,]/ || (/[^->]>[^,=>\s]/ && !/[^->]>$/) || (/[^<]<[^,=<\s]/ && !/[^<]<$/) || /[^<\s]<[^<]/ || /[^->\s]>[^>]/) { err("missing space around relational operator"); } if (/\S>>=/ || /\S<<=/ || />>=\S/ || /<<=\S/ || /\S[-+*\/&|^%]=/ || (/[^-+*\/&|^%!<>=\s]=[^=]/ && !/[^-+*\/&|^%!<>=\s]=$/) || (/[^!<>=]=[^=\s]/ && !/[^!<>=]=$/)) { # XXX - should only check this for C++ code # XXX - there are probably other forms that should be allowed if (!/\soperator=/) { err("missing space around assignment operator"); } } if (/[,;]\S/ && !/\bfor \(;;\)/) { err("comma or semicolon followed by non-blank"); } # allow "for" statements to have empty "while" clauses if (/\s[,;]/ && !/^[\t]+;$/ && !/^\s*for \([^;]*; ;[^;]*\)/) { err("comma or semicolon preceded by blank"); } if (/^\s*(&&|\|\|)/) { err("improper boolean continuation"); } if (/\S *(&&|\|\|)/ || /(&&|\|\|) *\S/) { err("more than one space around boolean operator"); } if (/\b(for|if|while|switch|return|case)\(/) { err("missing space between keyword and paren"); } if (/(\b(for|if|while|switch|return)\b.*){2,}/ && !/^#define/) { # multiple "case" and "sizeof" allowed err("more than one keyword on line"); } if (/\b(for|if|while|switch|return|case)\s\s+\(/ && !/^#if\s+\(/) { err("extra space between keyword and paren"); } # try to detect "func (x)" but not "if (x)" or # "#define foo (x)" or "int (*func)();" if (/\w\s\(/) { my $s = $_; # strip off all keywords on the line s/\b(for|if|while|switch|return|case)\s\(/XXX(/g; s/\b(sizeof|typeof|__typeof__)\s*\(/XXX(/g; s/#elif\s\(/XXX(/g; s/^#define\s+\w+\s+\(/XXX(/; # do not match things like "void (*f)();" # or "typedef void (func_t)();" s/\w\s\(+\*/XXX(*/g; s/\b($typename|void)\s+\(+/XXX(/og; s/\btypedef\s($typename|void)\s+\(+/XXX(/og; # do not match "__attribute__ ((format (...)))" s/\b__attribute__\s*\(\(format\s*\(/__attribute__((XXX(/g; if (/\w\s\(/) { err("extra space between function name and left paren"); } $_ = $s; } # try to detect "int foo(x)", but not "extern int foo(x);" # XXX - this still trips over too many legitimate things, # like "int foo(x,\n\ty);" # if (/^(\w+(\s|\*)+)+\w+\(/ && !/\)[;,](\s|\x01)*$/ && # !/^(extern|static)\b/) { # err("return type of function not on separate line"); # } # this is a close approximation if (/^(\w+(\s|\*)+)+\w+\(.*\)(\s|\x01)*$/ && !/^(extern|static)\b/) { err("return type of function not on separate line"); } if (/^#define\t/ || /^#ifdef\t/ || /^#ifndef\t/) { err("#define/ifdef/ifndef followed by tab instead of 
space"); } if (/^#define\s\s+/ || /^#ifdef\s\s+/ || /^#ifndef\s\s+/) { err("#define/ifdef/ifndef followed by more than one space"); } # AON C-style doesn't require this. #if (/^\s*return\W[^;]*;/ && !/^\s*return\s*\(.*\);/) { # err("unparenthesized return expression"); #} if (/\bsizeof\b/ && !/\bsizeof\s*\(.*\)/) { err("unparenthesized sizeof expression"); } if (/\b(sizeof|typeof)\b/ && /\b(sizeof|typeof)\s+\(.*\)/) { err("spaces between sizeof/typeof expression and paren"); } if (/\(\s/) { err("whitespace after left paren"); } # allow "for" statements to have empty "continue" clauses if (/\s\)/ && !/^\s*for \([^;]*;[^;]*; \)/) { err("whitespace before right paren"); } if (/^\s*\(void\)[^ ]/) { err("missing space after (void) cast"); } if (/\S\{/ && !/\{\{/ && !/\(struct \w+\)\{/) { err("missing space before left brace"); } if ($in_function && /^\s+{/ && ($prev =~ /\)\s*$/ || $prev =~ /\bstruct\s+\w+$/)) { err("left brace starting a line"); } if (/}(else|while)/) { err("missing space after right brace"); } if (/}\s\s+(else|while)/) { err("extra space after right brace"); } if (/\b_VOID\b|\bVOID\b|\bSTATIC\b/) { err("obsolete use of VOID or STATIC"); } if (/\b($typename|void)\*/o) { err("missing space between type name and *"); } if (/^\s+#/) { err("preprocessor statement not in column 1"); } if (/^#\s/) { err("blank after preprocessor #"); } if (/!\s*(strcmp|strncmp|bcmp)\s*\(/) { err("don't use boolean ! with comparison functions"); } if (/^\S+\([\S\s]*\)\s*{/) { err("brace of function definition not at beginning of line"); } if (/static\s+\S+\s*=\s*(0|NULL)\s*;/) { err("static variable initialized with 0 or NULL"); } if (/typedef[\S\s]+\*\s*\w+\s*;/) { err("typedefed pointer type"); } if (/unsigned\s+int/) { err("'unsigned int' instead of just 'unsigned'"); } if (/long\s+long\s+int/) { err("'long long int' instead of just 'long long'"); } elsif (/long\s+int/) { err("'long int' instead of just 'long'"); } # # We completely ignore, for purposes of indentation: # * lines outside of functions # * preprocessor lines # if ($check_continuation && $in_function && !$in_cpp) { process_indent($_); } if ($picky) { # try to detect spaces after casts, but allow (e.g.) # "sizeof (int) + 1", "void (*funcptr)(int) = foo;", and # "int foo(int) __NORETURN;" if ((/^\($typename( \*+)?\)\s/o || /\W\($typename( \*+)?\)\s/o) && !/sizeof\($typename( \*)?\)\s/o && !/\($typename( \*+)?\)\s+=[^=]/o) { err("space after cast"); } if (/\b($typename|void)\s*\*\s/o && !/\b($typename|void)\s*\*\s+const\b/o) { err("unary * followed by space"); } } if ($check_posix_types) { # try to detect old non-POSIX types. # POSIX requires all non-standard typedefs to end in _t, # but historically these have been used. 
if (/\b(unchar|ushort|uint|ulong|u_int|u_short|u_long|u_char|quad)\b/) { err("non-POSIX typedef $1 used: use $old2posix{$1} instead"); } } if ($heuristic) { # cannot check this everywhere due to "struct {\n...\n} foo;" if ($in_function && !$in_declaration && /}./ && !/}\s+=/ && !/{.*}[;,]$/ && !/}(\s|\x01)*$/ && !/} (else|while)/ && !/}}/) { err("possible bad text following right brace"); } # cannot check this because sub-blocks in # the middle of code are ok if ($in_function && /^\s+{/) { err("possible left brace starting a line"); } } if (/^\s*else\W/) { if ($prev =~ /^\s*}$/) { err_prefix($prev, "else and right brace should be on same line"); } } $prev = $line; } if ($prev eq "") { err("last line in file is blank"); } } # # Continuation-line checking # # The rest of this file contains the code for the continuation checking # engine. It's a pretty simple state machine which tracks the expression # depth (unmatched '('s and '['s). # # Keep in mind that the argument to process_indent() has already been heavily # processed; all comments have been replaced by control-A, and the contents of # strings and character constants have been elided. # my $cont_in; # currently inside of a continuation my $cont_off; # skipping an initializer or definition my $cont_noerr; # suppress cascading errors my $cont_start; # the line being continued my $cont_base; # the base indentation my $cont_first; # this is the first line of a statement my $cont_multiseg; # this continuation has multiple segments my $cont_special; # this is a C statement (if, for, etc.) my $cont_macro; # this is a macro my $cont_case; # this is a multi-line case my @cont_paren; # the stack of unmatched ( and [s we've seen sub reset_indent() { $cont_in = 0; $cont_off = 0; } sub delabel($) { # # replace labels with tabs. Note that there may be multiple # labels on a line. # local $_ = $_[0]; while (/^(\t*)( *(?:(?:\w+\s*)|(?:case\b[^:]*)): *)(.*)$/) { my ($pre_tabs, $label, $rest) = ($1, $2, $3); $_ = $pre_tabs; while ($label =~ s/^([^\t]*)(\t+)//) { $_ .= "\t" x (length($2) + length($1) / 8); } $_ .= ("\t" x (length($label) / 8)).$rest; } return ($_); } sub process_indent($) { require strict; local $_ = $_[0]; # preserve the global $_ s/\x01//g; # No comments s/\s+$//; # Strip trailing whitespace return if (/^$/); # skip empty lines # regexps used below; keywords taking (), macros, and continued cases my $special = '(?:(?:\}\s*)?else\s+)?(?:if|for|while|switch)\b'; my $macro = '[A-Z_][A-Z_0-9]*\('; my $case = 'case\b[^:]*$'; # skip over enumerations, array definitions, initializers, etc. 
if ($cont_off <= 0 && !/^\s*$special/ && (/(?:(?:\b(?:enum|struct|union)\s*[^\{]*)|(?:\s+=\s*))\{/ || (/^\s*{/ && $prev =~ /=\s*(?:\/\*.*\*\/\s*)*$/))) { $cont_in = 0; $cont_off = tr/{/{/ - tr/}/}/; return; } if ($cont_off) { $cont_off += tr/{/{/ - tr/}/}/; return; } if (!$cont_in) { $cont_start = $line; if (/^\t* /) { err("non-continuation indented 4 spaces"); $cont_noerr = 1; # stop reporting } $_ = delabel($_); # replace labels with tabs # check if the statement is complete return if (/^\s*\}?$/); return if (/^\s*\}?\s*else\s*\{?$/); return if (/^\s*do\s*\{?$/); return if (/{$/); return if (/}[,;]?$/); # Allow macros on their own lines return if (/^\s*[A-Z_][A-Z_0-9]*$/); # cases we don't deal with, generally non-kosher if (/{/) { err("stuff after {"); return; } # Get the base line, and set up the state machine /^(\t*)/; $cont_base = $1; $cont_in = 1; @cont_paren = (); $cont_first = 1; $cont_multiseg = 0; # certain things need special processing $cont_special = /^\s*$special/? 1 : 0; $cont_macro = /^\s*$macro/? 1 : 0; $cont_case = /^\s*$case/? 1 : 0; } else { $cont_first = 0; # Strings may be pulled back to an earlier (half-)tabstop unless ($cont_noerr || /^$cont_base / || (/^\t*(?: )?(?:gettext\()?\"/ && !/^$cont_base\t/)) { err_prefix($cont_start, "continuation should be indented 4 spaces"); } } my $rest = $_; # keeps the remainder of the line # # The split matches 0 characters, so that each 'special' character # is processed separately. Parens and brackets are pushed and # popped off the @cont_paren stack. For normal processing, we wait # until a ; or { terminates the statement. "special" processing # (if/for/while/switch) is allowed to stop when the stack empties, # as is macro processing. Case statements are terminated with a : # and an empty paren stack. # foreach $_ (split /[^\(\)\[\]\{\}\;\:]*/) { next if (length($_) == 0); # rest contains the remainder of the line my $rxp = "[^\Q$_\E]*\Q$_\E"; $rest =~ s/^$rxp//; if (/\(/ || /\[/) { push @cont_paren, $_; } elsif (/\)/ || /\]/) { my $cur = $_; tr/\)\]/\(\[/; my $old = (pop @cont_paren); if (!defined($old)) { err("unexpected '$cur'"); $cont_in = 0; last; } elsif ($old ne $_) { err("'$cur' mismatched with '$old'"); $cont_in = 0; last; } # # If the stack is now empty, do special processing # for if/for/while/switch and macro statements. # next if (@cont_paren != 0); if ($cont_special) { if ($rest =~ /^\s*{?$/) { $cont_in = 0; last; } if ($rest =~ /^\s*;$/) { err("empty if/for/while body ". "not on its own line"); $cont_in = 0; last; } if (!$cont_first && $cont_multiseg == 1) { err_prefix($cont_start, "multiple statements continued ". "over multiple lines"); $cont_multiseg = 2; } elsif ($cont_multiseg == 0) { $cont_multiseg = 1; } # We've finished this section, start # processing the next. goto section_ended; } if ($cont_macro) { if ($rest =~ /^$/) { $cont_in = 0; last; } } } elsif (/\;/) { if ($cont_case) { err("unexpected ;"); } elsif (!$cont_special) { err("unexpected ;") if (@cont_paren != 0); if (!$cont_first && $cont_multiseg == 1) { err_prefix($cont_start, "multiple statements continued ". "over multiple lines"); $cont_multiseg = 2; } elsif ($cont_multiseg == 0) { $cont_multiseg = 1; } if ($rest =~ /^$/) { $cont_in = 0; last; } if ($rest =~ /^\s*special/) { err("if/for/while/switch not started ". 
"on its own line"); } goto section_ended; } } elsif (/\{/) { err("{ while in parens/brackets") if (@cont_paren != 0); err("stuff after {") if ($rest =~ /[^\s}]/); $cont_in = 0; last; } elsif (/\}/) { err("} while in parens/brackets") if (@cont_paren != 0); if (!$cont_special && $rest !~ /^\s*(while|else)\b/) { if ($rest =~ /^$/) { err("unexpected }"); } else { err("stuff after }"); } $cont_in = 0; last; } } elsif (/\:/ && $cont_case && @cont_paren == 0) { err("stuff after multi-line case") if ($rest !~ /$^/); $cont_in = 0; last; } next; section_ended: # End of a statement or if/while/for loop. Reset # cont_special and cont_macro based on the rest of the # line. $cont_special = ($rest =~ /^\s*$special/)? 1 : 0; $cont_macro = ($rest =~ /^\s*$macro/)? 1 : 0; $cont_case = 0; next; } $cont_noerr = 0 if (!$cont_in); } vmemcache-0.8.1/utils/docker/000077500000000000000000000000001374403322600160635ustar00rootroot00000000000000vmemcache-0.8.1/utils/docker/build.sh000077500000000000000000000035231374403322600175240ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2018, Intel Corporation # # build-local.sh - runs a Docker container from a Docker image with environment # prepared for running tests. # # # Notes: # - run this script from its location or set the variable 'HOST_WORKDIR' to # where the root of this project is on the host machine, # - set variables 'OS' and 'OS_VER' properly to a system you want to build this # repo on (for proper values take a look on the list of Dockerfiles at the # utils/docker/images directory), eg. OS=ubuntu, OS_VER=16.04. # set -e if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then HOST_WORKDIR=$(readlink -f ../..) fi chmod -R a+w $HOST_WORKDIR imageName=${DOCKERHUB_REPO}:${OS}-${OS_VER} containerName=vmemcache-${OS}-${OS_VER} if [[ "$command" == "" ]]; then command="./run-build.sh"; fi if [ -n "$DNS_SERVER" ]; then DNS_SETTING=" --dns=$DNS_SERVER "; fi WORKDIR=/vmemcache SCRIPTSDIR=$WORKDIR/utils/docker echo Building ${OS}-${OS_VER} ci_env=`bash <(curl -s https://codecov.io/env)` # Run a container with # - environment variables set (--env) # - host directory containing source mounted (-v) # - working directory set (-w) docker run --privileged=true --name=$containerName -ti \ $DNS_SETTING \ ${docker_opts} \ $ci_env \ --env http_proxy=$http_proxy \ --env https_proxy=$https_proxy \ --env GITHUB_TOKEN=$GITHUB_TOKEN \ --env WORKDIR=$WORKDIR \ --env SCRIPTSDIR=$SCRIPTSDIR \ --env COVERAGE=$COVERAGE \ --env TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG \ --env TRAVIS_BRANCH=$TRAVIS_BRANCH \ --env TRAVIS_EVENT_TYPE=$TRAVIS_EVENT_TYPE \ -v $HOST_WORKDIR:$WORKDIR \ -v /etc/localtime:/etc/localtime \ -w $SCRIPTSDIR \ $imageName $command vmemcache-0.8.1/utils/docker/images/000077500000000000000000000000001374403322600173305ustar00rootroot00000000000000vmemcache-0.8.1/utils/docker/images/Dockerfile.fedora-28000066400000000000000000000014261374403322600230130ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # Dockerfile - a 'recipe' for Docker to build an image of fedora-based # environment prepared for running libvmemcache tests. 
# # Pull base image FROM fedora:28 MAINTAINER piotr.balcer@intel.com # Install basic tools RUN dnf update -y \ && dnf install -y \ clang \ cmake \ gcc \ git \ hub \ libunwind-devel \ make \ man \ pandoc \ passwd \ rpm-build \ sudo \ tar \ wget \ which \ valgrind \ valgrind-devel \ && dnf clean all # Add user ENV USER user ENV USERPASS pass RUN useradd -m $USER RUN echo $USERPASS | passwd $USER --stdin RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS fedora ENV OS_VER 28 ENV PACKAGE_MANAGER rpm ENV NOTTY 1 vmemcache-0.8.1/utils/docker/images/Dockerfile.ubuntu-18.04000066400000000000000000000014571374403322600233200ustar00rootroot00000000000000# SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # Dockerfile - a 'recipe' for Docker to build an image of ubuntu-based # environment prepared for running libvmemcache tests. # # Pull base image FROM ubuntu:18.04 MAINTAINER piotr.balcer@intel.com # Update the Apt cache and install basic tools RUN apt-get update \ && apt-get install -y software-properties-common \ clang \ cmake \ curl \ debhelper \ devscripts \ gcc \ gdb \ git \ libunwind8-dev \ pandoc \ pkg-config \ sudo \ wget \ whois \ valgrind \ && rm -rf /var/lib/apt/lists/* # Add user ENV USER user ENV USERPASS pass RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS ubuntu ENV OS_VER 18.04 ENV PACKAGE_MANAGER deb ENV NOTTY 1 vmemcache-0.8.1/utils/docker/images/build-image.sh000077500000000000000000000020711374403322600220460ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2018, Intel Corporation # # build-image.sh - prepares a Docker image with <OS>-based # environment for testing libvmemcache, according # to the Dockerfile.<OS>-<OS-VER> file located # in the same directory. # # The script can be run locally. # set -e function usage { echo "Usage:" echo " build-image.sh <DOCKERHUB_REPO> <OS-VER>" echo "where <OS-VER>, for example, can be 'ubuntu-16.04', provided " \ "a Dockerfile named 'Dockerfile.ubuntu-16.04' exists in the " \ "current directory." } # Check if the first and second arguments are nonempty if [[ -z "$1" || -z "$2" ]]; then usage exit 1 fi # Check if the file Dockerfile.OS-VER exists if [[ ! -f "Dockerfile.$2" ]]; then echo "ERROR: wrong argument." usage exit 1 fi # Build a Docker image tagged with ${DOCKERHUB_REPO}:OS-VER docker build -t $1:$2 \ --build-arg http_proxy=$http_proxy \ --build-arg https_proxy=$https_proxy \ -f Dockerfile.$2 . vmemcache-0.8.1/utils/docker/images/push-image.sh000077500000000000000000000021261374403322600217270ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2018, Intel Corporation # # push-image.sh - pushes the Docker image tagged with OS-VER # to the Docker Hub. # # The script utilizes $DOCKERHUB_USER and $DOCKERHUB_PASSWORD variables to log in to # Docker Hub. The variables can be set in the Travis project's configuration # for automated builds. # set -e function usage { echo "Usage:" echo " push-image.sh <OS-VER>" echo "where <OS-VER>, for example, can be 'ubuntu-16.04', provided " \ "a Docker image tagged with ${DOCKERHUB_REPO}:ubuntu-16.04 exists " \ "locally." } # Check if the first argument is nonempty if [[ -z "$1" ]]; then usage exit 1 fi # Check if the image tagged with ${DOCKERHUB_REPO}:OS-VER exists locally if [[ ! $(docker images -a | awk -v pattern="^${DOCKERHUB_REPO}:$1\$" \ '$1":"$2 ~ pattern') ]] then echo "ERROR: wrong argument."
usage exit 1 fi # Log in to the Docker Hub docker login -u="$DOCKERHUB_USER" -p="$DOCKERHUB_PASSWORD" # Push the image to the repository docker push ${DOCKERHUB_REPO}:$1 vmemcache-0.8.1/utils/docker/pull-or-rebuild-image.sh000077500000000000000000000073751374403322600225340ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2018, Intel Corporation # # pull-or-rebuild-image.sh - rebuilds the Docker image used in the # current Travis build if necessary. # # The script rebuilds the Docker image if the Dockerfile for the current # OS version (Dockerfile.${OS}-${OS_VER}) or any .sh script from the directory # with Dockerfiles were modified and committed. # # If the Travis build is not of the "pull_request" type (i.e. in case of # merge after pull_request) and it succeed, the Docker image should be pushed # to the Docker Hub repository. An empty file is created to signal that to # further scripts. # # If the Docker image does not have to be rebuilt, it will be pulled from # Docker Hub. # set -e if [[ "$TRAVIS_EVENT_TYPE" != "cron" && "$TRAVIS_BRANCH" != "coverity_scan" \ && "$COVERITY" -eq 1 ]]; then echo "INFO: Skip Coverity scan job if build is triggered neither by " \ "'cron' nor by a push to 'coverity_scan' branch" exit 0 fi if [[ ( "$TRAVIS_EVENT_TYPE" == "cron" || "$TRAVIS_BRANCH" == "coverity_scan" )\ && "$COVERITY" -ne 1 ]]; then echo "INFO: Skip regular jobs if build is triggered either by 'cron'" \ " or by a push to 'coverity_scan' branch" exit 0 fi if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set properly " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then echo "ERROR: The variable HOST_WORKDIR has to contain a path to " \ "the root of this project on the host machine" exit 1 fi # TRAVIS_COMMIT_RANGE is usually invalid for force pushes - ignore such values # when used with non-upstream repository if [ -n "$TRAVIS_COMMIT_RANGE" -a $TRAVIS_REPO_SLUG != "${GITHUB_REPO}" ]; then if ! git rev-list $TRAVIS_COMMIT_RANGE; then TRAVIS_COMMIT_RANGE= fi fi # Find all the commits for the current build if [[ -n "$TRAVIS_COMMIT_RANGE" ]]; then commits=$(git rev-list $TRAVIS_COMMIT_RANGE) else commits=$TRAVIS_COMMIT fi echo "Commits in the commit range:" for commit in $commits; do echo $commit; done # Get the list of files modified by the commits files=$(for commit in $commits; do git diff-tree --no-commit-id --name-only \ -r $commit; done | sort -u) echo "Files modified within the commit range:" for file in $files; do echo $file; done # Path to directory with Dockerfiles and image building scripts images_dir_name=images base_dir=utils/docker/$images_dir_name # Check if committed file modifications require the Docker image to be rebuilt for file in $files; do # Check if modified files are relevant to the current build if [[ $file =~ ^($base_dir)\/Dockerfile\.($OS)-($OS_VER)$ ]] \ || [[ $file =~ ^($base_dir)\/.*\.sh$ ]] then # Rebuild Docker image for the current OS version echo "Rebuilding the Docker image for the Dockerfile.$OS-$OS_VER" pushd $images_dir_name ./build-image.sh ${DOCKERHUB_REPO} ${OS}-${OS_VER} popd # Check if the image has to be pushed to Docker Hub # (i.e. the build is triggered by commits to the ${GITHUB_REPO} # repository's master branch, and the Travis build is not # of the "pull_request" type). In that case, create the empty # file. 
if [[ $TRAVIS_REPO_SLUG == "${GITHUB_REPO}" \ && $TRAVIS_BRANCH == "master" \ && $TRAVIS_EVENT_TYPE != "pull_request" && $PUSH_IMAGE == "1" ]] then echo "The image will be pushed to Docker Hub" touch push_image_to_repo_flag else echo "Skip pushing the image to Docker Hub" fi if [[ $PUSH_IMAGE == "1" ]] then echo "Skip build package check if image has to be pushed" touch skip_build_package_check fi exit 0 fi done # Getting here means rebuilding the Docker image is not required. # Pull the image from Docker Hub. docker pull ${DOCKERHUB_REPO}:${OS}-${OS_VER} vmemcache-0.8.1/utils/docker/run-build.sh000077500000000000000000000071711374403322600203310ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2019, Intel Corporation # # run-build.sh - is called inside a Docker container; prepares the environment # and starts a build of libvmemcache. # set -e [ "$WORKDIR" != "" ] && cd $WORKDIR INSTALL_DIR=/tmp/vmemcache mkdir -p $INSTALL_DIR # ----------------------------------------- # gcc & Debug echo echo " ##########################################" echo " # Running the configuration: gcc & Debug #" echo " ##########################################" echo mkdir -p build cd build CC=gcc \ cmake .. -DCMAKE_BUILD_TYPE=Debug \ -DDEVELOPER_MODE=1 \ -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR \ -DTRACE_TESTS=1 make -j2 ctest --output-on-failure make install make uninstall cd .. rm -r build # ----------------------------------------- # gcc & Release echo echo " ############################################" echo " # Running the configuration: gcc & Release #" echo " ############################################" echo mkdir build cd build CC=gcc \ cmake .. -DCMAKE_BUILD_TYPE=Release \ -DDEVELOPER_MODE=1 \ -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR \ -DTRACE_TESTS=1 make -j2 ctest --output-on-failure cd .. rm -r build # ----------------------------------------- # Clang & Debug echo echo " ############################################" echo " # Running the configuration: Clang & Debug #" echo " ############################################" echo mkdir build cd build CC=clang \ cmake .. -DCMAKE_BUILD_TYPE=Debug \ -DDEVELOPER_MODE=1 \ -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR \ -DTRACE_TESTS=1 make -j2 ctest --output-on-failure cd .. rm -r build # ----------------------------------------- # Clang & Release echo echo " ##############################################" echo " # Running the configuration: Clang & Release #" echo " ##############################################" echo mkdir build cd build CC=clang \ cmake .. -DCMAKE_BUILD_TYPE=Release \ -DDEVELOPER_MODE=1 \ -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR \ -DTRACE_TESTS=1 make -j2 ctest --output-on-failure cd .. rm -r build # ----------------------------------------- # deb & rpm echo echo " ##########################################" echo " # Running the configuration: deb & rpm #" echo " ##########################################" echo mkdir -p build cd build CC=gcc \ cmake .. -DCMAKE_BUILD_TYPE=Release \ -DCPACK_GENERATOR=$PACKAGE_MANAGER \ -DCMAKE_INSTALL_PREFIX=/usr \ -DTRACE_TESTS=1 make -j2 ctest --output-on-failure make package find . 
-iname "libvmemcache*.$PACKAGE_MANAGER" if [ $PACKAGE_MANAGER = "deb" ]; then echo "$ dpkg-deb --info ./libvmemcache*.deb" dpkg-deb --info ./libvmemcache*.deb echo "$ dpkg-deb -c ./libvmemcache*.deb" dpkg-deb -c ./libvmemcache*.deb echo "$ sudo -S dpkg -i ./libvmemcache*.deb" echo $USERPASS | sudo -S dpkg -i ./libvmemcache*.deb elif [ $PACKAGE_MANAGER = "rpm" ]; then echo "$ rpm -q --info ./libvmemcache*.rpm" rpm -q --info ./libvmemcache*.rpm && true echo "$ rpm -q --list ./libvmemcache*.rpm" rpm -q --list ./libvmemcache*.rpm && true echo "$ sudo -S rpm -ivh --force *.rpm" echo $USERPASS | sudo -S rpm -ivh --force *.rpm fi cd .. rm -rf build # ----------------------------------------- # Coverage if [[ $COVERAGE -eq 1 ]] ; then echo echo " #######################################" echo " # Running the configuration: Coverage #" echo " #######################################" echo mkdir build cd build CC=gcc \ cmake .. -DCMAKE_BUILD_TYPE=Debug \ -DTRACE_TESTS=1 \ -DCOVERAGE_BUILD=1 \ -DCMAKE_C_FLAGS=-coverage make -j2 ctest --output-on-failure bash <(curl -s https://codecov.io/bash) -c cd .. rm -r build fi vmemcache-0.8.1/utils/md2man/000077500000000000000000000000001374403322600157725ustar00rootroot00000000000000vmemcache-0.8.1/utils/md2man/default.man000066400000000000000000000041611374403322600201150ustar00rootroot00000000000000$if(has-tables)$ .\"t $endif$ $if(pandoc-version)$ .\" Automatically generated by Pandoc $pandoc-version$ .\" $endif$ $if(adjusting)$ .ad $adjusting$ $endif$ .TH "$title$" "$section$" "$date$" "PMDK - $version$" "PMDK Programmer's Manual" $if(hyphenate)$ .hy $else$ .nh \" Turn off hyphenation by default. $endif$ $for(header-includes)$ $header-includes$ $endfor$ .\" Copyright 2018-$year$, Intel Corporation .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" .\" * Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" .\" * Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in .\" the documentation and/or other materials provided with the .\" distribution. .\" .\" * Neither the name of the copyright holder nor the names of its .\" contributors may be used to endorse or promote products derived .\" from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR .\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT .\" OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, .\" SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT .\" LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE .\" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. $for(include-before)$ $include-before$ $endfor$ $body$ $for(include-after)$ $include-after$ $endfor$ $if(author)$ .SH AUTHORS $for(author)$$author$$sep$; $endfor$. 
$endif$ vmemcache-0.8.1/utils/md2man/md2man.sh000077500000000000000000000032741374403322600175150ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2019, Intel Corporation # # # md2man.sh -- convert markdown to groff man pages # # usage: md2man.sh file template outfile # # This script converts markdown file into groff man page using pandoc. # It performs some pre- and post-processing for better results: # - parse input file for YAML metadata block and read man page title, # section and version # - cut-off metadata block and license # - unindent code blocks # set -e set -o pipefail filename=$1 template=$2 outfile=$3 title=`sed -n 's/^title:\ _MP(*\([A-Za-z_-]*\).*$/\1/p' $filename` section=`sed -n 's/^title:.*\([0-9]\))$/\1/p' $filename` version=`sed -n 's/^date:\ *\(.*\)$/\1/p' $filename` dt=$(date +"%F") SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date +%s)}" YEAR=$(date -u -d "@$SOURCE_DATE_EPOCH" +%Y 2>/dev/null || date -u -r "$SOURCE_DATE_EPOCH" +%Y 2>/dev/null || date -u +%Y) dt=$(date -u -d "@$SOURCE_DATE_EPOCH" +%F 2>/dev/null || date -u -r "$SOURCE_DATE_EPOCH" +%F 2>/dev/null || date -u +%F) cat $filename | sed -n -e '/# NAME #/,$p' |\ pandoc -s -t man -o $outfile.tmp --template=$template \ -V title=$title -V section=$section \ -V date="$dt" -V version="$version" \ -V year="$YEAR" | sed '/^\.IP/{ N /\n\.nf/{ s/IP/PP/ } }' # don't overwrite the output file if the only thing that changed # is modification date (diff output has exactly 4 lines in this case) if [ -e $outfile ] then difflines=`diff $outfile $outfile.tmp | wc -l || true >2` onlydates=`diff $outfile $outfile.tmp | grep "$dt" | wc -l || true` if [ $difflines -eq 4 -a $onlydates -eq 1 ]; then rm $outfile.tmp else mv $outfile.tmp $outfile fi else mv $outfile.tmp $outfile fi
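The tests and scripts above exercise the public libvmemcache API. As a reference point, here is a minimal, self-contained sketch of that API in use; it mirrors the calls made in vmemcache_test_utilization.c (vmemcache_new, vmemcache_set_size, vmemcache_set_extent_size, vmemcache_set_eviction_policy, vmemcache_add, vmemcache_put, vmemcache_errormsg, vmemcache_delete). The vmemcache_get signature and the "/tmp" backing directory are assumptions based on the vmemcache(3) man page, not something shown in this archive, so treat this as an illustrative sketch rather than shipped code:

/*
 * example_usage.c -- illustrative sketch only (not part of this archive);
 * build with: cc example_usage.c -lvmemcache
 */
#include <libvmemcache.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

int
main(void)
{
        /* configure a cache; "/tmp" as the backing directory is an assumption */
        VMEMcache *cache = vmemcache_new();
        vmemcache_set_size(cache, VMEMCACHE_MIN_POOL);
        vmemcache_set_extent_size(cache, VMEMCACHE_MIN_EXTENT);
        vmemcache_set_eviction_policy(cache, VMEMCACHE_REPLACEMENT_LRU);
        if (vmemcache_add(cache, "/tmp")) {
                fprintf(stderr, "vmemcache_add: %s\n", vmemcache_errormsg());
                return 1;
        }

        /* store one entry (the terminating NUL is stored with the value) */
        const char *key = "hello";
        const char *val = "world";
        if (vmemcache_put(cache, key, strlen(key), val, strlen(val) + 1)) {
                fprintf(stderr, "vmemcache_put: %s\n", vmemcache_errormsg());
                vmemcache_delete(cache);
                return 1;
        }

        /* read it back; signature assumed from the vmemcache(3) man page */
        char buf[32];
        ssize_t len = vmemcache_get(cache, key, strlen(key), buf,
                        sizeof(buf), 0, NULL);
        if (len < 0)
                fprintf(stderr, "vmemcache_get: %s\n", vmemcache_errormsg());
        else
                printf("got back %zd bytes: %s\n", len, buf);

        vmemcache_delete(cache);
        return 0;
}

The same configure/add/put/get flow, scaled up and randomized, is what vmemcache_test_utilization.c runs in a loop while watching the VMEMCACHE_STAT_POOL_SIZE_USED statistic against ALLOWED_RATIO.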