pax_global_header 0000666 0000000 0000000 00000000064 13477012671 0014522 g ustar 00root root 0000000 0000000 52 comment=cf3023406969b14610df03a043fca8a078c9c195
.circleci/ 0000775 0000000 0000000 00000000000 13477012671 0012721 5 ustar 00root root 0000000 0000000 .circleci/config.yml 0000664 0000000 0000000 00000044261 13477012671 0014720 0 ustar 00root root 0000000 0000000 version: 2.0
references:
container_config: &container_config
machine: true
cache_init: &cache_init
run:
name: Initialize Cache
command: |
echo "${APT_COMPILER_PACKAGE}_${BUILD_TOOLSET}_${CXX}_${CC}_${BUILD_TYPE}_${CXXFLAGS}" > /tmp/_build_env_vars
echo Build env vars used for cache keys:
cat /tmp/_build_env_vars
container_setup_pre: &container_setup_pre
restore_cache:
keys:
# Find the most recent cache from any branch
- v4_container_setup_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}
container_setup_post: &container_setup_post
save_cache:
# Add _aptcache_contents to cache key so that it is re-uploaded each time the cache changes.
key: v4_container_setup_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}_{{ checksum "/tmp/_aptcache_contents" }}
paths:
- /tmp/aptcache
container_setup: &container_setup
run:
name: Setup Environment
command: |
if [ -d "/tmp/aptcache" ]; then
echo Using packages from apt cache
sudo cp -R /tmp/aptcache/* /var/cache/apt/archives/
else
echo No apt cache found
fi
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
sudo touch /etc/apt/sources.list.d/clang.list
sudo chmod o+w /etc/apt/sources.list.d/clang.list
cat > /etc/apt/sources.list.d/clang.list << EOF
deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-4.0 main
deb-src http://apt.llvm.org/trusty/ llvm-toolchain-trusty-4.0 main
deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main
deb-src http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main
deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-6.0 main
deb-src http://apt.llvm.org/trusty/ llvm-toolchain-trusty-6.0 main
deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main
deb-src http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main
deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-8 main
deb-src http://apt.llvm.org/trusty/ llvm-toolchain-trusty-8 main
EOF
sudo chmod o-w /etc/apt/sources.list.d/clang.list
DEBIAN_FRONTEND=noninteractive sudo apt-get update -qq
DEBIAN_FRONTEND=noninteractive sudo apt-get install -y git ccache $APT_COMPILER_PACKAGE cmake3 make libcurl4-openssl-dev libssl-dev libfuse-dev python
# Use /dev/urandom when /dev/random is accessed to use less entropy
sudo cp -a /dev/urandom /dev/random
if [ "${BUILD_TOOLSET}" = "clang" ]; then
# They aren't set automatically unfortunately
sudo ln -s /usr/bin/$CC /usr/bin/clang
sudo ln -s /usr/bin/$CXX /usr/bin/clang++
sudo ln -s /usr/bin/clang-tidy-8 /usr/bin/clang-tidy
sudo ln -s /usr/bin/run-clang-tidy-8.py /usr/bin/run-clang-tidy.py
# Need a c++14 compliant STL for clang
sudo apt-get install -y g++-5
sudo apt-get remove g++-4.8 gcc-4.8
fi
# Setup ccache
sudo ln -s /usr/bin/ccache /usr/local/bin/$CC
sudo ln -s /usr/bin/ccache /usr/local/bin/$CXX
sudo mkdir /ccache_data
sudo chown circleci:circleci /ccache_data
echo 'export CCACHE_COMPILERCHECK=content' >> $BASH_ENV
echo 'export CCACHE_COMPRESS=1' >> $BASH_ENV
echo 'export CCACHE_DIR=/ccache_data' >> $BASH_ENV
echo 'export CCACHE_SLOPPINESS=include_file_mtime' >> $BASH_ENV
sudo mkdir -p /tmp/aptcache
sudo cp -R /var/cache/apt/archives/* /tmp/aptcache/
ls /tmp/aptcache > /tmp/_aptcache_contents
echo
echo System Info:
cat /etc/issue
uname -a
cmake --version
/usr/local/bin/$CC --version
/usr/local/bin/$CXX --version
upgrade_boost_pre: &upgrade_boost_pre
restore_cache:
keys:
# Find the most recent cache from any branch
- v4_upgrade_boost_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}
upgrade_boost_post: &upgrade_boost_post
save_cache:
key: v4_upgrade_boost_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}
paths:
- /tmp/boost_1_65_1
upgrade_boost: &upgrade_boost
run:
name: Upgrade Boost
command: |
# Detect number of CPU cores
export NUMCORES=`nproc`
echo Using $NUMCORES cores
# Download and prepare boost (only if not already present from cache)
if [ ! -d "/tmp/boost_1_65_1" ]; then
echo "Didn't find boost in cache. Downloading and building."
wget -O /tmp/boost.tar.bz2 https://sourceforge.net/projects/boost/files/boost/1.65.1/boost_1_65_1.tar.bz2/download
if [ $(sha512sum /tmp/boost.tar.bz2 | awk '{print $1;}') == "a9e6866d3bb3e7c198f442ff09f5322f58064dca79bc420f2f0168eb63964226dfbc4f034a5a5e5958281fdf7518a1b057c894fbda0b61fced59c1661bf30f1a" ]; then
echo Correct sha512sum
else
echo Wrong sha512sum
sha512sum boost.tar.bz2
exit 1
fi
echo Extracting...
tar -xf /tmp/boost.tar.bz2 -C /tmp
rm -rf boost.tar.bz2
cd /tmp/boost_1_65_1
./bootstrap.sh --with-toolset=${BUILD_TOOLSET} --with-libraries=filesystem,thread,chrono,program_options
cd ..
else
echo Found boost in cache. Use cache and build.
fi
# Compile and install boost (if cached, this should be fast)
cd /tmp/boost_1_65_1
sudo ./b2 toolset=${BUILD_TOOLSET} link=static cxxflags=-fPIC -d0 -j$NUMCORES install
build_pre: &build_pre
restore_cache:
keys:
# Find most recent cache from any revision on the same branch (cache keys are prefix matched)
# CIRCLE_PR_NUMBER is only set if this is a pull request.
- v3_build_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}_{{ .Branch }}_{{ .Environment.CIRCLE_PR_NUMBER }}
# Fallback to less specific caches if the one above wasn't found
- v3_build_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}_{{ .Branch }}
- v3_build_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}
build_post: &build_post
save_cache:
key: v3_build_cache_{{ checksum "/tmp/_build_env_vars" }}_{{ arch }}_{{ .Branch }}_{{ .Environment.CIRCLE_PR_NUMBER }}_{{ .Revision }}
paths:
- /ccache_data
build: &build
run:
name: Build
command: |
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
echo Using $NUMCORES cores
# Use ccache
export CXX=/usr/local/bin/$CXX
export CC=/usr/local/bin/$CC
ccache --max-size=512M
ccache --show-stats
# Disable OpenMP if it is clang, because Ubuntu 14.04 doesn't have the libomp-dev package needed to support OpenMP for clang.
if [[ ${APT_COMPILER_PACKAGE} == clang* ]]; then
OPENMP_PARAMS="-DDISABLE_OPENMP=ON"
else
OPENMP_PARAMS=""
fi
# Build
mkdir cmake
cd cmake
cmake .. -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${OPENMP_PARAMS} ${CMAKE_FLAGS}
make -j$NUMCORES
ccache --show-stats
test: &test
run:
name: Test
no_output_timeout: 120m
command: |
if "${RUN_TESTS}"; then
cd cmake
./test/gitversion/gitversion-test ${GTEST_ARGS}
./test/cpp-utils/cpp-utils-test ${GTEST_ARGS}
if [ ! "$DISABLE_BROKEN_ASAN_TESTS" = true ] ; then ./test/fspp/fspp-test ${GTEST_ARGS} ; fi
./test/parallelaccessstore/parallelaccessstore-test ${GTEST_ARGS}
./test/blockstore/blockstore-test ${GTEST_ARGS}
./test/blobstore/blobstore-test ${GTEST_ARGS}
./test/cryfs/cryfs-test ${GTEST_ARGS}
./test/cryfs-cli/cryfs-cli-test ${GTEST_ARGS}
fi
job_definition: &job_definition
<<: *container_config
steps:
- <<: *cache_init
- <<: *container_setup_pre
- <<: *container_setup
- <<: *container_setup_post
- <<: *upgrade_boost_pre
- <<: *upgrade_boost
- <<: *upgrade_boost_post
- checkout
- <<: *build_pre
- <<: *build
- <<: *build_post
- <<: *test
enable_for_tags: &enable_for_tags
filters:
tags:
only: /.*/
jobs:
gcc_5_debug:
<<: *job_definition
environment:
CC: gcc-5
CXX: g++-5
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-5"
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_5_release:
<<: *job_definition
environment:
CC: gcc-5
CXX: g++-5
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-5"
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_6_debug:
<<: *job_definition
environment:
CC: gcc-6
CXX: g++-6
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-6"
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_6_release:
<<: *job_definition
environment:
CC: gcc-6
CXX: g++-6
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-6"
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_7_debug:
<<: *job_definition
environment:
CC: gcc-7
CXX: g++-7
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-7"
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_7_release:
<<: *job_definition
environment:
CC: gcc-7
CXX: g++-7
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-7"
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_8_debug:
<<: *job_definition
environment:
CC: gcc-8
CXX: g++-8
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-8"
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_8_release:
<<: *job_definition
environment:
CC: gcc-8
CXX: g++-8
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-8"
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_9_debug:
<<: *job_definition
environment:
CC: gcc-9
CXX: g++-9
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-9"
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
gcc_9_release:
<<: *job_definition
environment:
CC: gcc-9
CXX: g++-9
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-9"
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_4_debug:
<<: *job_definition
environment:
CC: clang-4.0
CXX: clang++-4.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-4.0
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_4_release:
<<: *job_definition
environment:
CC: clang-4.0
CXX: clang++-4.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-4.0
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_5_debug:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_5_release:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_6_debug:
<<: *job_definition
environment:
CC: clang-6.0
CXX: clang++-6.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-6.0
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_6_release:
<<: *job_definition
environment:
CC: clang-6.0
CXX: clang++-6.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-6.0
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_7_debug:
<<: *job_definition
environment:
CC: clang-7
CXX: clang++-7
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-7
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_7_release:
<<: *job_definition
environment:
CC: clang-7
CXX: clang++-7
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-7
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_8_debug:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
CXXFLAGS: ""
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_8_release:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_werror:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: "-DUSE_WERROR=on"
RUN_TESTS: false
gcc_werror:
<<: *job_definition
environment:
CC: gcc-9
CXX: g++-9
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-9"
CXXFLAGS: ""
BUILD_TYPE: "Release"
GTEST_ARGS: ""
CMAKE_FLAGS: "-DUSE_WERROR=on"
RUN_TESTS: false
no_compatibility:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
CXXFLAGS: "-DCRYFS_NO_COMPATIBILITY"
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
address_sanitizer:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
CXXFLAGS: "-O2 -fsanitize=address -fno-omit-frame-pointer -fno-common -fsanitize-address-use-after-scope"
BUILD_TYPE: "Debug"
ASAN_OPTIONS: "detect_leaks=1 check_initialization_order=1 detect_stack_use_after_return=1 detect_invalid_pointer_pairs=1 atexit=1"
DISABLE_BROKEN_ASAN_TESTS: true
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
ub_sanitizer:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
CXXFLAGS: "-O2 -fno-sanitize-recover=undefined,nullability,implicit-conversion,unsigned-integer-overflow -fno-omit-frame-pointer -fno-common"
BUILD_TYPE: "Debug"
GTEST_ARGS: ""
CMAKE_FLAGS: ""
RUN_TESTS: true
thread_sanitizer:
<<: *job_definition
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-8
OMP_NUM_THREADS: "1"
CXXFLAGS: "-O2 -fsanitize=thread -fno-omit-frame-pointer"
BUILD_TYPE: "Debug"
GTEST_ARGS: "--gtest_filter=-LoggingTest.LoggingAlsoWorksAfterFork:AssertTest_*:BacktraceTest.*:SignalCatcherTest.*_thenDies:SignalHandlerTest.*_thenDies:SignalHandlerTest.givenMultipleSigIntHandlers_whenRaising_thenCatchesCorrectSignal:CliTest_Setup.*:CliTest_IntegrityCheck.*:*/CliTest_WrongEnvironment.*:CliTest_Unmount.*"
CMAKE_FLAGS: ""
RUN_TESTS: true
clang_tidy:
<<: *container_config
steps:
- <<: *cache_init
- <<: *container_setup_pre
- <<: *container_setup
- <<: *container_setup_post
- <<: *upgrade_boost_pre
- <<: *upgrade_boost
- <<: *upgrade_boost_post
- checkout
- run:
name: clang-tidy
command: |
# realpath, jq are needed for run-clang-tidy.sh, g++ is needed for pyyaml
sudo apt-get install realpath g++ jq
pip install pyyaml
mkdir cmake
cd cmake
if ! ../run-clang-tidy.sh -fix ; then
git diff > /tmp/clang-tidy-fixes
exit 1
fi
- store_artifacts:
path: /tmp/clang-tidy-fixes
environment:
CC: clang-8
CXX: clang++-8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: "clang-8 clang-tidy-8"
workflows:
version: 2
build_and_test:
jobs:
- gcc_5_debug:
<<: *enable_for_tags
- gcc_5_release:
<<: *enable_for_tags
- gcc_6_debug:
<<: *enable_for_tags
- gcc_6_release:
<<: *enable_for_tags
- gcc_7_debug:
<<: *enable_for_tags
- gcc_7_release:
<<: *enable_for_tags
- gcc_8_debug:
<<: *enable_for_tags
- gcc_8_release:
<<: *enable_for_tags
- gcc_9_debug:
<<: *enable_for_tags
- gcc_9_release:
<<: *enable_for_tags
- clang_4_debug:
<<: *enable_for_tags
- clang_4_release:
<<: *enable_for_tags
- clang_5_debug:
<<: *enable_for_tags
- clang_5_release:
<<: *enable_for_tags
- clang_6_debug:
<<: *enable_for_tags
- clang_6_release:
<<: *enable_for_tags
- clang_7_debug:
<<: *enable_for_tags
- clang_7_release:
<<: *enable_for_tags
- clang_8_debug:
<<: *enable_for_tags
- clang_8_release:
<<: *enable_for_tags
- clang_werror:
<<: *enable_for_tags
- gcc_werror:
<<: *enable_for_tags
- no_compatibility:
<<: *enable_for_tags
- address_sanitizer:
<<: *enable_for_tags
- ub_sanitizer:
<<: *enable_for_tags
- thread_sanitizer:
<<: *enable_for_tags
- clang_tidy:
<<: *enable_for_tags
.clang-tidy 0000664 0000000 0000000 00000004414 13477012671 0013125 0 ustar 00root root 0000000 0000000 ---
# TODO Enable (some of) the explicitly disabled checks. Possibly needs helper types from gsl library or similar to enable full cppcoreguidelines.
# TODO Enable more checks (google-*, hicpp-*, llvm-*, modernize-*, mpi-*, performance-*, readability-*)
# TODO Maybe just enable * and disable a list instead?
Checks: |
clang-diagnostic-*,
clang-analyzer-*,
bugprone-*,
cert-*,
cppcoreguidelines-*,
misc-*,
boost-use-to-string,
-cert-env33-c,
-cert-err58-cpp,
-cert-err60-cpp,
-bugprone-macro-parentheses,
-bugprone-exception-escape,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-special-member-functions,
-cppcoreguidelines-pro-type-cstyle-cast,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-type-vararg,
-cppcoreguidelines-avoid-goto,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-non-private-member-variables-in-classes,
-clang-analyzer-optin.cplusplus.VirtualCall,
-clang-analyzer-cplusplus.NewDeleteLeaks,
-misc-macro-parentheses,
-misc-non-private-member-variables-in-classes,
-misc-unused-raii
WarningsAsErrors: '*'
HeaderFilterRegex: '/src/|/test/'
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
...
.github/ 0000775 0000000 0000000 00000000000 13477012671 0012426 5 ustar 00root root 0000000 0000000 .github/ISSUE_TEMPLATE.md 0000664 0000000 0000000 00000000253 13477012671 0015133 0 ustar 00root root 0000000 0000000 ## Expected Behavior
## Actual Behavior
## Steps to Reproduce the Problem
1.
2.
3.
## Specifications
- CryFS Version:
- Operating System (incl. Version):
.gitignore 0000664 0000000 0000000 00000000257 13477012671 0013062 0 ustar 00root root 0000000 0000000 umltest.inner.sh
umltest.status
/build
/cmake
/cmake-build-*
/.idea
*~
src/gitversion/*.pyc
src/gitversion/__pycache__
cmake-build-debug
cmake-build-release
cmake-build-test
.travis.yml 0000664 0000000 0000000 00000000350 13477012671 0013175 0 ustar 00root root 0000000 0000000 language: cpp
sudo: required
os: osx
compiler:
# - gcc
- clang
env:
- BUILD_TARGET=Debug
- BUILD_TARGET=Release
- BUILD_TARGET=RelWithDebInfo
install:
- .travisci/install.sh
script:
- .travisci/build_and_test.sh
cache: ccache
.travisci/ 0000775 0000000 0000000 00000000000 13477012671 0012770 5 ustar 00root root 0000000 0000000 .travisci/build_and_test.sh 0000775 0000000 0000000 00000002466 13477012671 0016317 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -ev
# If using gcc on mac, actually use it ("gcc" just links to clang, but "gcc-4.8" is gcc, https://github.com/travis-ci/travis-ci/issues/2423)
# Note: This must be here and not in install.sh, because environment variables can't be passed between scripts.
if [ "${CXX}" == "g++" ]; then
echo Switch to actual g++ and not just the AppleClang symlink
export CXX="g++-7" CC="gcc-7"
else
echo Do not switch to actual g++ because we are not g++
fi
# Setup ccache
export PATH="/usr/local/opt/ccache/libexec:$PATH"
export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=1
export CCACHE_SLOPPINESS=include_file_mtime
ccache --max-size=512M
ccache --show-stats
# Detect number of CPU cores
export NUMCORES=`sysctl -n hw.ncpu`
echo Using $NUMCORES cores
echo Using CXX compiler $CXX and C compiler $CC
# Setup target directory
mkdir cmake
cd cmake
cmake --version
# Build
echo Build target: ${BUILD_TARGET}
cmake .. -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=${BUILD_TARGET}
make -j$NUMCORES
ccache --show-stats
# Test
./test/gitversion/gitversion-test
./test/cpp-utils/cpp-utils-test
./test/parallelaccessstore/parallelaccessstore-test
./test/blockstore/blockstore-test
./test/blobstore/blobstore-test
./test/cryfs/cryfs-test
# TODO Also run once fixed
# ./test/fspp/fspp-test
# ./test/cryfs-cli/cryfs-cli-test
.travisci/install.sh 0000775 0000000 0000000 00000001203 13477012671 0014771 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -e
# Install newer GCC if we're running on GCC
if [ "${CXX}" == "g++" ]; then
# We need to uninstall oclint because it creates a /usr/local/include/c++ symlink that clashes with the gcc5 package
# see https://github.com/Homebrew/homebrew-core/issues/21172
brew cask uninstall oclint
brew install gcc@7
fi
brew cask install osxfuse
brew install libomp
# By default, travis only fetches the newest 50 commits. We need more in case we're further from the last version tag, so the build doesn't fail because it can't generate the version number.
git fetch --unshallow --tags
# Setup ccache
brew install ccache
CMakeLists.txt 0000664 0000000 0000000 00000003571 13477012671 0013634 0 ustar 00root root 0000000 0000000 cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
cmake_policy(SET CMP0054 NEW)
# TODO Perf test:
# - try if setting CRYPTOPP_NATIVE_ARCH=ON and adding -march=native to the compile commands for cryfs source files makes a difference
# -> if yes, offer a cmake option to enable both of these
project(cryfs)
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/cmake-utils)
include(utils)
require_gcc_version(5.0)
require_clang_version(4.0)
# Default value is not to build test cases
option(BUILD_TESTING "build test cases" OFF)
option(CRYFS_UPDATE_CHECKS "let cryfs check for updates and security vulnerabilities" ON)
option(DISABLE_OPENMP "allow building without OpenMP libraries. This will cause performance degradations." OFF)
# The following options are helpful for development and/or CI
option(USE_WERROR "build with -Werror flag")
option(USE_CLANG_TIDY "build with clang-tidy checks enabled" OFF)
option(USE_IWYU "build with iwyu checks enabled" OFF)
option(CLANG_TIDY_WARNINGS_AS_ERRORS "treat clang-tidy warnings as errors" OFF)
if(USE_IWYU)
# note: for iwyu, we need cmake 3.3
cmake_minimum_required(VERSION 3.3 FATAL_ERROR)
endif()
if(USE_CLANG_TIDY)
# note: for clang-tidy, we need cmake 3.6, or (if the return code should be handled correctly, e.g. on CI), we need 3.8.
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
endif()
if (MSVC)
option(DOKAN_PATH "Location of the Dokan library, e.g. C:\\Program Files\\Dokan\\DokanLibrary-1.1.0" "")
endif()
# Default value is to build in release mode but with debug symbols
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE INTERNAL "CMAKE_BUILD_TYPE")
endif(NOT CMAKE_BUILD_TYPE)
# The MSVC version on AppVeyor CI needs this
if(MSVC)
add_definitions(/bigobj)
endif()
add_subdirectory(vendor EXCLUDE_FROM_ALL)
add_subdirectory(src)
add_subdirectory(doc)
add_subdirectory(test)
add_subdirectory(cpack)
CMakeSettings.json 0000664 0000000 0000000 00000004242 13477012671 0014464 0 ustar 00root root 0000000 0000000 {
"configurations": [
{
"name": "x86-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x86" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DBOOST_ROOT=C:\\local\\boost_1_68_0 -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
},
{
"name": "x86-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"inheritEnvironments": [ "msvc_x86" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DBOOST_ROOT=C:\\local\\boost_1_68_0 -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
},
{
"name": "x64-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DBOOST_ROOT=C:\\local\\boost_1_68_0 -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
},
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DBOOST_ROOT=C:\\local\\boost_1_68_0 -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
}
]
} ChangeLog.txt 0000664 0000000 0000000 00000026535 13477012671 0013471 0 ustar 00root root 0000000 0000000 Version 0.10.2
---------------
Fixed bugs:
* Fix occasional crash in mkdir() on Windows
* Fix a race condition when a file descriptor is closed while there's read/write requests for that file being processed.
Improvements:
* Better logging when local state can't be loaded
Other:
* Updated to crypto++ 8.2
Version 0.10.1
---------------
Fixed bugs:
* If file system migration encounters files or folders with the wrong format in the base directory, it now just ignores them instead of crashing.
* When trying to migrate a file system from CryFS 0.9.3 or older, show an error message suggesting to first open it with 0.9.10 because we can't load that anymore.
* The '--unmount-idle' parameter works again
* Fix building with boost 1.67
Compatibility:
* Fixed some incompatibilities with systems using the musl libc
* Use boost::stacktrace instead of libbacktrace to build stack traces. This fixes a segfault issue with platforms using libexecinfo and is generally more portable.
Other:
* Updated to crypto++ 8.1
* Updated to DokanY 1.2.1
* Unit tests can now be run from any directory
Version 0.10.0
---------------
New Features & Improvements:
* Experimental Windows support
* Integrity checks ensure you notice when someone modifies your file system.
* File system nodes (files, directories, symlinks) store a parent pointer to the directory that contains them. This information can be used in later versions to resolve some synchronization conflicts.
* Allow mounting using system mount tool and /etc/fstab (e.g. mount -t fuse.cryfs basedir mountdir)
* Performance improvements
* Use relatime instead of strictatime (further performance improvement)
* Pass fuse options directly to cryfs (i.e. 'cryfs basedir mountdir -o allow_other' instead of 'cryfs basedir mountdir -- -o allow_other')
* CryFS tells the operating system to lock the encryption key to memory, i.e. not swap it to the disk (note: this is best-effort and cannot be guaranteed. Hibernation, for example, will still write the encryption key to the disk).
* New block size options: 4KB and 16KB
* New default block size: 16KB. This should decrease the size of the ciphertext directory for most users.
* Increased scrypt hardness to (N=1048576, r=4, p=8) to make it harder to crack the key while allowing cryfs to take advantage of multicore machines.
* cryfs-unmount tool to unmount filesystems
Fixed bugs:
* `du` shows correct file system size on Mac OS X.
* On Mac OS X, Finder shows the correct name for the mount directory
Version 0.9.10
--------------
Fixed bugs:
* Fixed occasional deadlock (https://github.com/cryfs/cryfs/issues/64)
* Fix for reading empty files out of bounds
* Fixed race condition (https://github.com/cryfs/cryfs/issues/224 and https://github.com/cryfs/cryfs/issues/243)
Version 0.9.9
--------------
Improvements:
* Add --allow-filesystem-upgrade option which will upgrade old file systems without asking the user. This will be especially helpful for GUI tools.
* Add --version option that shows the CryFS version and exits.
* When CryFS fails to load a file system, the process stops with a helpful error code, which can be used by GUI tools to show detailed messages.
* Only migrate a file system if the underlying storage format changed
Version 0.9.8
--------------
Compatibility:
* Runs on Debian with FreeBSD kernel
* Runs on FreeBSD 11.1
* Works with Crypto++ 6.0
Improvements:
* added a man page
Fixed bugs:
* `du` shows correct file system size
* Updated spdlog dependency to fix build on newer systems
Version 0.9.7
--------------
Compatibility:
* Runs on FreeBSD
* Works with Clang++ 3.8 (Debian experimental or newer Ubuntu systems)
* Works with GCC 7
Version 0.9.6
---------------
Fixed bugs:
* Fix potential deadlock
* Fix potential crash
Improvements:
* Allow building with -DCRYFS_UPDATE_CHECKS=off, which will create an executable with disabled update checks (the alternative to disable them in the environment also still works).
* Automatically disable update checks when running in noninteractive mode.
* More detailed error reporting if key derivation fails
Compatibility:
* Compatible with libcurl version >= 7.50.0, and <= 7.21.6 (tested down to 7.19.0)
* Compatible with Crypto++ 5.6.4
* Compatible with compilers running under hardening-wrapper
Version 0.9.5
---------------
Fixed Bugs:
* Fixed a bug that prevented mounting a file system on Mac OS X.
* File system operations correctly update the timestamps (access time, modification time and status change time).
* Reacts correctly to fsync() and fdatasync() syscalls by flushing the corresponding data to the disk.
Improvements:
* When mounting an old file system, CryFS will ask before migrating it to the newest version.
* Operating system tools like the mount command or /proc/self/mountinfo report correct file system type and also report the base directory.
* Compatibility with GCC 6
Version 0.9.4
---------------
Improvements:
* Ciphertext blocks are split into subdirectories (before, all were on top level) to reduce number of files per directory. Some unix tools don't work well with directories with too many entries.
Fixed Bugs:
* Renaming a file to an existing file (i.e. overwriting an existing file) didn't free the allocated memory for the overwritten file
* Renaming a file to an existing file could hurt an invariant in the directory layout (directory entries have to be sorted) and doing so could cause files to seemingly disappear.
* Fix a potential deadlock in the cache
Compatibility:
* The generated .deb packages work for any Ubuntu/Debian based distribution, but will not install the package source for automatic updates if it's an unsupported operating system.
Version 0.9.3
---------------
New Features:
* The ciphertext block size is configurable. You can use the "--blocksize" command line argument.
If not specified, CryFS will ask you for a block size when creating a file system.
* It's easier for tools and scripts to use CryFS:
If an environment variable CRYFS_FRONTEND=noninteractive is set, we don't ask for options
(but take default values for everything that's not specified on command line).
Furthermore, in noninteractive mode, we won't ask for password confirmation when creating a file system.
The password only has to be sent once to stdin.
* You can disable the automatic update check by setting CRYFS_NO_UPDATE_CHECK=true in your environment.
Fixed Bugs:
* Building CryFS from the GitHub tarball (i.e. when there is no .git directory present) works.
* A bug in the fstat implementation caused problems with some text editors (e.g. nano) falsely thinking a file changed since they opened it.
* When trying to rename a file to an already existing file name, a bug deleted it instead.
* Rename operation allows overwriting existing files, as specified in the rename(2) man page.
Compatibility:
* The generated .deb packages for Debian also work for the Devuan operating system.
Version 0.9.2
---------------
* Experimental support for installing CryFS on Mac OS X using homebrew
(0.9.2 is not released for Linux)
Version 0.9.1
---------------
* Report file system usage statistics to the operating system (e.g. amount of space used). This information can be queried using the 'df' tool on linux. See https://github.com/cryfs/cryfs/commit/68acc27e88ff5209ca55ddb4e91f5a449d77fb54
* Use stronger scrypt parameters when generating the config file key from the user password. This makes it a bit more secure, but also takes a bit longer to load a file system. See https://github.com/cryfs/cryfs/commit/7f1493ab9210319cab008e71d4ee8f4d7d920f39
* Fix a bug where deleting a non-empty directory could leave some blocks over. See https://github.com/cryfs/cryfs/commit/df041ac84511e4560c4f099cd8cc089d08e05737
Version 0.9.0
---------------
(warning) file systems created with earlier CryFS versions are incompatible with this release.
* Fully support file access times
* Fix: Password is read from stdin, not from glibc getpass(). This enables external tools (e.g. GUIs) to pass in the password without problems.
* Remove --extpass parameter, because that encourages tool writers to do bad things like storing a password in a file and using --extpass="cat filename".
The password can now be passed in to stdin without problems, so tools should use that.
* Works with zuluMount GUI, https://mhogomchungu.github.io/zuluCrypt/
* Introduce version flags for file system entities to allow future CryFS versions to be backwards-compatible even if the format changes.
* (for developers) New git repository layout. All subrepositories have been merged to one directory.
* (for developers) Using CMake instead of biicode as build system.
Version 0.8.6
---------------
* Fix a deadlock that was caused when a very high load of parallel resize operations was issued, see https://github.com/cryfs/cryfs/issues/3
* Fix a bug that prevented deleting symlinks, see https://github.com/cryfs/cryfs/issues/2
* Gracefully accept modifications to the file access times instead of failing, although they're not stored yet (they will be stored in 0.9.0). This should fix https://github.com/cryfs/cryfs/issues/4
Version 0.8.5
---------------
* Fix package manager warning when installing the .deb package
* Offer a default configuration when creating new filesystems
* If the given base or mount directory doesn't exist, offer to create them
Version 0.8.4
---------------
* Offering .deb packages for Debian and Ubuntu
* Compatibility with 32bit systems
* Support files larger than 4GB
Version 0.8.3
---------------
* Ask for password confirmation when creating new filesystem
* Check for new CryFS versions and ask the user to update if a new version is available
* Implemented a mechanism that can show warnings about security bugs to users of a certain CryFS version. Let's hope this won't be necessary ;)
* Compatibility with GCC 4.8 (that allows compiling on Ubuntu 14.04 for example)
Version 0.8.2
---------------
* Mount directory, base directory, logfile and config file can be specified as relative paths
* Improved error messages
Version 0.8.1
---------------
* Config File Encryption: Configuration files are encrypted with two ciphers. The user specifies a password, which is then used with the scrypt KDF to generate the two encryption keys.
- Inner level: Encrypts the config data using the user specified cipher.
- Outer level: Encrypts the name of the inner cipher and the inner level ciphertext using aes-256-gcm.
The config file is padded to hide the size of the configuration data (including the name of the cipher used).
* No external config file needed: If the configuration file is not specified as command line parameter, it will be put into the base directory. This way, the filesystem can be mounted with the password only, without specifying a config file on command line.
* Logfiles: Added a --logfile option to specify where logs should be written to. If the option is not specified, CryFs logs to syslog.
* Running in Background: Fixed daemonization. When CryFs is run without "-f" flag, it will run in background.
* Better error messages when base directory is not existing, not readable or not writeable.
* Allow --cipher=xxx to specify cipher on command line. If cryfs is creating a new filesystem, it will use this cipher. If it is opening an existing filesystem, it will check whether this is the cipher used by it.
* --show-ciphers shows a list of all supported ciphers
* --extpass allows using an external program for password input
* --unmount-idle x automatically unmounts the filesystem after x minutes without a filesystem operation.
LICENSE.txt 0000664 0000000 0000000 00000016743 13477012671 0012724 0 ustar 00root root 0000000 0000000 GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
README.md 0000664 0000000 0000000 00000014120 13477012671 0012343 0 ustar 00root root 0000000 0000000 # CryFS [](https://travis-ci.org/cryfs/cryfs) [](https://circleci.com/gh/cryfs/cryfs/tree/master) [](https://ci.appveyor.com/project/smessmer/cryfs/branch/master)
CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.
See [https://www.cryfs.org](https://www.cryfs.org).
Install latest release
======================
Linux
------
This only works for Ubuntu 17.04 and later, and Debian Stretch and later.
You can also use CryFS on older versions of these distributions by following the **Building from source** instructions below.
sudo apt install cryfs
OSX
----
CryFS is distributed via Homebrew. Just do
brew cask install osxfuse
brew install cryfs
Windows (experimental)
----------------------
CryFS has experimental Windows support since the 0.10 release series. To install it, do:
1. Install [DokanY](https://github.com/dokan-dev/dokany/releases)
2. Install [Microsoft Visual C++ Redistributable for Visual Studio 2017](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads)
3. Install [CryFS](https://www.cryfs.org/#download)
GUI
===
There are some GUI applications with CryFS support. You usually have to install the GUI **and** also CryFS itself for it to work.
- [SiriKali](https://mhogomchungu.github.io/sirikali/)
- [Plasma Vault](https://www.kde.org/announcements/plasma-5.11.0.php) in KDE Plasma >= 5.11
Building from source
====================
Requirements
------------
- Git (for getting the source code)
- GCC version >= 5.0 or Clang >= 4.0
- CMake version >= 3.0
- libcurl4 (including development headers)
- Boost libraries version >= 1.65.1 (including development headers)
- filesystem
- system
- chrono
- program_options
- thread
- SSL development libraries (including development headers, e.g. libssl-dev)
- libFUSE version >= 2.8.6 (including development headers), on Mac OS X instead install osxfuse from https://osxfuse.github.io/
- Python >= 2.7
- OpenMP
You can use the following commands to install these requirements
# Ubuntu
$ sudo apt install git g++ cmake make libcurl4-openssl-dev libboost-filesystem-dev libboost-system-dev libboost-chrono-dev libboost-program-options-dev libboost-thread-dev libssl-dev libfuse-dev python
# Fedora
sudo dnf install git gcc-c++ cmake make libcurl-devel boost-devel boost-static openssl-devel fuse-devel python
# Macintosh
brew install cmake boost openssl libomp
Build & Install
---------------
1. Clone repository
$ git clone https://github.com/cryfs/cryfs.git cryfs
$ cd cryfs
2. Build
$ mkdir cmake && cd cmake
$ cmake ..
$ make
3. Install
$ sudo make install
You can pass the following variables to the *cmake* command (using *-Dvariablename=value*):
- **-DCMAKE_BUILD_TYPE**=[Release|Debug]: Whether to run code optimization or add debug symbols. Default: Release
- **-DBUILD_TESTING**=[on|off]: Whether to build the test cases (can take a long time). Default: off
- **-DCRYFS_UPDATE_CHECKS**=off: Build a CryFS that doesn't check online for updates and security vulnerabilities.
Building on Windows (experimental)
----------------------------------
Build with Visual Studio 2017 and pass in the following flags to CMake:
-DDOKAN_PATH=[dokan library location, e.g. "C:\Program Files\Dokan\DokanLibrary-1.2.1"]
-DBOOST_ROOT=[path to root of boost installation]
If you set these variables correctly in the `CMakeSettings.json` file, you should be able to open the cryfs source folder with Visual Studio 2017.
Troubleshooting
---------------
On most systems, CMake should find the libraries automatically. However, that doesn't always work.
1. **Boost headers not found**
Pass in the boost include path with
cmake .. -DBoost_INCLUDE_DIRS=/path/to/boost/headers
If you want to link boost dynamically (e.g. you don't have the static libraries), use the following:
cmake .. -DBoost_USE_STATIC_LIBS=off
2. **Fuse/Osxfuse library not found**
Pass in the library path with
cmake .. -DFUSE_LIB_PATH=/path/to/fuse/or/osxfuse
3. **Fuse/Osxfuse headers not found**
Pass in the include path with
cmake .. -DCMAKE_CXX_FLAGS="-I/path/to/fuse/or/osxfuse/headers"
4. **Openssl headers not found**
Pass in the include path with
cmake .. -DCMAKE_C_FLAGS="-I/path/to/openssl/include"
5. **OpenMP not found (osx)**
Either build it without OpenMP
cmake .. -DDISABLE_OPENMP=on
but that will cause slower file system mount times (performance after mounting will be unaffected).
If you installed OpenMP with homebrew or macports, it should be autodetected.
If that doesn't work for some reason (or you want to use a different installation than the autodetected one),
pass in these flags:
cmake .. -DOpenMP_CXX_FLAGS='-Xpreprocessor -fopenmp -I/path/to/openmp/include' -DOpenMP_CXX_LIB_NAMES=omp -DOpenMP_omp_LIBRARY=/path/to/libomp.dylib
Creating .deb and .rpm packages
-------------------------------
There are additional requirements if you want to create packages. They are:
- CMake version >= 3.3
- rpmbuild for creating .rpm package
1. Clone repository
$ git clone https://github.com/cryfs/cryfs.git cryfs
$ cd cryfs
2. Build
$ mkdir cmake && cd cmake
$ cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTING=off
$ make package
Disclaimer
----------------------
In the event of a password leak, you are strongly advised to create a new filesystem and copy all the data over from the previous one. Once this is done, all copies of the compromised filesystem and config file must be removed (e.g., from the "previous versions" feature of your cloud system) to prevent access to the key (and, as a result, your data) using the leaked password.
appveyor.yml 0000664 0000000 0000000 00000003672 13477012671 0013466 0 ustar 00root root 0000000 0000000 image:
- Visual Studio 2017
#- Visual Studio 2017 Preview
# Build matrix: every platform x configuration combination is built.
platform:
- x64
- x86
#- Any CPU
configuration:
- Debug
- RelWithDebInfo
- Release
version: '{branch}-{build}'
# Select the matching vcvars (32/64 bit) environment for the chosen platform
# and worker image before anything else runs.
init:
- echo %NUMBER_OF_PROCESSORS%
- echo %PLATFORM%
- echo %APPVEYOR_BUILD_WORKER_IMAGE%
- set arch=32
- if "%PLATFORM%"=="x64" ( set arch=64)
- set VisualStudioVersion=2017
- if "%APPVEYOR_BUILD_WORKER_IMAGE%" == "Visual Studio 2017 Preview" ( set VisualStudioVersion=Preview)
- cmd: call "C:\Program Files (x86)\Microsoft Visual Studio\%VisualStudioVersion%\Community\VC\Auxiliary\Build\vcvars%arch%.bat"
# DokanY (user-mode filesystem driver) is required to build/run CryFS on Windows.
install:
- choco install -y dokany --version 1.2.1.2000 --installargs INSTALLDEVFILES=1
- cmake --version
# Configure with Ninja, build, run the test executables, then package an MSI.
build_script:
- cmd: mkdir build
- cmd: cd build
# note: The cmake+ninja workflow requires us to set build type in both cmake commands ('cmake' and 'cmake --build'), otherwise the cryfs.exe will depend on debug versions of the visual studio c++ runtime (i.e. msvcp140d.dll)
- cmd: cmake .. -G "Ninja" -DCMAKE_BUILD_TYPE=%CONFIGURATION% -DBUILD_TESTING=on -DBOOST_ROOT="C:/Libraries/boost_1_67_0" -DDOKAN_PATH="C:/Program Files/Dokan/DokanLibrary-1.2.1"
- cmd: cmake --build . --config %CONFIGURATION%
- cmd: .\test\gitversion\gitversion-test.exe
# cpp-utils-test disables ThreadDebuggingTest_ThreadName.*_thenIsCorrect because the appveyor image is too old to support the API needed for that
- cmd: .\test\cpp-utils\cpp-utils-test.exe --gtest_filter=-ThreadDebuggingTest_ThreadName.*_thenIsCorrect
#- cmd: .\test\fspp\fspp-test.exe
- cmd: .\test\parallelaccessstore\parallelaccessstore-test.exe
- cmd: .\test\blockstore\blockstore-test.exe
- cmd: .\test\blobstore\blobstore-test.exe
- cmd: .\test\cryfs\cryfs-test.exe
#- cmd: .\test\cryfs-cli\cryfs-cli-test.exe
- cmd: cpack -C %CONFIGURATION% --verbose -G WIX
# On packaging failure, dump the WIX log for diagnosis.
on_failure:
- cmd: type C:\projects\cryfs\build\_CPack_Packages\win64\WIX\wix.log
# Publish the generated installer as a build artifact.
artifacts:
- path: build/cryfs-*.msi
name: CryFS
archive.sh 0000775 0000000 0000000 00000000420 13477012671 0013042 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Create detached-GPG-signed release tarballs (.tar.gz and .tar.xz) for a git ref.
#
# Usage: archive.sh <tag> <gpg-home-dir>
#   <tag>          git tag/ref to archive (also used in the output file names)
#   <gpg-home-dir> GPG home directory that holds the release signing key
#
# Fixes over the previous version: TAG was assigned but never used ($1 was
# referenced directly), output filenames were unquoted (broken for tags
# containing spaces or glob characters), and errors were not fatal.

set -eu  # abort on missing arguments or any failing command

TAG=$1
GPGHOMEDIR=$2

# gzip-compressed archive + detached ASCII-armored signature
git archive --format=tgz "$TAG" > "cryfs-$TAG.tar.gz"
gpg --homedir "$GPGHOMEDIR" --armor --detach-sign "cryfs-$TAG.tar.gz"

# xz-compressed archive (xz -9 = maximum compression) + signature
git archive --format=tar "$TAG" | xz -9 > "cryfs-$TAG.tar.xz"
gpg --homedir "$GPGHOMEDIR" --armor --detach-sign "cryfs-$TAG.tar.xz"
cmake-utils/ 0000775 0000000 0000000 00000000000 13477012671 0013304 5 ustar 00root root 0000000 0000000 cmake-utils/FindLibunwind.cmake 0000664 0000000 0000000 00000003556 13477012671 0017053 0 ustar 00root root 0000000 0000000 # Taken from https://github.com/monero-project/monero/blob/31bdf7bd113c2576fe579ef3a25a2d8fef419ffc/cmake/FindLibunwind.cmake
# modifications:
# - remove linkage against gcc_eh because it was causing segfaults in various of our unit tests
# - Try to find libunwind
# Once done this will define
#
# LIBUNWIND_FOUND - system has libunwind
# LIBUNWIND_INCLUDE_DIR - the libunwind include directory
# LIBUNWIND_LIBRARIES - Link these to use libunwind
# LIBUNWIND_DEFINITIONS - Compiler switches required for using libunwind
# Copyright (c) 2006, Alexander Dymo,
#
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
# Locate the libunwind header in the standard system include paths.
find_path(LIBUNWIND_INCLUDE_DIR libunwind.h
/usr/include
/usr/local/include
)
# Locate the libunwind library; on failure this is "LIBUNWIND_LIBRARIES-NOTFOUND".
find_library(LIBUNWIND_LIBRARIES NAMES unwind )
if(NOT LIBUNWIND_LIBRARIES STREQUAL "LIBUNWIND_LIBRARIES-NOTFOUND")
if (CMAKE_COMPILER_IS_GNUCC)
# Deliberate no-op: upstream (monero) appended gcc_eh here. That linkage was
# removed (see the modification note at the top of this file) because it
# caused segfaults in unit tests; the branch is kept to document the change.
set(LIBUNWIND_LIBRARIES "${LIBUNWIND_LIBRARIES}")
endif()
endif()
# some versions of libunwind need liblzma, and we don't use pkg-config
# so we just look whether liblzma is installed, and add it if it is.
# It might not be actually needed, but doesn't hurt if it is not.
# We don't need any headers, just the lib, as it's privately needed.
message(STATUS "looking for liblzma")
find_library(LIBLZMA_LIBRARIES lzma )
if(NOT LIBLZMA_LIBRARIES STREQUAL "LIBLZMA_LIBRARIES-NOTFOUND")
message(STATUS "liblzma found")
# Append liblzma so consumers linking against LIBUNWIND_LIBRARIES get it too.
set(LIBUNWIND_LIBRARIES "${LIBUNWIND_LIBRARIES};${LIBLZMA_LIBRARIES}")
endif()
# Sets LIBUNWIND_FOUND and reports an error if the required variables are unset.
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Libunwind "Could not find libunwind" LIBUNWIND_INCLUDE_DIR LIBUNWIND_LIBRARIES)
# show the LIBUNWIND_INCLUDE_DIR and LIBUNWIND_LIBRARIES variables only in the advanced view
mark_as_advanced(LIBUNWIND_INCLUDE_DIR LIBUNWIND_LIBRARIES ) cmake-utils/TargetArch.cmake 0000664 0000000 0000000 00000015516 13477012671 0016342 0 ustar 00root root 0000000 0000000 # This file is taken from https://github.com/axr/solar-cmake/blob/73cfea0db0284c5e2010aca23989046e5bda95c9/TargetArch.cmake
# License:
# Copyright (c) 2012 Petroules Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Based on the Qt 5 processor detection code, so should be very accurate
# https://qt.gitorious.org/qt/qtbase/blobs/master/src/corelib/global/qprocessordetection.h
# Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64)
# Regarding POWER/PowerPC, just as is noted in the Qt source,
# "There are many more known variants/revisions that we do not handle/detect."
# C source that always fails to compile; the '#error cmake_ARCH <name>'
# message selected by the preprocessor encodes the target architecture and is
# parsed out of the compiler output by target_architecture() below.
# NOTE: the string value must stay byte-identical — it is compiled as-is.
set(archdetect_c_code "
#if defined(__arm__) || defined(__TARGET_ARCH_ARM)
#if defined(__ARM_ARCH_7__) \\
|| defined(__ARM_ARCH_7A__) \\
|| defined(__ARM_ARCH_7R__) \\
|| defined(__ARM_ARCH_7M__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 7)
#error cmake_ARCH armv7
#elif defined(__ARM_ARCH_6__) \\
|| defined(__ARM_ARCH_6J__) \\
|| defined(__ARM_ARCH_6T2__) \\
|| defined(__ARM_ARCH_6Z__) \\
|| defined(__ARM_ARCH_6K__) \\
|| defined(__ARM_ARCH_6ZK__) \\
|| defined(__ARM_ARCH_6M__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 6)
#error cmake_ARCH armv6
#elif defined(__ARM_ARCH_5TEJ__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 5)
#error cmake_ARCH armv5
#else
#error cmake_ARCH arm
#endif
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
#error cmake_ARCH i386
#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64)
#error cmake_ARCH x86_64
#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
#error cmake_ARCH ia64
#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \\
|| defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \\
|| defined(_M_MPPC) || defined(_M_PPC)
#if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
#error cmake_ARCH ppc64
#else
#error cmake_ARCH ppc
#endif
#endif
#error cmake_ARCH unknown
")
# Set ppc_support to TRUE before including this file or ppc and ppc64
# will be treated as invalid architectures since they are no longer supported by Apple
function(target_architecture output_var)
if(APPLE AND CMAKE_OSX_ARCHITECTURES)
# On OS X we use CMAKE_OSX_ARCHITECTURES *if* it was set
# First let's normalize the order of the values
# Note that it's not possible to compile PowerPC applications if you are using
# the OS X SDK version 10.6 or later - you'll need 10.4/10.5 for that, so we
# disable it by default
# See this page for more information:
# http://stackoverflow.com/questions/5333490/how-can-we-restore-ppc-ppc64-as-well-as-full-10-4-10-5-sdk-support-to-xcode-4
# Architecture defaults to i386 or ppc on OS X 10.5 and earlier, depending on the CPU type detected at runtime.
# On OS X 10.6+ the default is x86_64 if the CPU supports it, i386 otherwise.
foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES})
if("${osx_arch}" STREQUAL "ppc" AND ppc_support)
set(osx_arch_ppc TRUE)
elseif("${osx_arch}" STREQUAL "i386")
set(osx_arch_i386 TRUE)
elseif("${osx_arch}" STREQUAL "x86_64")
set(osx_arch_x86_64 TRUE)
elseif("${osx_arch}" STREQUAL "ppc64" AND ppc_support)
set(osx_arch_ppc64 TRUE)
else()
message(FATAL_ERROR "Invalid OS X arch name: ${osx_arch}")
endif()
endforeach()
# Now add all the architectures in our normalized order
if(osx_arch_ppc)
list(APPEND ARCH ppc)
endif()
if(osx_arch_i386)
list(APPEND ARCH i386)
endif()
if(osx_arch_x86_64)
list(APPEND ARCH x86_64)
endif()
if(osx_arch_ppc64)
list(APPEND ARCH ppc64)
endif()
else()
file(WRITE "${CMAKE_BINARY_DIR}/arch.c" "${archdetect_c_code}")
enable_language(C)
# Detect the architecture in a rather creative way...
# This compiles a small C program which is a series of ifdefs that selects a
# particular #error preprocessor directive whose message string contains the
# target architecture. The program will always fail to compile (both because
# file is not a valid C program, and obviously because of the presence of the
# #error preprocessor directives... but by exploiting the preprocessor in this
# way, we can detect the correct target architecture even when cross-compiling,
# since the program itself never needs to be run (only the compiler/preprocessor)
try_run(
run_result_unused
compile_result_unused
"${CMAKE_BINARY_DIR}"
"${CMAKE_BINARY_DIR}/arch.c"
COMPILE_OUTPUT_VARIABLE ARCH
CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES}
)
# Parse the architecture name from the compiler output
string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}")
# Get rid of the value marker leaving just the architecture name
string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}")
# If we are compiling with an unknown architecture this variable should
# already be set to "unknown" but in the case that it's empty (i.e. due
# to a typo in the code), then set it to unknown
if (NOT ARCH)
set(ARCH unknown)
endif()
endif()
set(${output_var} "${ARCH}" PARENT_SCOPE)
endfunction() cmake-utils/utils.cmake 0000664 0000000 0000000 00000022364 13477012671 0015455 0 ustar 00root root 0000000 0000000 include(CheckCXXCompilerFlag)
###################################################
# Activate C++14
#
# Uses: target_activate_cpp14(buildtarget)
###################################################
function(target_activate_cpp14 TARGET)
if("${CMAKE_VERSION}" VERSION_GREATER "3.1")
set_property(TARGET ${TARGET} PROPERTY CXX_STANDARD 14)
set_property(TARGET ${TARGET} PROPERTY CXX_STANDARD_REQUIRED ON)
else("${CMAKE_VERSION}" VERSION_GREATER "3.1")
check_cxx_compiler_flag("-std=c++14" COMPILER_HAS_CPP14_SUPPORT)
if (COMPILER_HAS_CPP14_SUPPORT)
target_compile_options(${TARGET} PRIVATE -std=c++14)
else(COMPILER_HAS_CPP14_SUPPORT)
check_cxx_compiler_flag("-std=c++1y" COMPILER_HAS_CPP14_PARTIAL_SUPPORT)
if (COMPILER_HAS_CPP14_PARTIAL_SUPPORT)
target_compile_options(${TARGET} PRIVATE -std=c++1y)
else()
message(FATAL_ERROR "Compiler doesn't support C++14")
endif()
endif(COMPILER_HAS_CPP14_SUPPORT)
endif("${CMAKE_VERSION}" VERSION_GREATER "3.1")
# Ideally, we'd like to use libc++ on linux as well, but:
# - http://stackoverflow.com/questions/37096062/get-a-basic-c-program-to-compile-using-clang-on-ubuntu-16
# - https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=808086
# so only use it on Apple systems...
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE)
target_compile_options(${TARGET} PUBLIC -stdlib=libc++)
endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE)
endfunction(target_activate_cpp14)
# Find clang-tidy executable (for use in target_enable_style_warnings)
if (USE_CLANG_TIDY)
find_program(
CLANG_TIDY_EXE
NAMES "clang-tidy"
DOC "Path to clang-tidy executable"
)
if(NOT CLANG_TIDY_EXE)
message(FATAL_ERROR "clang-tidy not found. Please install clang-tidy or run without -DUSE_CLANG_TIDY=on.")
else()
set(CLANG_TIDY_OPTIONS "-system-headers=0")
if (CLANG_TIDY_WARNINGS_AS_ERRORS)
set(CLANG_TIDY_OPTIONS "${CLANG_TIDY_OPTIONS}" "-warnings-as-errors=*")
endif()
message(STATUS "Clang-tidy is enabled. Executable: ${CLANG_TIDY_EXE} Arguments: ${CLANG_TIDY_OPTIONS}")
set(CLANG_TIDY_CLI "${CLANG_TIDY_EXE}" "${CLANG_TIDY_OPTIONS}")
endif()
endif()
# Find iwyu (for use in target_enable_style_warnings)
if (USE_IWYU)
find_program(
IWYU_EXE NAMES
include-what-you-use
iwyu
)
if(NOT IWYU_EXE)
message(FATAL_ERROR "include-what-you-use not found. Please install iwyu or run without -DUSE_IWYU=on.")
else()
message(STATUS "iwyu found: ${IWYU_EXE}")
set(DO_IWYU "${IWYU_EXE}")
endif()
endif()
#################################################
# Enable style compiler warnings
#
# Uses: target_enable_style_warnings(buildtarget)
#################################################
function(target_enable_style_warnings TARGET)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
# TODO
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
target_compile_options(${TARGET} PRIVATE -Wall -Wextra -Wold-style-cast -Wcast-align -Wno-unused-command-line-argument) # TODO consider -Wpedantic -Wchkp -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-include-dirs -Wnoexcept -Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wundef -Wno-unused -Wno-variadic-macros -Wno-parentheses -fdiagnostics-show-option -Wconversion and others?
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
target_compile_options(${TARGET} PRIVATE -Wall -Wextra -Wold-style-cast -Wcast-align -Wno-maybe-uninitialized) # TODO consider -Wpedantic -Wchkp -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-include-dirs -Wnoexcept -Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wundef -Wno-unused -Wno-variadic-macros -Wno-parentheses -fdiagnostics-show-option -Wconversion and others?
endif()
if (USE_WERROR)
target_compile_options(${TARGET} PRIVATE -Werror)
endif()
# Enable clang-tidy
if(USE_CLANG_TIDY)
set_target_properties(
${TARGET} PROPERTIES
CXX_CLANG_TIDY "${CLANG_TIDY_CLI}"
)
endif()
if(USE_IWYU)
set_target_properties(
${TARGET} PROPERTIES
CXX_INCLUDE_WHAT_YOU_USE "${DO_IWYU}"
)
endif()
endfunction(target_enable_style_warnings)
##################################################
# Add boost to the project
#
# Uses:
# target_add_boost(buildtarget) # if you're only using header-only boost libs
# target_add_boost(buildtarget system filesystem) # list all libraries to link against in the dependencies
##################################################
function(target_add_boost TARGET)
# Load boost libraries
if(NOT DEFINED Boost_USE_STATIC_LIBS OR Boost_USE_STATIC_LIBS)
# Many supported systems don't have boost >= 1.65.1. Better link it statically.
message(STATUS "Boost will be statically linked")
set(Boost_USE_STATIC_LIBS ON)
else(NOT DEFINED Boost_USE_STATIC_LIBS OR Boost_USE_STATIC_LIBS)
message(STATUS "Boost will be dynamically linked")
set(Boost_USE_STATIC_LIBS OFF)
endif(NOT DEFINED Boost_USE_STATIC_LIBS OR Boost_USE_STATIC_LIBS)
set(BOOST_THREAD_VERSION 4)
find_package(Boost 1.65.1
REQUIRED
COMPONENTS ${ARGN})
target_include_directories(${TARGET} SYSTEM PUBLIC ${Boost_INCLUDE_DIRS})
target_link_libraries(${TARGET} PUBLIC ${Boost_LIBRARIES})
target_compile_definitions(${TARGET} PUBLIC BOOST_THREAD_VERSION=4)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# Also link to rt, because boost thread needs that.
target_link_libraries(${TARGET} PUBLIC rt)
endif(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
endfunction(target_add_boost)
##################################################
# Specify that a specific minimal version of gcc is required
#
# Uses:
# require_gcc_version(4.9)
##################################################
function(require_gcc_version VERSION)
if (CMAKE_COMPILER_IS_GNUCXX)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
if (GCC_VERSION VERSION_LESS ${VERSION})
message(FATAL_ERROR "Needs at least gcc version ${VERSION}, found gcc ${GCC_VERSION}")
endif (GCC_VERSION VERSION_LESS ${VERSION})
endif (CMAKE_COMPILER_IS_GNUCXX)
endfunction(require_gcc_version)
##################################################
# Specify that a specific minimal version of clang is required
#
# Uses:
# require_clang_version(3.5)
##################################################
function(require_clang_version VERSION)
if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${VERSION})
message(FATAL_ERROR "Needs at least clang version ${VERSION}, found clang ${CMAKE_CXX_COMPILER_VERSION}")
endif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${VERSION})
endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
endfunction(require_clang_version)
##################################################
# Find the location of a library and return its full path in OUTPUT_VARIABLE.
# If PATH_VARIABLE points to a defined variable, then the library will only be searched in this path.
# If PATH_VARIABLE points to a undefined variable, default system locations will be searched.
#
# Uses (the following will search for fuse in system locations by default, and if the user passes -DFUSE_LIB_PATH to cmake, it will only search in this path.
# find_library_with_path(MYLIBRARY fuse FUSE_LIB_PATH)
# target_link_library(target ${MYLIBRARY})
##################################################
function(find_library_with_path OUTPUT_VARIABLE LIBRARY_NAME PATH_VARIABLE)
if(${PATH_VARIABLE})
find_library(${OUTPUT_VARIABLE} ${LIBRARY_NAME} PATHS ${${PATH_VARIABLE}} NO_DEFAULT_PATH)
if (${OUTPUT_VARIABLE} MATCHES NOTFOUND)
message(FATAL_ERROR "Didn't find ${LIBRARY_NAME} in path specified by the ${PATH_VARIABLE} parameter (${${PATH_VARIABLE}}). Pass in the correct path or remove the parameter to try common system locations.")
else(${OUTPUT_VARIABLE} MATCHES NOTFOUND)
message(STATUS "Found ${LIBRARY_NAME} in user-defined path ${${PATH_VARIABLE}}")
endif(${OUTPUT_VARIABLE} MATCHES NOTFOUND)
else(${PATH_VARIABLE})
find_library(${OUTPUT_VARIABLE} ${LIBRARY_NAME})
if (${OUTPUT_VARIABLE} MATCHES NOTFOUND)
message(FATAL_ERROR "Didn't find ${LIBRARY_NAME} library. If ${LIBRARY_NAME} is installed, try passing in the library location with -D${PATH_VARIABLE}=/path/to/${LIBRARY_NAME}/lib.")
else(${OUTPUT_VARIABLE} MATCHES NOTFOUND)
message(STATUS "Found ${LIBRARY_NAME} in system location")
endif(${OUTPUT_VARIABLE} MATCHES NOTFOUND)
endif(${PATH_VARIABLE})
endfunction(find_library_with_path)
include(cmake-utils/TargetArch.cmake)
function(get_target_architecture output_var)
target_architecture(local_output_var)
set(${output_var} ${local_output_var} PARENT_SCOPE)
endfunction()
cpack/ 0000775 0000000 0000000 00000000000 13477012671 0012147 5 ustar 00root root 0000000 0000000 cpack/CMakeLists.txt 0000664 0000000 0000000 00000007573 13477012671 0014723 0 ustar 00root root 0000000 0000000 # appends a build number from the APPVEYOR_BUILD_NUMBER environment variable as fourth component to a version number,
# i.e. "0.10" becomes "0.10.0.[buildnumber]", "1" becomes "1.0.0.[buildnumber]".
function(append_build_number VERSION_NUMBER OUTPUT)
string(REPLACE "." ";" VERSION_COMPONENTS ${STRIPPED_VERSION_NUMBER})
list(LENGTH VERSION_COMPONENTS NUM_VERSION_COMPONENTS)
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 0)
message(FATAL_ERROR "Didn't find any version components")
endif()
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 1)
string(APPEND STRIPPED_VERSION_NUMBER ".0")
endif()
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 2)
string(APPEND STRIPPED_VERSION_NUMBER ".0")
endif()
if (NOT $ENV{APPVEYOR_BUILD_NUMBER} STREQUAL "")
string(APPEND STRIPPED_VERSION_NUMBER ".$ENV{APPVEYOR_BUILD_NUMBER}")
endif()
set(${OUTPUT} "${STRIPPED_VERSION_NUMBER}" PARENT_SCOPE)
endfunction()
if("${CMAKE_VERSION}" VERSION_LESS "3.3")
# Earlier cmake versions generate .deb packages for which the package manager says they're bad quality
# and asks the user whether they really want to install it. Cmake 3.3 fixes this.
message(WARNING "Distribution package generation is only supported for CMake version >= 3.3. You're using ${CMAKE_VERSION}. You will be able to build and install CryFS, but you won't be able to generate .deb packages.")
else()
# Fix debfiles permissions. Unfortunately, git doesn't store file permissions.
# When installing the .deb package and these files have the wrong permissions, the package manager complains.
execute_process(COMMAND /bin/bash -c "chmod 0755 ${CMAKE_CURRENT_SOURCE_DIR}/debfiles/*")
set(CPACK_PACKAGE_NAME "cryfs")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Encrypt your files and store them in the cloud.")
set(CPACK_PACKAGE_DESCRIPTION "CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.")
set(CPACK_PACKAGE_CONTACT "Sebastian Messmer ")
set(CPACK_PACKAGE_VENDOR "Sebastian Messmer")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/../LICENSE.txt")
get_git_version(GITVERSION_VERSION_STRING)
if(WIN32 AND NOT UNIX)
set(CPACK_GENERATOR WIX)
string(REGEX REPLACE "^([0-9\\.]+)([-+][0-9\\.a-zA-Z+-]+)?$" "\\1" STRIPPED_VERSION_NUMBER "${GITVERSION_VERSION_STRING}")
append_build_number(${STRIPPED_VERSION_NUMBER} WIX_VERSION_NUMBER)
message(STATUS "WIX package version is ${WIX_VERSION_NUMBER}")
set(CPACK_PACKAGE_VERSION "${WIX_VERSION_NUMBER}")
set(CPACK_WIX_UPGRADE_GUID "8b872ce1-557d-48e6-ac57-9f5e574feabf")
set(CPACK_WIX_PRODUCT_GUID "26116061-4f99-4c44-a178-2153fa396308")
#set(CPACK_WIX_PRODUCT_ICON "...")
set(CPACK_WIX_PROPERTY_ARPURLINFOABOUT "https://www.cryfs.org")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CryFS/${GITVERSION_VERSION_STRING}")
set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_SOURCE_DIR}/wix/change_path_env.xml")
else()
set(CPACK_GENERATOR TGZ DEB RPM)
set(CPACK_PACKAGE_VERSION "${GITVERSION_VERSION_STRING}")
set(CPACK_STRIP_FILES OFF)
set(CPACK_SOURCE_STRIP_FILES OFF)
endif()
set(CPACK_PACKAGE_EXECUTABLES "cryfs" "CryFS")
set(CPACK_DEBIAN_PACKAGE_SECTION "utils")
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
# Needs gnupg2, lsb-release for postinst script
set(CPACK_DEBIAN_PACKAGE_DEPENDS "fuse, gnupg2, lsb-release")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.cryfs.org")
set(CPACK_RPM_PACKAGE_LICENSE "LGPLv3")
set(CPACK_RPM_PACKAGE_DESCRIPTION ${CPACK_PACKAGE_DESCRIPTION})
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/usr/bin;/usr/share/man;/usr/share/man/man1")
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postinst;${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postrm")
include(CPack)
endif()
cpack/debfiles/ 0000775 0000000 0000000 00000000000 13477012671 0013724 5 ustar 00root root 0000000 0000000 cpack/debfiles/postinst 0000775 0000000 0000000 00000011316 13477012671 0015537 0 ustar 00root root 0000000 0000000 #!/bin/bash
# This script is called after the cryfs .deb package is installed.
# It sets up the package source so the user gets automatic updates for cryfs.
# DEVELOPER WARNING: There is a lot of redundancy between this file and the install.sh script in the cryfs-web repository. Please port modifications to there!
set -e
DEBIAN_REPO_URL="http://apt.cryfs.org/debian"
UBUNTU_REPO_URL="http://apt.cryfs.org/ubuntu"
DISTRIBUTION=`lsb_release -s -i`
DISTRIBUTION_VERSION=`lsb_release -s -c`
containsElement () {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
return 1
}
get_repo_url () {
if [[ "$DISTRIBUTION" == "Debian" ]] || [[ "$DISTRIBUTION" == "Devuan" ]]; then
echo $DEBIAN_REPO_URL
elif [[ "$DISTRIBUTION" == "Ubuntu" ]]; then
echo $UBUNTU_REPO_URL
else
echo Not adding package source because $DISTRIBUTION is not supported. Please keep CryFS manually up to date. 1>&2
exit 0
fi
}
get_apt_config () {
apt-config dump|grep "$1 "|sed -e "s/^$1\ \"\([^\"]*\)\"\;/\1/g"
}
sources_list_dir () {
root=$(get_apt_config "Dir")
etc=$(get_apt_config "Dir::Etc")
sourceparts=$(get_apt_config "Dir::Etc::sourceparts")
echo "$root/$etc/$sourceparts"
}
add_repository () {
dir=$(sources_list_dir)
repo_url=$(get_repo_url)
echo "deb $repo_url $DISTRIBUTION_VERSION main" > $dir/cryfs.list
}
install_key () {
# Key from http://www.cryfs.org/apt.key
apt-key add - > /dev/null <&2
exit 1
;;
esac
set +e
exit 0
cpack/debfiles/postrm 0000775 0000000 0000000 00000001546 13477012671 0015204 0 ustar 00root root 0000000 0000000 #!/bin/bash
# This script is called after the cryfs .deb package is uninstalled.
# It removes the package source that was used to get automatic updates.
set -e
get_apt_config () {
apt-config dump|grep "$1 "|sed -e "s/^$1\ \"\([^\"]*\)\"\;/\1/g"
}
sources_list_dir () {
root=$(get_apt_config "Dir")
etc=$(get_apt_config "Dir::Etc")
sourceparts=$(get_apt_config "Dir::Etc::sourceparts")
echo $root$etc$sourceparts
}
remove_repository () {
dir=$(sources_list_dir)
rm -f $dir/cryfs.list
}
remove_key () {
# Don't fail if key was already removed
apt-key rm 549E65B2 2>&1 > /dev/null || true
}
case "$1" in
purge)
remove_repository
remove_key
;;
abort-install|abort-upgrade|remove|upgrade|failed-upgrade)
;;
*)
echo "postrm called with unknown argument '$1'" >&2
exit 1
;;
esac
set +e
exit 0
cpack/wix/ 0000775 0000000 0000000 00000000000 13477012671 0012756 5 ustar 00root root 0000000 0000000 cpack/wix/change_path_env.xml 0000664 0000000 0000000 00000000327 13477012671 0016613 0 ustar 00root root 0000000 0000000
doc/ 0000775 0000000 0000000 00000000000 13477012671 0011633 5 ustar 00root root 0000000 0000000 doc/CMakeLists.txt 0000664 0000000 0000000 00000001041 13477012671 0014367 0 ustar 00root root 0000000 0000000 project (doc)
IF (WIN32)
MESSAGE(STATUS "This is Windows. Will not install man page")
ELSE (WIN32)
INCLUDE(GNUInstallDirs)
find_program(GZIP gzip)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
COMMAND ${GZIP} -c ${CMAKE_CURRENT_SOURCE_DIR}/man/cryfs.1 > ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
)
add_custom_target(man ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1
CONFIGURATIONS Release
)
ENDIF(WIN32)
doc/man/ 0000775 0000000 0000000 00000000000 13477012671 0012406 5 ustar 00root root 0000000 0000000 doc/man/cryfs.1 0000664 0000000 0000000 00000015463 13477012671 0013627 0 ustar 00root root 0000000 0000000 .\" cryfs(1) man page
.
.TH cryfs 1
.
.
.
.SH NAME
cryfs \- cryptographic filesystem for the cloud
.
.
.
.SH SYNOPSIS
.\" mount/create syntax
.B cryfs
[\fB\-c\fR \fIfile\fR]
[\fB\-f\fR]
[\fIoptions\fR]
.I basedir mountpoint
[\fB\-\-\fR \fIfuse-options\fR]
.br
.\" show-ciphers syntax
.B cryfs \-\-help\fR|\fB\-\-version\fR|\fB\-\-show-ciphers
.
.
.
.SH DESCRIPTION
.
.B CryFS
encrypts your files, so you can safely store them anywhere.
.PP
.
The goal of CryFS is not only to keep file contents, but also
file sizes, metadata and directory structure confidential.
CryFS uses
.B encrypted same-size blocks
to store both the files themselves and the block's relations to another.
These blocks are stored as individual files in the base directory,
which can then be synchronized with cloud services such as Dropbox.
.PP
.
The blocks are encrypted using a random key, which is stored in a
.B configuration file
encrypted by the user's passphrase.
By default, it will be stored together with the data in the base directory,
but you can choose a different location if you do not want it in your cloud
or when using a weak passphrase.
.
.
.
.SH USING CRYFS
.
.SS Selecting base and mount directories
.
While you can access your files through your
.B mount directory,
CryFS actually places them in your
.B base directory
after encrypting.
CryFS will encrypt and decrypt your files 'on the fly' as they are accessed,
so files will never be stored on the disk in unencrypted form.
.PP
.
You can choose any empty directory as your base, but your mount directory
should be outside of any cloud storage, as your cloud may try to sync your
(temporarily mounted) unencrypted files as well.
.
.SS Setup and usage of your encrypted directory
.
.TP
Creating and mounting your encrypted storage use the same command-line syntax:
.B cryfs
.I basedir mountpoint
.PP
.
If CryFS detects an encrypted storage in the given base directory, you will
be asked for the passphrase to unlock and mount it. Otherwise, CryFS will
help you with creating one, just follow the on-screen instructions.
.PP
.
.TP
After you are done working with your encrypted files, unmount your storage \
with the command
.B fusermount -u
.I mountpoint
.
.
.SS Changing your passphrase
.
As the encryption key to your CryFS storage is stored in your configuration
file, it would be possible to re-encrypt it using a different passphrase
(although this feature has not been implemented yet).
.PP
.
However, this does not change the actual encryption key of your storage, so
someone with access to the old passphrase and configuration file (for example
through the file history of your cloud or your file system) could still access
your files, even those created after the password change.
.PP
.
For this reason, the recommended way to change your passphrase is to create a
new CryFS storage with the new passphrase and move your files from the old to
the new one.
.
.
.
.SH OPTIONS
.
.SS Getting help
.
.TP
\fB\-h\fR, \fB\-\-help\fR
.
Show a help message containing short descriptions for all options.
.
.
.TP
\fB\-\-show\-ciphers\fR
.
Show a list of all supported encryption ciphers.
.
.
.TP
\fB\-\-version\fR
.
Show the CryFS version number.
.
.
.SS Encryption parameters
.
.TP
\fB\-\-blocksize\fR \fIarg\fR
.
Set the block size to \fIarg\fR bytes. Defaults to
.BR 32768 .
.br
\" Intentional space
.br
A higher block size may help reducing the file count in your base directory
(especially when storing large files), but will also waste more space when
storing smaller files.
.
.
.TP
\fB\-\-cipher\fR \fIarg\fR
.
Use \fIarg\fR as the cipher for the encryption. Defaults to
.BR aes-256-gcm .
.
.
.TP
\fB\-c\fR \fIfile\fR, \fB\-\-config\fR \fIfile\fR
.
Use \fIfile\fR as configuration file for this CryFS storage instead of
\fIbasedir\fR/cryfs.config
.
.
.SS General options
.
.TP
\fB\-f\fR, \fB\-\-foreground\fI
.
Run CryFS in the foreground. Stop using CTRL-C.
.
.
.TP
\fB\-\-allow-filesystem-upgrade\fI
.
Allow upgrading the file system if it was created with an old CryFS version. After the upgrade, older CryFS versions might not be able to use the file system anymore.
.
.
.TP
\fB\-\-allow-integrity-violations\fI
.
By default, CryFS checks for integrity violations, i.e. will notice if an adversary modified or rolled back the file system. Using this flag, you can disable the integrity checks. This can for example be helpful for loading an old snapshot of your file system without CryFS thinking an adversary rolled it back.
.
.
.TP
\fB\-\-allow-replaced-filesystem\fI
.
By default, CryFS remembers file systems it has seen in this base directory and checks that it didn't get replaced by an attacker with an entirely different file system since the last time it was loaded. However, if you do want to replace the file system with an entirely new one, you can pass in this option to disable the check.
.
.
.TP
\fB\-\-missing-block-is-integrity-violation\fR=true
.
When CryFS encounters a missing ciphertext block, it cannot cannot (yet) know if it was deleted by an unauthorized adversary or by a second authorized client. This is one of the restrictions of the integrity checks currently in place. You can enable this flag to treat missing ciphertext blocks as integrity violations, but then your file system will not be usable by multiple clients anymore. By default, this flag is disabled.
.
.
.TP
\fB\-\-logfile\fR \fIfile\fR
.
Write status information to \fIfile\fR. If no logfile is given, CryFS will
write them to syslog in background mode, or to stdout in foreground mode.
.
.
.TP
\fB\-\-unmount\-idle\fR \fIarg\fR
.
Unmount automatically after \fIarg\fR minutes of inactivity.
.
.
.
.SH ENVIRONMENT
.
.TP
\fBCRYFS_FRONTEND\fR=noninteractive
.
With this option set, CryFS will only ask for the encryption passphrase once.
Instead of asking the user for parameters not specified on the command line,
it will just use the default values. CryFS will also not ask you to confirm
your passphrase when creating a new CryFS storage.
.br
\" Intentional space
.br
Set this environment variable when automating CryFS using external tools or
shell scripts.
.
.
.TP
\fBCRYFS_NO_UPDATE_CHECK\fR=true
.
By default, CryFS connects to the internet to check for known security
vulnerabilities and new versions. This option disables this.
.
.
.TP
\fBCRYFS_LOCAL_STATE_DIR\fR=[path]
.
Sets the directory cryfs uses to store local state. This local state
is used to recognize known file systems and run integrity checks
(i.e. check that they haven't been modified by an attacker.
Default value: ${HOME}/.cryfs
.
.
.
.SH SEE ALSO
.
.BR mount.fuse (1),
.BR fusermount (1)
.PP
.
For more information about the design of CryFS, visit
.B https://www.cryfs.org
.PP
.
Visit the development repository at
.B https://github.com/cryfs/cryfs
for the source code and the full list of contributors to CryFS.
.
.
.
.SH AUTHORS
.
CryFS was created by Sebastian Messmer and contributors.
This man page was written by Maximilian Wende.
run-clang-tidy.sh 0000775 0000000 0000000 00000001672 13477012671 0014270 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
# Examples:
# mkdir cmake && cd cmake && ../run-clang-tidy.sh
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -fix
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -export-fixes fixes.yaml
set -e
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
echo Using ${NUMCORES} cores
# Run cmake in current working directory, but on source that is in the same directory as this script file
cmake -DBUILD_TESTING=on -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"
# Filter all third party code from the compilation database
cat compile_commands.json|jq "map(select(.file | test(\"^$(realpath ${0%/*})/(src|test)/.*$\")))" > compile_commands2.json
rm compile_commands.json
mv compile_commands2.json compile_commands.json
run-clang-tidy.py -j${NUMCORES} -quiet -header-filter "$(realpath ${0%/*})/(src|test)/.*" $@
run-iwyu.sh 0000775 0000000 0000000 00000002045 13477012671 0013225 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
# Examples:
# mkdir cmake && cd cmake && ../run-iwqu.sh
# mkdir cmake && cd cmake && ../run-iwqu.sh -fix
set -e
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
echo Using ${NUMCORES} cores
# Run cmake in current working directory, but on source that is in the same directory as this script file
cmake -DBUILD_TESTING=on -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"
# Filter all third party code from the compilation database
cat compile_commands.json|jq "map(select(.file | test(\"^$(realpath ${0%/*})/(src|test)/.*$\")))" > compile_commands2.json
rm compile_commands.json
mv compile_commands2.json compile_commands.json
if [ "$1" = "-fix" ]; then
TMPFILE=/tmp/iwyu.`cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8`.out
function cleanup {
rm ${TMPFILE}
}
trap cleanup EXIT
iwyu_tool -j${NUMCORES} -p. ${@:2} | tee ${TMPFILE}
fix_include < ${TMPFILE}
else
iwyu_tool -j${NUMCORES} -p. $@
fi
src/ 0000775 0000000 0000000 00000000000 13477012671 0011655 5 ustar 00root root 0000000 0000000 src/CMakeLists.txt 0000664 0000000 0000000 00000000515 13477012671 0014416 0 ustar 00root root 0000000 0000000 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory(gitversion)
add_subdirectory(cpp-utils)
add_subdirectory(fspp)
add_subdirectory(parallelaccessstore)
add_subdirectory(blockstore)
add_subdirectory(blobstore)
add_subdirectory(cryfs)
add_subdirectory(cryfs-cli)
add_subdirectory(cryfs-unmount)
add_subdirectory(stats)
src/blobstore/ 0000775 0000000 0000000 00000000000 13477012671 0013650 5 ustar 00root root 0000000 0000000 src/blobstore/CMakeLists.txt 0000664 0000000 0000000 00000002425 13477012671 0016413 0 ustar 00root root 0000000 0000000 project (blobstore)
set(SOURCES
implementations/onblocks/parallelaccessdatatreestore/ParallelAccessDataTreeStoreAdapter.cpp
implementations/onblocks/parallelaccessdatatreestore/DataTreeRef.cpp
implementations/onblocks/parallelaccessdatatreestore/ParallelAccessDataTreeStore.cpp
implementations/onblocks/utils/Math.cpp
implementations/onblocks/BlobStoreOnBlocks.cpp
implementations/onblocks/datanodestore/DataNode.cpp
implementations/onblocks/datanodestore/DataLeafNode.cpp
implementations/onblocks/datanodestore/DataInnerNode.cpp
implementations/onblocks/datanodestore/DataNodeStore.cpp
implementations/onblocks/datatreestore/impl/algorithms.cpp
implementations/onblocks/datatreestore/impl/CachedValue.cpp
implementations/onblocks/datatreestore/impl/LeafTraverser.cpp
implementations/onblocks/datatreestore/LeafHandle.cpp
implementations/onblocks/datatreestore/DataTree.cpp
implementations/onblocks/datatreestore/DataTreeStore.cpp
implementations/onblocks/BlobOnBlocks.cpp
)
add_library(${PROJECT_NAME} STATIC ${SOURCES})
target_link_libraries(${PROJECT_NAME} PUBLIC cpp-utils blockstore)
target_add_boost(${PROJECT_NAME} filesystem system thread)
target_enable_style_warnings(${PROJECT_NAME})
target_activate_cpp14(${PROJECT_NAME})
src/blobstore/implementations/ 0000775 0000000 0000000 00000000000 13477012671 0017060 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/ 0000775 0000000 0000000 00000000000 13477012671 0020672 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/BlobOnBlocks.cpp 0000664 0000000 0000000 00000003115 13477012671 0023707 0 ustar 00root root 0000000 0000000 #include "parallelaccessdatatreestore/DataTreeRef.h"
#include "BlobOnBlocks.h"
#include "datanodestore/DataLeafNode.h"
#include "datanodestore/DataNodeStore.h"
#include "utils/Math.h"
#include
#include
#include "datatreestore/LeafHandle.h"
using cpputils::unique_ref;
using cpputils::Data;
using blockstore::BlockId;
namespace blobstore {
namespace onblocks {
using parallelaccessdatatreestore::DataTreeRef;
// NOTE(review): angle-bracket content (template arguments, e.g. the type
// parameter of unique_ref) appears stripped by text extraction throughout
// this file -- confirm against the upstream CryFS sources.

// Takes ownership of the data tree that backs this blob.
BlobOnBlocks::BlobOnBlocks(unique_ref datatree)
: _datatree(std::move(datatree)) {
}
// Out-of-line destructor; kept non-defaulted per the linked GCC bug workaround.
BlobOnBlocks::~BlobOnBlocks() {
} // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )
// Number of bytes currently stored in the blob (delegates to the tree).
uint64_t BlobOnBlocks::size() const {
return _datatree->numBytes();
}
// Grows or shrinks the blob to exactly numBytes.
void BlobOnBlocks::resize(uint64_t numBytes) {
_datatree->resizeNumBytes(numBytes);
}
// Reads the whole blob content into a newly allocated Data buffer.
Data BlobOnBlocks::readAll() const {
return _datatree->readAllBytes();
}
// Reads exactly count bytes at offset into target (delegates to the tree).
void BlobOnBlocks::read(void *target, uint64_t offset, uint64_t count) const {
return _datatree->readBytes(target, offset, count);
}
// Reads up to count bytes at offset; returns the number of bytes actually read.
uint64_t BlobOnBlocks::tryRead(void *target, uint64_t offset, uint64_t count) const {
return _datatree->tryReadBytes(target, offset, count);
}
// Writes count bytes from source at offset (delegates to the tree).
void BlobOnBlocks::write(const void *source, uint64_t offset, uint64_t count) {
_datatree->writeBytes(source, offset, count);
}
// Flushes pending changes of the underlying tree to the block store.
void BlobOnBlocks::flush() {
_datatree->flush();
}
// Number of tree nodes (inner + leaf) this blob currently occupies.
uint32_t BlobOnBlocks::numNodes() const {
return _datatree->numNodes();
}
// The block id of the tree's root node; identifies this blob.
const BlockId &BlobOnBlocks::blockId() const {
return _datatree->blockId();
}
// Releases ownership of the underlying tree; the blob must not be used afterwards.
unique_ref BlobOnBlocks::releaseTree() {
return std::move(_datatree);
}
}
}
src/blobstore/implementations/onblocks/BlobOnBlocks.h 0000664 0000000 0000000 00000003310 13477012671 0023351 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_
#include "../../interface/Blob.h"
#include "datatreestore/LeafHandle.h"
#include
#include
#include
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataLeafNode;
}
namespace parallelaccessdatatreestore {
class DataTreeRef;
}
// Blob implementation backed by a tree of fixed-size blocks (DataTreeRef).
// All operations delegate to the underlying data tree.
// NOTE(review): template arguments of unique_ref/std::function appear stripped
// by text extraction -- confirm against the upstream sources.
class BlobOnBlocks final: public Blob {
public:
// Takes ownership of the backing data tree.
BlobOnBlocks(cpputils::unique_ref datatree);
~BlobOnBlocks();
// Block id of the tree's root node; identifies this blob.
const blockstore::BlockId &blockId() const override;
// Current size of the blob in bytes.
uint64_t size() const override;
// Grows or shrinks the blob to exactly numBytes.
void resize(uint64_t numBytes) override;
// Reads the full content into a new buffer.
cpputils::Data readAll() const override;
// Reads exactly `size` bytes at `offset` into target.
void read(void *target, uint64_t offset, uint64_t size) const override;
// Reads up to `size` bytes at `offset`; returns bytes actually read.
uint64_t tryRead(void *target, uint64_t offset, uint64_t size) const override;
// Writes `size` bytes from source at `offset`.
void write(const void *source, uint64_t offset, uint64_t size) override;
// Flushes pending changes to the block store.
void flush() override;
// Number of tree nodes this blob occupies.
uint32_t numNodes() const override;
// Releases ownership of the backing tree (used e.g. when deleting the blob).
cpputils::unique_ref releaseTree();
private:
uint64_t _tryRead(void *target, uint64_t offset, uint64_t size) const;
void _read(void *target, uint64_t offset, uint64_t count) const;
void _traverseLeaves(uint64_t offsetBytes, uint64_t sizeBytes, std::function onExistingLeaf, std::function onCreateLeaf) const;
// Owned backing tree; unset after releaseTree().
cpputils::unique_ref _datatree;
DISALLOW_COPY_AND_ASSIGN(BlobOnBlocks);
};
}
}
#endif
src/blobstore/implementations/onblocks/BlobStoreOnBlocks.cpp 0000664 0000000 0000000 00000004547 13477012671 0024736 0 ustar 00root root 0000000 0000000 #include "parallelaccessdatatreestore/DataTreeRef.h"
#include "parallelaccessdatatreestore/ParallelAccessDataTreeStore.h"
#include
#include "datanodestore/DataLeafNode.h"
#include "datanodestore/DataNodeStore.h"
#include "datatreestore/DataTreeStore.h"
#include "datatreestore/DataTree.h"
#include "BlobStoreOnBlocks.h"
#include "BlobOnBlocks.h"
#include
#include
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::BlockStore;
using blockstore::parallelaccess::ParallelAccessBlockStore;
using blockstore::BlockId;
using cpputils::dynamic_pointer_move;
using boost::optional;
using boost::none;
namespace blobstore {
namespace onblocks {
using datanodestore::DataNodeStore;
using datatreestore::DataTreeStore;
using parallelaccessdatatreestore::ParallelAccessDataTreeStore;
// Builds the store stack: BlockStore -> DataNodeStore -> DataTreeStore ->
// ParallelAccessDataTreeStore. NOTE(review): the template arguments of the
// nested make_unique_ref calls appear stripped by text extraction.
BlobStoreOnBlocks::BlobStoreOnBlocks(unique_ref blockStore, uint64_t physicalBlocksizeBytes)
: _dataTreeStore(make_unique_ref(make_unique_ref(make_unique_ref(make_unique_ref(std::move(blockStore)), physicalBlocksizeBytes)))) {
}
BlobStoreOnBlocks::~BlobStoreOnBlocks() {
}
// Creates a new empty blob backed by a new tree.
unique_ref BlobStoreOnBlocks::create() {
return make_unique_ref(_dataTreeStore->createNewTree());
}
// Loads an existing blob by its root block id; returns none if not found.
optional> BlobStoreOnBlocks::load(const BlockId &blockId) {
auto tree = _dataTreeStore->load(blockId);
if (tree == none) {
return none;
}
return optional>(make_unique_ref(std::move(*tree)));
}
// Removes a loaded blob; the passed Blob must actually be a BlobOnBlocks.
void BlobStoreOnBlocks::remove(unique_ref blob) {
auto _blob = dynamic_pointer_move(blob);
ASSERT(_blob != none, "Passed Blob in BlobStoreOnBlocks::remove() is not a BlobOnBlocks.");
_dataTreeStore->remove((*_blob)->releaseTree());
}
// Removes a blob by its root block id without loading it first.
void BlobStoreOnBlocks::remove(const BlockId &blockId) {
_dataTreeStore->remove(blockId);
}
// Usable payload size per block (excluding headers/checksums).
uint64_t BlobStoreOnBlocks::virtualBlocksizeBytes() const {
return _dataTreeStore->virtualBlocksizeBytes();
}
// Total number of nodes currently stored.
uint64_t BlobStoreOnBlocks::numBlocks() const {
return _dataTreeStore->numNodes();
}
// Estimate of how many more nodes fit into the underlying storage.
uint64_t BlobStoreOnBlocks::estimateSpaceForNumBlocksLeft() const {
return _dataTreeStore->estimateSpaceForNumNodesLeft();
}
}
}
src/blobstore/implementations/onblocks/BlobStoreOnBlocks.h 0000664 0000000 0000000 00000002750 13477012671 0024375 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_BLOCKED_BLOBSTOREONBLOCKS_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_BLOCKED_BLOBSTOREONBLOCKS_H_
#include "../../interface/BlobStore.h"
#include "BlobOnBlocks.h"
#include
namespace blobstore {
namespace onblocks {
namespace parallelaccessdatatreestore {
class ParallelAccessDataTreeStore;
}
//TODO Make blobstore able to cope with incomplete data (some blocks missing, because they're not synchronized yet) and write test cases for that
// BlobStore implementation that stores each blob as a tree of blocks in an
// underlying BlockStore, accessed through a ParallelAccessDataTreeStore.
class BlobStoreOnBlocks final: public BlobStore {
public:
// Wraps blockStore; physicalBlocksizeBytes is the on-disk block size.
BlobStoreOnBlocks(cpputils::unique_ref blockStore, uint64_t physicalBlocksizeBytes);
~BlobStoreOnBlocks();
// Creates a new empty blob.
cpputils::unique_ref create() override;
// Loads a blob by root block id; none if it doesn't exist.
boost::optional> load(const blockstore::BlockId &blockId) override;
// Removes a loaded blob (must be a BlobOnBlocks).
void remove(cpputils::unique_ref blob) override;
// Removes a blob by id without loading it.
void remove(const blockstore::BlockId &blockId) override;
//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
//virtual means "space we can use" as opposed to "space it takes on the disk" (i.e. virtual is without headers, checksums, ...)
uint64_t virtualBlocksizeBytes() const override;
uint64_t numBlocks() const override;
uint64_t estimateSpaceForNumBlocksLeft() const override;
private:
// Owned tree store; all operations delegate to it.
cpputils::unique_ref _dataTreeStore;
DISALLOW_COPY_AND_ASSIGN(BlobStoreOnBlocks);
};
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/ 0000775 0000000 0000000 00000000000 13477012671 0023526 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/datanodestore/DataInnerNode.cpp 0000664 0000000 0000000 00000006441 13477012671 0026712 0 ustar 00root root 0000000 0000000 #include "DataInnerNode.h"
#include "DataNodeStore.h"
#include
using blockstore::Block;
using blockstore::BlockStore;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::BlockId;
using std::vector;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
// Wraps a node view as an inner node; validates depth and format version.
// Throws std::runtime_error if the stored format version is unsupported.
DataInnerNode::DataInnerNode(DataNodeView view)
: DataNode(std::move(view)) {
ASSERT(depth() > 0, "Inner node can't have depth 0. Is this a leaf maybe?");
if (node().FormatVersion() != FORMAT_VERSION_HEADER) {
throw std::runtime_error("This node format (" + std::to_string(node().FormatVersion()) + ") is not supported. Was it created with a newer version of CryFS?");
}
}
DataInnerNode::~DataInnerNode() {
}
// Initializes an existing (pre-allocated) block as an inner node with the
// given depth and children. NOTE(review): template arguments appear stripped
// by text extraction in this file.
unique_ref DataInnerNode::InitializeNewNode(unique_ref block, const DataNodeLayout &layout, uint8_t depth, const vector &children) {
ASSERT(children.size() >= 1, "An inner node must have at least one child");
Data data = _serializeChildren(children);
return make_unique_ref(DataNodeView::initialize(std::move(block), layout, DataNode::FORMAT_VERSION_HEADER, depth, children.size(), std::move(data)));
}
// Allocates a fresh block in blockStore and creates an inner node in it.
unique_ref DataInnerNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, uint8_t depth, const vector &children) {
ASSERT(children.size() >= 1, "An inner node must have at least one child");
Data data = _serializeChildren(children);
return make_unique_ref(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, depth, children.size(), std::move(data)));
}
// Serializes the child block ids back-to-back into a Data buffer.
Data DataInnerNode::_serializeChildren(const vector &children) {
Data data(sizeof(ChildEntry) * children.size());
uint32_t i = 0;
for (const BlockId &child : children) {
child.ToBinary(data.dataOffset(i * BlockId::BINARY_LENGTH));
++i;
}
return data;
}
// Number of children; stored in the node header's size field.
uint32_t DataInnerNode::numChildren() const {
return node().Size();
}
// Deserializes the child entry at the given index from the data region.
DataInnerNode::ChildEntry DataInnerNode::readChild(unsigned int index) const {
ASSERT(index < numChildren(), "Accessing child out of range");
return ChildEntry(BlockId::FromBinary(static_cast(node().data()) + index * sizeof(ChildEntry)));
}
// Serializes a child entry into the slot at the given index.
void DataInnerNode::_writeChild(unsigned int index, const ChildEntry& child) {
ASSERT(index < numChildren(), "Accessing child out of range");
node().write(child.blockId().data().data(), index * sizeof(ChildEntry), sizeof(ChildEntry));
}
DataInnerNode::ChildEntry DataInnerNode::readLastChild() const {
return readChild(numChildren() - 1);
}
void DataInnerNode::_writeLastChild(const ChildEntry& child) {
_writeChild(numChildren() - 1, child);
}
// Appends a child; the child must be exactly one level below this node.
void DataInnerNode::addChild(const DataNode &child) {
ASSERT(numChildren() < maxStoreableChildren(), "Adding more children than we can store");
ASSERT(child.depth() == depth()-1, "The child that should be added has wrong depth");
node().setSize(node().Size()+1);
_writeLastChild(ChildEntry(child.blockId()));
}
// Removes the last child entry. Overwrites the slot with a null id before
// shrinking so no stale block id remains in the data region.
void DataInnerNode::removeLastChild() {
ASSERT(node().Size() > 1, "There is no child to remove");
_writeLastChild(ChildEntry(BlockId::Null()));
node().setSize(node().Size()-1);
}
// Capacity of the data region in child entries.
uint32_t DataInnerNode::maxStoreableChildren() const {
return node().layout().maxChildrenPerInnerNode();
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataInnerNode.h 0000664 0000000 0000000 00000002547 13477012671 0026362 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_H_
#include "DataNode.h"
#include "DataInnerNode_ChildEntry.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
// Inner node of the blob tree: stores a list of child block ids (no payload).
class DataInnerNode final: public DataNode {
public:
// Initializes a pre-allocated block as an inner node with the given children.
static cpputils::unique_ref InitializeNewNode(cpputils::unique_ref block, const DataNodeLayout &layout, uint8_t depth, const std::vector &children);
// Allocates a new block and creates an inner node in it.
static cpputils::unique_ref CreateNewNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint8_t depth, const std::vector &children);
using ChildEntry = DataInnerNode_ChildEntry;
DataInnerNode(DataNodeView block);
~DataInnerNode();
// Capacity of this node in child entries.
uint32_t maxStoreableChildren() const;
// Reads the child entry at the given index (asserts index in range).
ChildEntry readChild(unsigned int index) const;
ChildEntry readLastChild() const;
uint32_t numChildren() const;
// Appends a child; child must be exactly one level below this node.
void addChild(const DataNode &child_blockId);
void removeLastChild();
private:
void _writeChild(unsigned int index, const ChildEntry& child);
void _writeLastChild(const ChildEntry& child);
// Serializes child block ids back-to-back into a buffer.
static cpputils::Data _serializeChildren(const std::vector &children);
DISALLOW_COPY_AND_ASSIGN(DataInnerNode);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataInnerNode_ChildEntry.h 0000664 0000000 0000000 00000001533 13477012671 0030501 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_CHILDENTRY_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_CHILDENTRY_H_
#include
namespace blobstore{
namespace onblocks{
namespace datanodestore{
// A single child entry of an inner node: wraps the child's block id.
// Move-only so entries aren't accidentally copied around.
struct DataInnerNode_ChildEntry final {
public:
DataInnerNode_ChildEntry(const blockstore::BlockId &blockId): _blockId(blockId) {}
// Block id of the referenced child node.
const blockstore::BlockId& blockId() const {
return _blockId;
}
DataInnerNode_ChildEntry(const DataInnerNode_ChildEntry&) = delete;
DataInnerNode_ChildEntry& operator=(const DataInnerNode_ChildEntry&) = delete;
DataInnerNode_ChildEntry(DataInnerNode_ChildEntry&&) = default;
DataInnerNode_ChildEntry& operator=(DataInnerNode_ChildEntry&&) = default;
private:
blockstore::BlockId _blockId;
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataLeafNode.cpp 0000664 0000000 0000000 00000005460 13477012671 0026506 0 ustar 00root root 0000000 0000000 #include "DataLeafNode.h"
#include "DataInnerNode.h"
#include
using cpputils::Data;
using blockstore::BlockId;
using blockstore::BlockStore;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
// Wraps a node view as a leaf node; validates depth, stored size and format
// version. Throws std::runtime_error on unsupported format versions.
DataLeafNode::DataLeafNode(DataNodeView view)
: DataNode(std::move(view)) {
ASSERT(node().Depth() == 0, "Leaf node must have depth 0. Is it an inner node instead?");
ASSERT(numBytes() <= maxStoreableBytes(), "Leaf says it stores more bytes than it has space for");
if (node().FormatVersion() != FORMAT_VERSION_HEADER) {
throw std::runtime_error("This node format is not supported. Was it created with a newer version of CryFS?");
}
}
DataLeafNode::~DataLeafNode() {
}
// Allocates a new block and creates a leaf holding `data` (may be smaller
// than the maximum leaf size).
unique_ref DataLeafNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, Data data) {
ASSERT(data.size() <= layout.maxBytesPerLeaf(), "Data passed in is too large for one leaf.");
uint32_t size = data.size();
return make_unique_ref(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, std::move(data)));
}
// Overwrites the block with the given id with a full leaf. Unlike
// CreateNewNode, this requires `data` to be exactly maxBytesPerLeaf().
unique_ref DataLeafNode::OverwriteNode(BlockStore *blockStore, const DataNodeLayout &layout, const BlockId &blockId, Data data) {
// Fixed assertion message: the check is exact equality, the old message
// ("too large") only described one failure direction.
ASSERT(data.size() == layout.maxBytesPerLeaf(), "Data passed in must have exactly the size of one full leaf.");
uint32_t size = data.size();
return make_unique_ref(DataNodeView::overwrite(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, blockId, std::move(data)));
}
// Copies `size` bytes at `offset` from the leaf's data region into target.
void DataLeafNode::read(void *target, uint64_t offset, uint64_t size) const {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Read out of valid area"); // Also check offset, because the addition could lead to overflows
std::memcpy(target, static_cast(node().data()) + offset, size);
}
// Writes `size` bytes from source at `offset` within the current leaf size.
void DataLeafNode::write(const void *source, uint64_t offset, uint64_t size) {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Write out of valid area"); // Also check offset, because the addition could lead to overflows
node().write(source, offset, size);
}
// Number of payload bytes currently stored in this leaf.
uint32_t DataLeafNode::numBytes() const {
return node().Size();
}
// Changes the leaf size; when shrinking, the abandoned region is zeroed so
// no stale data remains in the block.
void DataLeafNode::resize(uint32_t new_size) {
ASSERT(new_size <= maxStoreableBytes(), "Trying to resize to a size larger than the maximal size");
uint32_t old_size = node().Size();
if (new_size < old_size) {
fillDataWithZeroesFromTo(new_size, old_size);
}
node().setSize(new_size);
}
// Zeroes the data region in [begin, end).
void DataLeafNode::fillDataWithZeroesFromTo(uint64_t begin, uint64_t end) {
Data ZEROES(end-begin);
ZEROES.FillWithZeroes();
node().write(ZEROES.data(), begin, end-begin);
}
// Capacity of the data region in bytes (uint64_t for >4GB blob math).
uint64_t DataLeafNode::maxStoreableBytes() const {
return node().layout().maxBytesPerLeaf();
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataLeafNode.h 0000664 0000000 0000000 00000002270 13477012671 0026147 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATALEAFNODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATALEAFNODE_H_
#include "DataNode.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataInnerNode;
class DataInnerNode;
// Leaf node of the blob tree: stores raw payload bytes (no children).
class DataLeafNode final: public DataNode {
public:
// Allocates a new block and creates a leaf holding `data`.
static cpputils::unique_ref CreateNewNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, cpputils::Data data);
// Overwrites an existing block with a full leaf (data must fill the leaf).
static cpputils::unique_ref OverwriteNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, const blockstore::BlockId &blockId, cpputils::Data data);
DataLeafNode(DataNodeView block);
~DataLeafNode();
//Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
uint64_t maxStoreableBytes() const;
// Copies `size` bytes at `offset` into target; asserts the range is valid.
void read(void *target, uint64_t offset, uint64_t size) const;
// Writes `size` bytes from source at `offset`; asserts the range is valid.
void write(const void *source, uint64_t offset, uint64_t size);
// Payload bytes currently stored.
uint32_t numBytes() const;
// Changes the stored size; zeroes the abandoned region when shrinking.
void resize(uint32_t size);
private:
void fillDataWithZeroesFromTo(uint64_t begin, uint64_t end);
DISALLOW_COPY_AND_ASSIGN(DataLeafNode);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataNode.cpp 0000664 0000000 0000000 00000002201 13477012671 0025704 0 ustar 00root root 0000000 0000000 #include "DataInnerNode.h"
#include "DataLeafNode.h"
#include "DataNode.h"
#include "DataNodeStore.h"
#include
using blockstore::BlockId;
using cpputils::unique_ref;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
// Out-of-class definition for the constexpr member (required pre-C++17).
constexpr uint16_t DataNode::FORMAT_VERSION_HEADER;
// Base class ctor: takes ownership of the node view.
DataNode::DataNode(DataNodeView node)
: _node(std::move(node)) {
}
DataNode::~DataNode() {
}
// Non-const accessor implemented via the const overload to avoid duplication.
DataNodeView &DataNode::node() {
return const_cast(const_cast(this)->node());
}
const DataNodeView &DataNode::node() const {
return _node;
}
const BlockId &DataNode::blockId() const {
return _node.blockId();
}
// Depth in the tree: 0 for leaves, >0 for inner nodes.
uint8_t DataNode::depth() const {
return _node.Depth();
}
// Converts an existing node's block into a fresh inner node one level above
// first_child, with first_child as its only child. The old node's block is
// zeroed before re-initialization. Consumes `node`.
unique_ref DataNode::convertToNewInnerNode(unique_ref node, const DataNodeLayout &layout, const DataNode &first_child) {
auto block = node->_node.releaseBlock();
blockstore::utils::fillWithZeroes(block.get());
return DataInnerNode::InitializeNewNode(std::move(block), layout, first_child.depth()+1, {first_child.blockId()});
}
// Flushes the underlying block.
void DataNode::flush() const {
_node.flush();
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataNode.h 0000664 0000000 0000000 00000001771 13477012671 0025364 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODE_H_
#include "DataNodeView.h"
#include
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataInnerNode;
// Common base for inner and leaf nodes; owns the DataNodeView over the block.
class DataNode {
public:
virtual ~DataNode();
// Block id of this node's block.
const blockstore::BlockId &blockId() const;
// Depth in the tree: 0 for leaves, >0 for inner nodes.
uint8_t depth() const;
// Re-initializes the given node's block as an inner node with first_child
// as its only child; consumes `node`.
static cpputils::unique_ref convertToNewInnerNode(cpputils::unique_ref node, const DataNodeLayout &layout, const DataNode &first_child);
void flush() const;
protected:
// The FORMAT_VERSION_HEADER is used to allow future versions to have compatibility.
static constexpr uint16_t FORMAT_VERSION_HEADER = 0;
DataNode(DataNodeView block);
DataNodeView &node();
const DataNodeView &node() const;
friend class DataNodeStore;
private:
DataNodeView _node;
DISALLOW_COPY_AND_ASSIGN(DataNode);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataNodeStore.cpp 0000664 0000000 0000000 00000011624 13477012671 0026732 0 ustar 00root root 0000000 0000000 #include "DataInnerNode.h"
#include "DataLeafNode.h"
#include "DataNodeStore.h"
#include
#include
#include
#include
using blockstore::BlockStore;
using blockstore::Block;
using blockstore::BlockId;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using cpputils::dynamic_pointer_move;
using std::runtime_error;
using boost::optional;
using boost::none;
using std::vector;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
// Wraps a block store and derives the node layout from the physical block size.
DataNodeStore::DataNodeStore(unique_ref blockstore, uint64_t physicalBlocksizeBytes)
: _blockstore(std::move(blockstore)), _layout(_blockstore->blockSizeFromPhysicalBlockSize(physicalBlocksizeBytes)) {
}
DataNodeStore::~DataNodeStore() {
}
// Interprets a raw block as a node: depth 0 -> leaf, 1..MAX_DEPTH -> inner.
// Depths beyond MAX_DEPTH indicate corruption and are rejected.
unique_ref DataNodeStore::load(unique_ref block) {
DataNodeView node(std::move(block));
if (node.Depth() == 0) {
return make_unique_ref(std::move(node));
} else if (node.Depth() <= MAX_DEPTH) {
return make_unique_ref(std::move(node));
} else {
// Fixed typo in error message: "to deep" -> "too deep".
throw runtime_error("Tree is too deep. Data corruption?");
}
}
// Creates a new inner node with the given depth and children.
unique_ref DataNodeStore::createNewInnerNode(uint8_t depth, const vector &children) {
ASSERT(children.size() >= 1, "Inner node must have at least one child");
return DataInnerNode::CreateNewNode(_blockstore.get(), _layout, depth, children);
}
// Creates a new leaf node holding `data`.
unique_ref DataNodeStore::createNewLeafNode(Data data) {
return DataLeafNode::CreateNewNode(_blockstore.get(), _layout, std::move(data));
}
// Overwrites an existing block with a full leaf.
unique_ref DataNodeStore::overwriteLeaf(const BlockId &blockId, Data data) {
return DataLeafNode::OverwriteNode(_blockstore.get(), _layout, blockId, std::move(data));
}
// Loads a node by block id; none if the block doesn't exist.
optional> DataNodeStore::load(const BlockId &blockId) {
auto block = _blockstore->load(blockId);
if (block == none) {
return none;
} else {
ASSERT((*block)->size() == _layout.blocksizeBytes(), "Loading block of wrong size");
return load(std::move(*block));
}
}
// Creates a new node as a byte-wise copy of `source` (new block id).
unique_ref DataNodeStore::createNewNodeAsCopyFrom(const DataNode &source) {
ASSERT(source.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
auto newBlock = blockstore::utils::copyToNewBlock(_blockstore.get(), source.node().block());
return load(std::move(newBlock));
}
// Overwrites `target`'s block with `source`'s content, keeping target's
// block id. The target node object is destroyed; a fresh node is returned.
unique_ref DataNodeStore::overwriteNodeWith(unique_ref target, const DataNode &source) {
ASSERT(target->node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Target node has wrong layout. Is it from the same DataNodeStore?");
ASSERT(source.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
auto targetBlock = target->node().releaseBlock();
cpputils::destruct(std::move(target)); // Call destructor
blockstore::utils::copyTo(targetBlock.get(), source.node().block());
return DataNodeStore::load(std::move(targetBlock));
}
// Removes a single loaded node (destroys the node object first so the block
// isn't referenced when it is deleted).
void DataNodeStore::remove(unique_ref node) {
BlockId blockId = node->blockId();
cpputils::destruct(std::move(node));
remove(blockId);
}
// Removes a single node by block id.
void DataNodeStore::remove(const BlockId &blockId) {
_blockstore->remove(blockId);
}
// Recursively removes a loaded node and all of its descendants.
void DataNodeStore::removeSubtree(unique_ref node) {
auto leaf = dynamic_pointer_move(node);
if (leaf != none) {
remove(std::move(*leaf));
return;
}
auto inner = dynamic_pointer_move(node);
ASSERT(inner != none, "Is neither a leaf nor an inner node");
for (uint32_t i = 0; i < (*inner)->numChildren(); ++i) {
removeSubtree((*inner)->depth()-1, (*inner)->readChild(i).blockId());
}
remove(std::move(*inner));
}
// Recursively removes the subtree rooted at blockId; `depth` is the expected
// depth of that root (0 means it is a leaf and can be removed directly).
void DataNodeStore::removeSubtree(uint8_t depth, const BlockId &blockId) {
if (depth == 0) {
remove(blockId);
} else {
auto node = load(blockId);
ASSERT(node != none, "Node for removeSubtree not found");
auto inner = dynamic_pointer_move(*node);
ASSERT(inner != none, "Is not an inner node, but depth was not zero");
ASSERT((*inner)->depth() == depth, "Wrong depth given");
for (uint32_t i = 0; i < (*inner)->numChildren(); ++i) {
removeSubtree(depth-1, (*inner)->readChild(i).blockId());
}
remove(std::move(*inner));
}
}
// Total number of nodes (= blocks) stored.
uint64_t DataNodeStore::numNodes() const {
return _blockstore->numBlocks();
}
// Estimate of how many more nodes fit into the underlying storage.
uint64_t DataNodeStore::estimateSpaceForNumNodesLeft() const {
return _blockstore->estimateNumFreeBytes() / _layout.blocksizeBytes();
}
// Usable bytes per block (header + data region of a node).
uint64_t DataNodeStore::virtualBlocksizeBytes() const {
return _layout.blocksizeBytes();
}
DataNodeLayout DataNodeStore::layout() const {
return _layout;
}
// Invokes callback for each stored block/node id.
void DataNodeStore::forEachNode(std::function callback) const {
_blockstore->forEachBlock(std::move(callback));
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataNodeStore.h 0000664 0000000 0000000 00000004172 13477012671 0026377 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODESTORE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODESTORE_H_
#include
#include
#include "DataNodeView.h"
#include
namespace blockstore{
class Block;
class BlockStore;
}
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNode;
class DataLeafNode;
class DataInnerNode;
// Factory/store for tree nodes on top of a BlockStore: creates, loads,
// copies, overwrites and removes nodes (and whole subtrees).
class DataNodeStore final {
public:
DataNodeStore(cpputils::unique_ref blockstore, uint64_t physicalBlocksizeBytes);
~DataNodeStore();
// Maximum allowed tree depth; deeper nodes are treated as corruption.
static constexpr uint8_t MAX_DEPTH = 10;
DataNodeLayout layout() const;
// Loads a node by block id; none if missing.
boost::optional> load(const blockstore::BlockId &blockId);
// Interprets a raw block as a node (leaf or inner, depending on depth).
static cpputils::unique_ref load(cpputils::unique_ref block);
cpputils::unique_ref createNewLeafNode(cpputils::Data data);
cpputils::unique_ref createNewInnerNode(uint8_t depth, const std::vector &children);
// Byte-wise copy of `source` into a new block.
cpputils::unique_ref createNewNodeAsCopyFrom(const DataNode &source);
// Overwrites target's block with source's content, keeping target's id.
cpputils::unique_ref overwriteNodeWith(cpputils::unique_ref target, const DataNode &source);
cpputils::unique_ref overwriteLeaf(const blockstore::BlockId &blockId, cpputils::Data data);
void remove(cpputils::unique_ref node);
void remove(const blockstore::BlockId &blockId);
// Recursively removes a subtree (by expected depth + root id, or by node).
void removeSubtree(uint8_t depth, const blockstore::BlockId &blockId);
void removeSubtree(cpputils::unique_ref node);
//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
uint64_t virtualBlocksizeBytes() const;
uint64_t numNodes() const;
uint64_t estimateSpaceForNumNodesLeft() const;
//TODO Test overwriteNodeWith(), createNodeAsCopyFrom(), removeSubtree()
void forEachNode(std::function callback) const;
private:
cpputils::unique_ref _blockstore;
const DataNodeLayout _layout;
DISALLOW_COPY_AND_ASSIGN(DataNodeStore);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataNodeView.h 0000664 0000000 0000000 00000013715 13477012671 0026220 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODEVIEW_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODEVIEW_H_
#include
#include "../BlobStoreOnBlocks.h"
#include "DataInnerNode_ChildEntry.h"
#include
#include
#include
#include
namespace blobstore {
namespace onblocks {
namespace datanodestore {
//TODO Move DataNodeLayout into own file
//TODO Move DataNodeLayout into own file
// Describes the byte layout of a node block: a fixed header followed by a
// data region (child entries for inner nodes, payload for leaves).
class DataNodeLayout final {
public:
// Rejects block sizes too small to hold the header plus two child entries
// (an inner node with fewer than two children would be useless).
// NOTE(review): parameter is uint64_t but _blocksizeBytes is uint32_t --
// values >4GB would narrow silently; confirm intended range upstream.
constexpr DataNodeLayout(uint64_t blocksizeBytes)
:_blocksizeBytes(
(HEADERSIZE_BYTES + 2*sizeof(DataInnerNode_ChildEntry) <= blocksizeBytes)
? blocksizeBytes
: throw std::logic_error("Blocksize too small, not enough space to store two children in an inner node")) {
}
//Total size of the header
static constexpr uint32_t HEADERSIZE_BYTES = 8;
//Where in the header is the format version field (used to allow compatibility with future versions of CryFS)
static constexpr uint32_t FORMAT_VERSION_OFFSET_BYTES = 0; //format version uses 2 bytes
//Where in the header is the depth field
static constexpr uint32_t DEPTH_OFFSET_BYTES = 3; // depth uses 1 byte
//Where in the header is the size field (for inner nodes: number of children, for leafs: content data size)
static constexpr uint32_t SIZE_OFFSET_BYTES = 4; // size uses 4 bytes
//Size of a block (header + data region)
constexpr uint64_t blocksizeBytes() const {
return _blocksizeBytes;
}
//Number of bytes in the data region of a node
constexpr uint64_t datasizeBytes() const {
return _blocksizeBytes - HEADERSIZE_BYTES;
}
//Maximum number of children an inner node can store
constexpr uint64_t maxChildrenPerInnerNode() const {
return datasizeBytes() / sizeof(DataInnerNode_ChildEntry);
}
//Maximum number of bytes a leaf can store
constexpr uint64_t maxBytesPerLeaf() const {
return datasizeBytes();
}
private:
uint32_t _blocksizeBytes;
};
// Typed accessor over a raw block: reads/writes the node header fields
// (format version, depth, size) and the data region behind them.
// NOTE(review): template arguments (e.g. of deserializeWithOffset and
// unique_ref) appear stripped by text extraction in this file.
class DataNodeView final {
public:
DataNodeView(cpputils::unique_ref block): _block(std::move(block)) {
}
~DataNodeView() {}
// Serializes a full node image and stores it in a newly created block.
static DataNodeView create(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
ASSERT(data.size() <= layout.datasizeBytes(), "Data is too large for node");
cpputils::Data serialized = _serialize(layout, formatVersion, depth, size, std::move(data));
ASSERT(serialized.size() == layout.blocksizeBytes(), "Wrong block size");
auto block = blockStore->create(serialized);
return DataNodeView(std::move(block));
}
// Serializes a full node image into an already-allocated block.
static DataNodeView initialize(cpputils::unique_ref block, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
ASSERT(data.size() <= DataNodeLayout(block->size()).datasizeBytes(), "Data is too large for node");
cpputils::Data serialized = _serialize(layout, formatVersion, depth, size, std::move(data));
ASSERT(serialized.size() == block->size(), "Block has wrong size");
block->write(serialized.data(), 0, serialized.size());
return DataNodeView(std::move(block));
}
// Serializes a full node image and overwrites the block with the given id.
static DataNodeView overwrite(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, const blockstore::BlockId &blockId, cpputils::Data data) {
ASSERT(data.size() <= layout.datasizeBytes(), "Data is too large for node");
cpputils::Data serialized = _serialize(layout, formatVersion, depth, size, std::move(data));
auto block = blockStore->overwrite(blockId, std::move(serialized));
return DataNodeView(std::move(block));
}
DataNodeView(DataNodeView &&rhs) = default;
// Header accessors: each reads/writes its field at the layout's fixed offset.
uint16_t FormatVersion() const {
return cpputils::deserializeWithOffset(_block->data(), DataNodeLayout::FORMAT_VERSION_OFFSET_BYTES);
}
void setFormatVersion(uint16_t value) {
_block->write(&value, DataNodeLayout::FORMAT_VERSION_OFFSET_BYTES, sizeof(value));
}
uint8_t Depth() const {
return cpputils::deserializeWithOffset(_block->data(), DataNodeLayout::DEPTH_OFFSET_BYTES);
}
void setDepth(uint8_t value) {
_block->write(&value, DataNodeLayout::DEPTH_OFFSET_BYTES, sizeof(value));
}
uint32_t Size() const {
return cpputils::deserializeWithOffset(_block->data(), DataNodeLayout::SIZE_OFFSET_BYTES);
}
void setSize(uint32_t value) {
_block->write(&value, DataNodeLayout::SIZE_OFFSET_BYTES, sizeof(value));
}
// Pointer to the data region (directly behind the header).
const void *data() const {
return static_cast(_block->data()) + DataNodeLayout::HEADERSIZE_BYTES;
}
// Writes into the data region; offset is relative to the data region start.
void write(const void *source, uint64_t offset, uint64_t size) {
_block->write(source, offset + DataNodeLayout::HEADERSIZE_BYTES, size);
}
// Layout derived from the actual block size.
DataNodeLayout layout() const {
return DataNodeLayout(_block->size());
}
// Releases ownership of the underlying block; the view must not be used after.
cpputils::unique_ref releaseBlock() {
return std::move(_block);
}
const blockstore::Block &block() const {
return *_block;
}
const blockstore::BlockId &blockId() const {
return _block->blockId();
}
void flush() const {
_block->flush();
}
private:
// Builds a full block image: header fields, then data, then zero padding
// so the unused remainder of the data region never holds stale bytes.
static cpputils::Data _serialize(const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
cpputils::Data result(layout.blocksizeBytes());
cpputils::serialize(result.dataOffset(layout.FORMAT_VERSION_OFFSET_BYTES), formatVersion);
cpputils::serialize(result.dataOffset(layout.DEPTH_OFFSET_BYTES), depth);
cpputils::serialize(result.dataOffset(layout.SIZE_OFFSET_BYTES), size);
std::memcpy(result.dataOffset(layout.HEADERSIZE_BYTES), data.data(), data.size());
std::memset(result.dataOffset(layout.HEADERSIZE_BYTES+data.size()), 0, layout.datasizeBytes()-data.size());
return result;
}
cpputils::unique_ref _block;
DISALLOW_COPY_AND_ASSIGN(DataNodeView);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/ 0000775 0000000 0000000 00000000000 13477012671 0023540 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/datatreestore/DataTree.cpp 0000664 0000000 0000000 00000035253 13477012671 0025745 0 ustar 00root root 0000000 0000000 #include "DataTree.h"
#include "../datanodestore/DataNodeStore.h"
#include "../datanodestore/DataInnerNode.h"
#include "../datanodestore/DataLeafNode.h"
#include "../utils/Math.h"
#include "impl/algorithms.h"
#include
#include
#include
#include
#include "impl/LeafTraverser.h"
#include
#include
using blockstore::BlockId;
using blobstore::onblocks::datanodestore::DataNodeStore;
using blobstore::onblocks::datanodestore::DataNode;
using blobstore::onblocks::datanodestore::DataInnerNode;
using blobstore::onblocks::datanodestore::DataLeafNode;
using std::function;
using boost::shared_mutex;
using boost::shared_lock;
using boost::unique_lock;
using boost::none;
using boost::optional;
using cpputils::optional_ownership_ptr;
using cpputils::unique_ref;
using cpputils::Data;
using namespace cpputils::logging;
//TODO shared_lock currently not enough for traverse because of root replacement. Can be fixed while keeping shared?
namespace blobstore {
namespace onblocks {
namespace datatreestore {
DataTree::DataTree(DataNodeStore *nodeStore, unique_ref rootNode)
: _treeStructureMutex(), _nodeStore(nodeStore), _rootNode(std::move(rootNode)), _blockId(_rootNode->blockId()), _sizeCache() {
}
DataTree::~DataTree() {
}
const BlockId &DataTree::blockId() const {
return _blockId;
}
void DataTree::flush() const {
// By grabbing a lock, we ensure that all modifying functions don't run currently and are therefore flushed.
// It's only a shared lock, because this doesn't modify the tree structure.
shared_lock lock(_treeStructureMutex);
// We also have to flush the root node
_rootNode->flush();
}
// Transfers ownership of the root node to the caller (used by
// DataTreeStore::remove). Leaves _rootNode in a moved-from state, so the
// DataTree must not be used afterwards.
unique_ref DataTree::releaseRootNode() {
// Lock also ensures that the root node is currently set (traversing unsets it temporarily)
// It's a unique lock because this "modifies" tree structure by changing _rootNode.
unique_lock lock(_treeStructureMutex);
return std::move(_rootNode);
}
// Computes the total number of nodes (leaves + inner nodes) in the tree by
// walking up level by level: each level has ceil(previous / maxChildren) nodes.
// NOTE(review): locking happens inside numLeaves(); the loop itself reads
// _rootNode->depth() without holding the lock — presumably safe because depth
// only changes under the same traversal paths; confirm against callers.
uint32_t DataTree::numNodes() const {
uint32_t numNodesCurrentLevel = numLeaves();
uint32_t totalNumNodes = numNodesCurrentLevel;
for(size_t level = 0; level < _rootNode->depth(); ++level) {
numNodesCurrentLevel = blobstore::onblocks::utils::ceilDivision(numNodesCurrentLevel, static_cast(_nodeStore->layout().maxChildrenPerInnerNode()));
totalNumNodes += numNodesCurrentLevel;
}
return totalNumNodes;
}
// Returns the number of leaves in the tree, using the cached size if available.
uint32_t DataTree::numLeaves() const {
shared_lock lock(_treeStructureMutex);
return _getOrComputeSizeCache().numLeaves;
}
// Returns the total number of data bytes stored in the tree (thread-safe
// public wrapper around _numBytes()).
uint64_t DataTree::numBytes() const {
shared_lock lock(_treeStructureMutex);
return _numBytes();
}
// Internal, lock-free variant of numBytes(); caller must hold _treeStructureMutex.
uint64_t DataTree::_numBytes() const {
return _getOrComputeSizeCache().numBytes;
}
// Returns the cached (numLeaves, numBytes) pair, computing it from the tree
// on a cache miss (see CachedValue::getOrCompute).
DataTree::SizeCache DataTree::_getOrComputeSizeCache() const {
return _sizeCache.getOrCompute([this] () {
return _computeSizeCache(*_rootNode);
});
}
// Test-only helper (see header): drops the size cache and recomputes the
// number of leaves from the actual tree structure.
uint32_t DataTree::forceComputeNumLeaves() const {
_sizeCache.clear();
return numLeaves();
}
// Recursively computes (numLeaves, numBytes) for the subtree rooted at node.
// Only the rightmost path is actually loaded: all children left of the last
// child are assumed full (maxBytesPerLeaf each), which holds for this
// left-max-filled tree layout.
// NOTE(review): the dynamic_cast target types were stripped by the extraction
// of this archive; the leaf cast is presumably to const DataLeafNode* and the
// inner cast to const DataInnerNode& — confirm against upstream sources.
DataTree::SizeCache DataTree::_computeSizeCache(const DataNode &node) const {
const DataLeafNode *leaf = dynamic_cast(&node);
if (leaf != nullptr) {
// Base case: a single leaf.
return {1, leaf->numBytes()};
}
const DataInnerNode &inner = dynamic_cast(node);
// All children except the last are full subtrees of known size.
uint32_t numLeavesInLeftChildren = static_cast(inner.numChildren()-1) * _leavesPerFullChild(inner);
uint64_t numBytesInLeftChildren = numLeavesInLeftChildren * _nodeStore->layout().maxBytesPerLeaf();
auto lastChild = _nodeStore->load(inner.readLastChild().blockId());
ASSERT(lastChild != none, "Couldn't load last child");
// Only the rightmost child needs an exact recursive computation.
SizeCache sizeInRightChild = _computeSizeCache(**lastChild);
return SizeCache {
numLeavesInLeftChildren + sizeInRightChild.numLeaves,
numBytesInLeftChildren + sizeInRightChild.numBytes
};
}
// Traverses leaves [beginIndex, endIndex) by leaf index, delegating to
// LeafTraverser which may also grow/shrink the tree and replace _rootNode.
// Caller must hold _treeStructureMutex (shared for readOnlyTraversal, unique
// otherwise).
void DataTree::_traverseLeavesByLeafIndices(uint32_t beginIndex, uint32_t endIndex, bool readOnlyTraversal,
function onExistingLeaf,
function onCreateLeaf,
function onBacktrackFromSubtree) const {
// Empty range: nothing to do.
if(endIndex <= beginIndex) {
return;
}
// TODO no const cast
LeafTraverser(_nodeStore, readOnlyTraversal).traverseAndUpdateRoot(&const_cast(this)->_rootNode, beginIndex, endIndex, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// Traverses the leaves covering the byte range [beginByte, beginByte+sizeBytes),
// converting byte positions into leaf indices plus per-leaf data offsets, and
// wrapping the user callbacks so that border leaves are resized/zero-padded
// correctly. Updates the size cache if the traversal grew the blob.
// NOTE(review): std::function template arguments were stripped by the archive
// extraction; callback signatures are visible in the lambdas below.
void DataTree::_traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeBytes, bool readOnlyTraversal, function onExistingLeaf, function onCreateLeaf) const {
// Empty range: nothing to traverse.
if (sizeBytes == 0) {
return;
}
uint64_t endByte = beginByte + sizeBytes;
uint64_t _maxBytesPerLeaf = maxBytesPerLeaf();
// First leaf containing the range, and one-past-the-last leaf touched.
uint32_t firstLeaf = beginByte / _maxBytesPerLeaf;
uint32_t endLeaf = utils::ceilDivision(endByte, _maxBytesPerLeaf);
bool blobIsGrowingFromThisTraversal = false;
auto _onExistingLeaf = [&onExistingLeaf, beginByte, endByte, endLeaf, _maxBytesPerLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex, bool isRightBorderLeaf, LeafHandle leafHandle) {
uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
// Intersection of this leaf with the requested byte range.
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
// If we are traversing exactly until the last leaf, then the last leaf wasn't resized by the traversal and might have a wrong size. We have to fix it.
if (isRightBorderLeaf) {
ASSERT(leafIndex == endLeaf-1, "If we traversed further right, this wouldn't be the right border leaf.");
auto leaf = leafHandle.node();
if (leaf->numBytes() < dataEnd) {
leaf->resize(dataEnd);
blobIsGrowingFromThisTraversal = true;
}
}
onExistingLeaf(indexOfFirstLeafByte, std::move(leafHandle), dataBegin, dataEnd-dataBegin);
};
auto _onCreateLeaf = [&onCreateLeaf, _maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal, readOnlyTraversal] (uint32_t leafIndex) -> Data {
ASSERT(!readOnlyTraversal, "Cannot create leaves in a read-only traversal");
// Creating a leaf always grows the blob.
blobIsGrowingFromThisTraversal = true;
uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
ASSERT(leafIndex == endLeaf-1 || dataEnd == _maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
ASSERT(data.size() == dataEnd-dataBegin, "Returned leaf data with wrong size");
// If this leaf is created but only partly in the traversed region (i.e. dataBegin > leafBegin), we have to fill the data before the traversed region with zeroes.
if (dataBegin != 0) {
Data actualData(dataBegin + data.size());
std::memset(actualData.data(), 0, dataBegin);
std::memcpy(actualData.dataOffset(dataBegin), data.data(), data.size());
data = std::move(actualData);
}
return data;
};
// Byte-range traversal never needs subtree backtracking work.
auto _onBacktrackFromSubtree = [] (DataInnerNode* /*node*/) {};
_traverseLeavesByLeafIndices(firstLeaf, endLeaf, readOnlyTraversal, _onExistingLeaf, _onCreateLeaf, _onBacktrackFromSubtree);
ASSERT(!readOnlyTraversal || !blobIsGrowingFromThisTraversal, "Blob grew from traversal that didn't allow growing (i.e. reading)");
// After growing, the exact new size is known: endLeaf leaves, endByte bytes.
if (blobIsGrowingFromThisTraversal) {
_sizeCache.update([endLeaf, endByte] (optional* cache) {
*cache = SizeCache{endLeaf, endByte};
});
}
}
// Number of leaves in one full subtree directly below 'root', i.e.
// maxChildrenPerInnerNode ^ (root.depth() - 1).
uint32_t DataTree::_leavesPerFullChild(const DataInnerNode &root) const {
return utils::intPow(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast(root.depth())-1);
}
// Resizes the blob to exactly newNumBytes. Implemented as a traversal to the
// new last leaf: growing creates/zero-fills leaves on the way, shrinking
// removes the subtrees right of the new last leaf during backtracking.
// A blob always has at least one leaf, even when newNumBytes == 0.
void DataTree::resizeNumBytes(uint64_t newNumBytes) {
std::unique_lock lock(_treeStructureMutex);
uint32_t newNumLeaves = std::max(UINT64_C(1), utils::ceilDivision(newNumBytes, _nodeStore->layout().maxBytesPerLeaf()));
// Size of the (possibly partial) last leaf after the resize.
uint32_t newLastLeafSize = newNumBytes - (newNumLeaves-1) * _nodeStore->layout().maxBytesPerLeaf();
uint32_t maxChildrenPerInnerNode = _nodeStore->layout().maxChildrenPerInnerNode();
auto onExistingLeaf = [newLastLeafSize] (uint32_t /*index*/, bool /*isRightBorderLeaf*/, LeafHandle leafHandle) {
auto leaf = leafHandle.node();
// This is only called, if the new last leaf was already existing
if (leaf->numBytes() != newLastLeafSize) {
leaf->resize(newLastLeafSize);
}
};
auto onCreateLeaf = [newLastLeafSize] (uint32_t /*index*/) -> Data {
// This is only called, if the new last leaf was not existing yet
return Data(newLastLeafSize).FillWithZeroes();
};
auto onBacktrackFromSubtree = [this, newNumLeaves, maxChildrenPerInnerNode] (DataInnerNode* node) {
// This is only called for the right border nodes of the new tree.
// When growing size, the following is a no-op. When shrinking, we're deleting the children that aren't needed anymore.
uint32_t maxLeavesPerChild = utils::intPow(static_cast(maxChildrenPerInnerNode), (static_cast(node->depth())-1));
uint32_t neededNodesOnChildLevel = utils::ceilDivision(newNumLeaves, maxLeavesPerChild);
uint32_t neededSiblings = utils::ceilDivision(neededNodesOnChildLevel, maxChildrenPerInnerNode);
uint32_t neededChildrenForRightBorderNode = neededNodesOnChildLevel - (neededSiblings-1) * maxChildrenPerInnerNode;
ASSERT(neededChildrenForRightBorderNode <= node->numChildren(), "Node has too few children");
// All children to the right of the new right-border-node are removed including their subtree.
while(node->numChildren() > neededChildrenForRightBorderNode) {
_nodeStore->removeSubtree(node->depth()-1, node->readLastChild().blockId());
node->removeLastChild();
}
};
// Traverse only the new last leaf; side effects of the traversal do the rest.
_traverseLeavesByLeafIndices(newNumLeaves - 1, newNumLeaves, false, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
// The new size is known exactly, so set the cache directly.
_sizeCache.update([newNumLeaves, newNumBytes] (boost::optional* cache) {
*cache = SizeCache{newNumLeaves, newNumBytes};
});
}
// Maximum payload bytes a single leaf node can hold (from the node layout).
uint64_t DataTree::maxBytesPerLeaf() const {
return _nodeStore->layout().maxBytesPerLeaf();
}
// Returns the current depth of the tree (0 == root is a leaf).
uint8_t DataTree::depth() const {
shared_lock lock(_treeStructureMutex);
return _rootNode->depth();
}
// Reads exactly 'count' bytes starting at 'offset' into 'target'.
// Throws std::runtime_error if any part of the range lies outside the blob;
// use tryReadBytes() for a best-effort read instead.
void DataTree::readBytes(void *target, uint64_t offset, uint64_t count) const {
shared_lock lock(_treeStructureMutex);
const uint64_t _size = _numBytes();
// Checking both conditions guards against overflow of offset + count.
if(offset > _size || offset + count > _size) {
throw std::runtime_error("BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
}
const uint64_t read = _tryReadBytes(target, offset, count);
if (read != count) {
throw std::runtime_error("BlobOnBlocks::read() couldn't read all requested bytes. Use BlobOnBlocks::tryRead() if this should be allowed.");
}
}
// Reads the entire blob contents into a newly allocated Data buffer.
Data DataTree::readAllBytes() const {
shared_lock lock(_treeStructureMutex);
//TODO Querying numBytes can be inefficient. Is this possible without a call to size()?
uint64_t count = _numBytes();
Data result(count);
_doReadBytes(result.data(), 0, count);
return result;
}
// Best-effort read: reads up to 'count' bytes at 'offset' and returns the
// number of bytes actually read (less than 'count' near the end of the blob).
uint64_t DataTree::tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
shared_lock lock(_treeStructureMutex);
auto result = _tryReadBytes(target, offset, count);
return result;
}
// Internal best-effort read; caller must hold _treeStructureMutex.
// Clamps the requested range to the blob size (the signed min/max dance keeps
// offset > size from underflowing to a huge unsigned count).
// NOTE(review): static_cast target types were stripped by the archive
// extraction; presumably casts to int64_t — confirm against upstream sources.
uint64_t DataTree::_tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
//TODO Quite inefficient to call size() here, because that has to traverse the tree
const uint64_t _size = _numBytes();
const uint64_t realCount = std::max(INT64_C(0), std::min(static_cast(count), static_cast(_size)-static_cast(offset)));
_doReadBytes(target, offset, realCount);
return realCount;
}
// Copies [offset, offset+count) from the blob into 'target' by traversing the
// covered leaves read-only. The range must already be clamped to the blob size;
// reading must never create leaves (asserted in onCreateLeaf).
void DataTree::_doReadBytes(void *target, uint64_t offset, uint64_t count) const {
auto onExistingLeaf = [target, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Writing to target out of bounds");
//TODO Simplify formula, make it easier to understand
leaf.node()->read(static_cast(target) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
};
auto onCreateLeaf = [] (uint64_t /*beginByte*/, uint32_t /*count*/) -> Data {
ASSERT(false, "Reading shouldn't create new leaves.");
};
// 'true' = read-only traversal: must not modify the tree.
_traverseLeavesByByteIndices(offset, count, true, onExistingLeaf, onCreateLeaf);
}
// Writes 'count' bytes from 'source' at byte offset 'offset', growing the
// blob (creating leaves) as needed. Full-leaf overwrites bypass loading the
// old leaf contents via overwriteLeaf(); partial writes go through the leaf's
// write() method.
void DataTree::writeBytes(const void *source, uint64_t offset, uint64_t count) {
unique_lock lock(_treeStructureMutex);
auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
if (leafDataOffset == 0 && leafDataSize == leaf.nodeStore()->layout().maxBytesPerLeaf()) {
// Whole leaf is overwritten: avoid loading old leaf data.
Data leafData(leafDataSize);
std::memcpy(leafData.data(), static_cast(source) + indexOfFirstLeafByte - offset, leafDataSize);
leaf.nodeStore()->overwriteLeaf(leaf.blockId(), std::move(leafData));
} else {
//TODO Simplify formula, make it easier to understand
leaf.node()->write(static_cast(source) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset,
leafDataSize);
}
};
auto onCreateLeaf = [source, offset, count] (uint64_t beginByte, uint32_t numBytes) -> Data {
ASSERT(beginByte >= offset && beginByte-offset <= count && beginByte-offset+numBytes <= count, "Reading from source out of bounds");
Data result(numBytes);
//TODO Simplify formula, make it easier to understand
std::memcpy(result.data(), static_cast(source) + beginByte - offset, numBytes);
return result;
};
// 'false' = writing traversal: may create leaves and grow the tree.
_traverseLeavesByByteIndices(offset, count, false, onExistingLeaf, onCreateLeaf);
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/DataTree.h 0000664 0000000 0000000 00000007164 13477012671 0025412 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREE_H_
#include
#include
#include
#include "../datanodestore/DataNodeView.h"
//TODO Replace with C++14 once std::shared_mutex is supported
#include
#include
#include "LeafHandle.h"
#include "impl/CachedValue.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataInnerNode;
class DataLeafNode;
class DataNode;
}
namespace datatreestore {
//TODO It is strange that DataLeafNode is still part in the public interface of DataTree. This should be separated somehow.
// A balanced, left-max-filled tree of fixed-size blocks storing one blob's
// data: inner nodes fan out to children, leaves hold the bytes. Provides
// byte-oriented read/write/resize on top of a DataNodeStore. Thread-safe via
// _treeStructureMutex; sizes are memoized in _sizeCache.
// NOTE(review): template arguments (cpputils::unique_ref<...>,
// std::function<...>, CachedValue<...>) were stripped by the archive
// extraction throughout this declaration.
class DataTree final {
public:
DataTree(datanodestore::DataNodeStore *nodeStore, cpputils::unique_ref rootNode);
~DataTree();
const blockstore::BlockId &blockId() const;
//Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
uint64_t maxBytesPerLeaf() const;
// Best-effort read; returns number of bytes actually read.
uint64_t tryReadBytes(void *target, uint64_t offset, uint64_t count) const;
// Strict read; throws if the range exceeds the blob.
void readBytes(void *target, uint64_t offset, uint64_t count) const;
cpputils::Data readAllBytes() const;
// Writes may grow the blob; new gap regions are zero-filled.
void writeBytes(const void *source, uint64_t offset, uint64_t count);
void resizeNumBytes(uint64_t newNumBytes);
uint32_t numNodes() const;
uint32_t numLeaves() const;
uint64_t numBytes() const;
uint8_t depth() const;
// only used by test cases
uint32_t forceComputeNumLeaves() const;
void flush() const;
private:
// This mutex must protect the tree structure, i.e. which nodes exist and how they're connected.
// Also protects total number of bytes (i.e. number of leaves + size of last leaf).
// It also protects the data in leaf nodes, because writing bytes might grow the blob and change the structure.
mutable boost::shared_mutex _treeStructureMutex;
datanodestore::DataNodeStore *_nodeStore;
cpputils::unique_ref _rootNode;
blockstore::BlockId _blockId; // BlockId is stored in a member variable, since _rootNode is nullptr while traversing, but we still want to be able to return the blockId.
// Memoized size information: leaf count plus total byte count.
struct SizeCache final {
uint32_t numLeaves;
uint64_t numBytes;
};
mutable CachedValue _sizeCache;
// Hands the root node to the caller; used by DataTreeStore::remove.
cpputils::unique_ref releaseRootNode();
friend class DataTreeStore;
void _traverseLeavesByLeafIndices(uint32_t beginIndex, uint32_t endIndex, bool readOnlyTraversal,
std::function onExistingLeaf,
std::function onCreateLeaf,
std::function onBacktrackFromSubtree) const;
void _traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeBytes, bool readOnlyTraversal, std::function onExistingLeaf, std::function onCreateLeaf) const;
uint32_t _leavesPerFullChild(const datanodestore::DataInnerNode &root) const;
SizeCache _getOrComputeSizeCache() const;
SizeCache _computeSizeCache(const datanodestore::DataNode &node) const;
uint64_t _tryReadBytes(void *target, uint64_t offset, uint64_t count) const;
void _doReadBytes(void *target, uint64_t offset, uint64_t count) const;
uint64_t _numBytes() const;
DISALLOW_COPY_AND_ASSIGN(DataTree);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/DataTreeStore.cpp 0000664 0000000 0000000 00000002310 13477012671 0026746 0 ustar 00root root 0000000 0000000 #include "DataTreeStore.h"
#include "../datanodestore/DataLeafNode.h"
#include "DataTree.h"
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using cpputils::Data;
using boost::optional;
using boost::none;
using blobstore::onblocks::datanodestore::DataNodeStore;
namespace blobstore {
namespace onblocks {
namespace datatreestore {
// Constructs a tree store that owns the given node store.
DataTreeStore::DataTreeStore(unique_ref nodeStore)
: _nodeStore(std::move(nodeStore)) {
}
// Destructor: the owned node store is released by unique_ref.
DataTreeStore::~DataTreeStore() {
}
// Loads the tree rooted at blockId, or returns none if no such node exists.
optional> DataTreeStore::load(const blockstore::BlockId &blockId) {
auto node = _nodeStore->load(blockId);
if (node == none) {
return none;
}
return make_unique_ref(_nodeStore.get(), std::move(*node));
}
// Creates a new empty tree: a single zero-byte leaf as root.
unique_ref DataTreeStore::createNewTree() {
auto newleaf = _nodeStore->createNewLeafNode(Data(0));
return make_unique_ref(_nodeStore.get(), std::move(newleaf));
}
// Removes a loaded tree including all of its nodes from the store.
// Takes ownership of the tree; its root is extracted and the whole subtree deleted.
void DataTreeStore::remove(unique_ref tree) {
_nodeStore->removeSubtree(tree->releaseRootNode());
}
// Removes the tree rooted at blockId; asserts that it exists.
void DataTreeStore::remove(const blockstore::BlockId &blockId) {
auto tree = load(blockId);
ASSERT(tree != none, "Tree to remove not found");
remove(std::move(*tree));
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/DataTreeStore.h 0000664 0000000 0000000 00000002743 13477012671 0026425 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_DATATREESTORE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_DATATREESTORE_H_
#include
#include
#include
#include
#include
#include "../datanodestore/DataNodeStore.h"
namespace blobstore {
namespace onblocks {
namespace datatreestore {
class DataTree;
// Factory/owner for DataTree instances on top of a DataNodeStore:
// create, load, and remove whole trees, plus store-level size queries.
// NOTE(review): template arguments (unique_ref<...>, optional<...>) were
// stripped by the archive extraction in this declaration.
class DataTreeStore final {
public:
DataTreeStore(cpputils::unique_ref nodeStore);
~DataTreeStore();
// Returns none if no tree with that root block exists.
boost::optional> load(const blockstore::BlockId &blockId);
cpputils::unique_ref createNewTree();
void remove(cpputils::unique_ref tree);
void remove(const blockstore::BlockId &blockId);
//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
uint64_t virtualBlocksizeBytes() const;
uint64_t numNodes() const;
uint64_t estimateSpaceForNumNodesLeft() const;
private:
cpputils::unique_ref _nodeStore;
DISALLOW_COPY_AND_ASSIGN(DataTreeStore);
};
// Total number of nodes currently stored, forwarded to the node store.
inline uint64_t DataTreeStore::numNodes() const {
return _nodeStore->numNodes();
}
// Estimate of how many more nodes fit into the underlying storage.
inline uint64_t DataTreeStore::estimateSpaceForNumNodesLeft() const {
return _nodeStore->estimateSpaceForNumNodesLeft();
}
// Logical block size as seen by users of this store, forwarded to the node store.
inline uint64_t DataTreeStore::virtualBlocksizeBytes() const {
return _nodeStore->virtualBlocksizeBytes();
}
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/LeafHandle.cpp 0000664 0000000 0000000 00000002606 13477012671 0026233 0 ustar 00root root 0000000 0000000 #include "LeafHandle.h"
#include "../datanodestore/DataLeafNode.h"
#include "../datanodestore/DataNodeStore.h"
using cpputils::WithOwnership;
using cpputils::WithoutOwnership;
using boost::none;
using cpputils::dynamic_pointer_move;
using blobstore::onblocks::datanodestore::DataLeafNode;
using blobstore::onblocks::datanodestore::DataNodeStore;
using blockstore::BlockId;
namespace blobstore {
namespace onblocks {
namespace datatreestore {
// Lazy handle: stores only the block id; the leaf node is loaded on first node() call.
LeafHandle::LeafHandle(DataNodeStore *nodeStore, const BlockId &blockId)
: _nodeStore(nodeStore), _blockId(blockId), _leaf(cpputils::null()) {
}
// Eager handle: wraps an already-loaded leaf without taking ownership of it.
LeafHandle::LeafHandle(DataNodeStore *nodeStore, DataLeafNode *node)
: _nodeStore(nodeStore), _blockId(node->blockId()),
_leaf(WithoutOwnership(node)) {
}
// Returns the leaf node, loading it from the store on first access
// (asserts the block exists and actually is a leaf). Subsequent calls
// return the cached pointer.
// NOTE(review): the dynamic_pointer_move template argument was stripped by
// the archive extraction; presumably DataLeafNode.
DataLeafNode *LeafHandle::node() {
if (_leaf.get() == nullptr) {
auto loaded = _nodeStore->load(_blockId);
ASSERT(loaded != none, "Leaf not found");
auto leaf = dynamic_pointer_move(*loaded);
ASSERT(leaf != none, "Loaded leaf is not leaf node");
// Cache with ownership so the loaded node stays alive for this handle.
_leaf = WithOwnership(std::move(*leaf));
}
return _leaf.get();
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/LeafHandle.h 0000664 0000000 0000000 00000002534 13477012671 0025700 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_LEAFHANDLE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_LEAFHANDLE_H_
#include
#include
#include
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataLeafNode;
}
namespace datatreestore {
// Lightweight, movable handle to one leaf node: identified by block id,
// with the actual DataLeafNode loaded lazily on node(). Non-copyable.
class LeafHandle final {
public:
// Lazy variant: leaf is loaded from the store on first node() call.
LeafHandle(datanodestore::DataNodeStore *nodeStore, const blockstore::BlockId &blockId);
// Eager variant: wraps an already-loaded leaf (not owned).
LeafHandle(datanodestore::DataNodeStore *nodeStore, datanodestore::DataLeafNode *node);
LeafHandle(LeafHandle &&rhs) = default;
const blockstore::BlockId &blockId() {
return _blockId;
}
datanodestore::DataLeafNode *node();
datanodestore::DataNodeStore *nodeStore() {
return _nodeStore;
}
private:
datanodestore::DataNodeStore *_nodeStore;
blockstore::BlockId _blockId;
// Owned when lazily loaded, non-owning when constructed from a raw pointer.
cpputils::optional_ownership_ptr _leaf;
DISALLOW_COPY_AND_ASSIGN(LeafHandle);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/impl/ 0000775 0000000 0000000 00000000000 13477012671 0024501 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/datatreestore/impl/CachedValue.cpp 0000664 0000000 0000000 00000000032 13477012671 0027344 0 ustar 00root root 0000000 0000000 #include "CachedValue.h"
src/blobstore/implementations/onblocks/datatreestore/impl/CachedValue.h 0000664 0000000 0000000 00000002002 13477012671 0027010 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_CACHEDVALUE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_CACHEDVALUE_H_
#include
#include
#include
namespace blobstore {
namespace onblocks {
// TODO Test
// Thread-safe memoization cell: getOrCompute() returns the cached value or
// computes it once under an upgradeable lock; update() mutates or clears it.
// NOTE(review): template parameters and lock template arguments were stripped
// by the archive extraction (presumably template<class T> and
// boost::upgrade_lock<boost::shared_mutex> etc.).
template
class CachedValue final {
public:
CachedValue() :_cache(boost::none), _mutex() {}
// Returns the cached value, computing and storing it on a miss.
T getOrCompute(std::function compute) {
boost::upgrade_lock readLock(_mutex);
if (_cache == boost::none) {
// Upgrade to exclusive access only for the actual computation/store.
boost::upgrade_to_unique_lock writeLock(readLock);
_cache = compute();
}
return *_cache;
}
// Runs func with exclusive access to the cache slot (may set or clear it).
void update(std::function*)> func) {
boost::unique_lock writeLock(_mutex);
func(&_cache);
}
// Invalidates the cache; the next getOrCompute() recomputes.
void clear() {
update([] (boost::optional* cache) {
*cache = boost::none;
});
}
private:
boost::optional _cache;
boost::shared_mutex _mutex;
};
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/impl/LeafTraverser.cpp 0000664 0000000 0000000 00000043505 13477012671 0027761 0 ustar 00root root 0000000 0000000 #include "LeafTraverser.h"
#include
#include "../../datanodestore/DataLeafNode.h"
#include "../../datanodestore/DataInnerNode.h"
#include "../../datanodestore/DataNodeStore.h"
#include "../../utils/Math.h"
using std::function;
using std::vector;
using boost::none;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::dynamic_pointer_move;
using blobstore::onblocks::datanodestore::DataNodeStore;
using blobstore::onblocks::datanodestore::DataNode;
using blobstore::onblocks::datanodestore::DataInnerNode;
using blobstore::onblocks::datanodestore::DataLeafNode;
namespace blobstore {
namespace onblocks {
namespace datatreestore {
// Constructs a traverser; readOnlyTraversal forbids any tree modification
// (growing, leaf creation, depth changes) and is asserted throughout.
LeafTraverser::LeafTraverser(DataNodeStore *nodeStore, bool readOnlyTraversal)
: _nodeStore(nodeStore), _readOnlyTraversal(readOnlyTraversal) {
}
// Public entry point: traverses leaves [beginIndex, endIndex), possibly
// growing/shrinking the tree and replacing *root. The initial call is the
// left border of the traversal.
void LeafTraverser::traverseAndUpdateRoot(unique_ref* root, uint32_t beginIndex, uint32_t endIndex, function onExistingLeaf, function onCreateLeaf, function onBacktrackFromSubtree) {
_traverseAndUpdateRoot(root, beginIndex, endIndex, true, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// Core recursive driver: handles the leaf-root special case, dispatches into
// the existing subtree, grows tree depth one level at a time if the traversal
// extends past the current capacity, and shrinks a root chain afterwards.
// NOTE(review): the archive extraction stripped angle-bracket content here;
// in particular the declaration of maxLeavesForDepth appears swallowed into
// the TODO comment below — compare against upstream sources before editing.
void LeafTraverser::_traverseAndUpdateRoot(unique_ref* root, uint32_t beginIndex, uint32_t endIndex, bool isLeftBorderOfTraversal, function onExistingLeaf, function onCreateLeaf, function onBacktrackFromSubtree) {
ASSERT(beginIndex <= endIndex, "Invalid parameters");
//TODO Test cases with numLeaves < / >= beginIndex, ideally test all configurations:
// beginIndexdepth());
bool increaseTreeDepth = endIndex > maxLeavesForDepth;
ASSERT(!_readOnlyTraversal || !increaseTreeDepth, "Tried to grow a tree on a read only traversal");
if ((*root)->depth() == 0) {
// Root is a leaf: the tree has exactly one leaf (index 0).
DataLeafNode *leaf = dynamic_cast(root->get());
ASSERT(leaf != nullptr, "Depth 0 has to be leaf node");
// If the tree is about to grow, the current leaf must become full first.
if (increaseTreeDepth && leaf->numBytes() != _nodeStore->layout().maxBytesPerLeaf()) {
leaf->resize(_nodeStore->layout().maxBytesPerLeaf());
}
if (beginIndex == 0 && endIndex >= 1) {
bool isRightBorderLeaf = (endIndex == 1);
onExistingLeaf(0, isRightBorderLeaf, LeafHandle(_nodeStore, leaf));
}
} else {
DataInnerNode *inner = dynamic_cast(root->get());
ASSERT(inner != nullptr, "Depth != 0 has to be leaf node");
// Traverse the part of the range that fits into the current tree depth.
_traverseExistingSubtree(inner, std::min(beginIndex, maxLeavesForDepth),
std::min(endIndex, maxLeavesForDepth), 0, isLeftBorderOfTraversal, !increaseTreeDepth,
increaseTreeDepth, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// If the traversal goes too far right for a tree this depth, increase tree depth by one and continue traversal.
// This is recursive, i.e. will be repeated if the tree is still not deep enough.
// We don't increase to the full needed tree depth in one step, because we want the traversal to go as far as possible
// and only then increase the depth - this causes the tree to be in consistent shape (balanced) for longer.
if (increaseTreeDepth) {
ASSERT(!_readOnlyTraversal, "Can't increase tree depth in a read-only traversal");
// TODO Test cases that increase tree depth by 0, 1, 2, ... levels
*root = _increaseTreeDepth(std::move(*root));
_traverseAndUpdateRoot(root, std::max(beginIndex, maxLeavesForDepth), endIndex, false, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
} else {
// Once we're done growing the tree and done with the traversal, we might have to decrease tree depth,
// because the callbacks could have deleted nodes (this happens for example when shrinking the tree using a traversal).
_whileRootHasOnlyOneChildReplaceRootWithItsChild(root);
}
}
// Grows the tree by one level: copies the old root into a new block and turns
// the original root block into an inner node pointing at that copy, so the
// tree's root block id stays stable.
unique_ref LeafTraverser::_increaseTreeDepth(unique_ref root) {
ASSERT(!_readOnlyTraversal, "Can't increase tree depth in a read-only traversal");
auto copyOfOldRoot = _nodeStore->createNewNodeAsCopyFrom(*root);
return DataNode::convertToNewInnerNode(std::move(root), _nodeStore->layout(), *copyOfOldRoot);
}
// Block-id overload: loads the node for blockId and recurses. depth==0 means
// the node is a single leaf (indices restricted to 0/1); growLastLeaf forces
// the leaf to its maximum size (only legal on writing traversals).
void LeafTraverser::_traverseExistingSubtree(const blockstore::BlockId &blockId, uint8_t depth, uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, bool isLeftBorderOfTraversal, bool isRightBorderNode, bool growLastLeaf, function onExistingLeaf, function onCreateLeaf, function onBacktrackFromSubtree) {
if (depth == 0) {
ASSERT(beginIndex <= 1 && endIndex <= 1,
"If root node is a leaf, the (sub)tree has only one leaf - access indices must be 0 or 1.");
LeafHandle leafHandle(_nodeStore, blockId);
if (growLastLeaf) {
// Fill the last leaf up to full size before leaves to its right are added.
if (leafHandle.node()->numBytes() != _nodeStore->layout().maxBytesPerLeaf()) {
ASSERT(!_readOnlyTraversal, "Can't grow the last leaf in a read-only traversal");
leafHandle.node()->resize(_nodeStore->layout().maxBytesPerLeaf());
}
}
if (beginIndex == 0 && endIndex == 1) {
onExistingLeaf(leafOffset, isRightBorderNode, std::move(leafHandle));
}
} else {
auto node = _nodeStore->load(blockId);
if (node == none) {
throw std::runtime_error("Couldn't find child node " + blockId.ToString());
}
auto inner = dynamic_pointer_move(*node);
ASSERT(inner != none, "Has to be either leaf or inner node");
ASSERT((*inner)->depth() == depth, "Wrong depth given");
_traverseExistingSubtree(inner->get(), beginIndex, endIndex, leafOffset, isLeftBorderOfTraversal,
isRightBorderNode, growLastLeaf, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
}
// Inner-node overload: maps the leaf range onto child subtrees, recursing into
// existing children and creating new children (plus gap children) as needed.
// Finally calls onBacktrackFromSubtree if any leaf in this subtree was visited.
void LeafTraverser::_traverseExistingSubtree(DataInnerNode *root, uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, bool isLeftBorderOfTraversal, bool isRightBorderNode, bool growLastLeaf, function onExistingLeaf, function onCreateLeaf, function onBacktrackFromSubtree) {
ASSERT(beginIndex <= endIndex, "Invalid parameters");
//TODO Call callbacks for different leaves in parallel.
uint32_t leavesPerChild = _maxLeavesForTreeDepth(root->depth()-1);
uint32_t beginChild = beginIndex/leavesPerChild;
uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
ASSERT(endChild <= _nodeStore->layout().maxChildrenPerInnerNode(), "Traversal region would need increasing the tree depth. This should have happened before calling this function.");
uint32_t numChildren = root->numChildren();
ASSERT(!growLastLeaf || endChild >= numChildren, "Can only grow last leaf if it exists");
ASSERT(!_readOnlyTraversal || endChild <= numChildren, "Can only traverse out of bounds in a read-only traversal");
bool shouldGrowLastExistingLeaf = growLastLeaf || endChild > numChildren;
// If we traverse outside of the valid region (i.e. usually would only traverse to new leaves and not to the last leaf),
// we still have to descend to the last old child to fill it with leaves and grow the last old leaf.
if (isLeftBorderOfTraversal && beginChild >= numChildren) {
ASSERT(numChildren > 0, "Node doesn't have children.");
auto childBlockId = root->readLastChild().blockId();
uint32_t childOffset = (numChildren-1) * leavesPerChild;
// Descend purely for its side effect (grow last leaf); no leaves are visited.
_traverseExistingSubtree(childBlockId, root->depth()-1, leavesPerChild, leavesPerChild, childOffset, true, false, true,
[] (uint32_t /*index*/, bool /*isRightBorderNode*/, LeafHandle /*leaf*/) {ASSERT(false, "We don't actually traverse any leaves.");},
[] (uint32_t /*index*/) -> Data {ASSERT(false, "We don't actually traverse any leaves.");},
[] (DataInnerNode* /*node*/) {ASSERT(false, "We don't actually traverse any leaves.");});
}
// Traverse existing children
for (uint32_t childIndex = beginChild; childIndex < std::min(endChild, numChildren); ++childIndex) {
auto childBlockId = root->readChild(childIndex).blockId();
uint32_t childOffset = childIndex * leavesPerChild;
// Translate the global leaf range into this child's local index space.
uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
bool isFirstChild = (childIndex == beginChild);
bool isLastExistingChild = (childIndex == numChildren - 1);
bool isLastChild = isLastExistingChild && (numChildren == endChild);
ASSERT(localEndIndex <= leavesPerChild, "We don't want the child to add a tree level because it doesn't have enough space for the traversal.");
_traverseExistingSubtree(childBlockId, root->depth()-1, localBeginIndex, localEndIndex, leafOffset + childOffset, isLeftBorderOfTraversal && isFirstChild,
isRightBorderNode && isLastChild, shouldGrowLastExistingLeaf && isLastExistingChild, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// Traverse new children (including gap children, i.e. children that are created but not traversed because they're to the right of the current size, but to the left of the traversal region)
for (uint32_t childIndex = numChildren; childIndex < endChild; ++childIndex) {
ASSERT(!_readOnlyTraversal, "Can't create new children in a read-only traversal");
uint32_t childOffset = childIndex * leavesPerChild;
uint32_t localBeginIndex = std::min(leavesPerChild, utils::maxZeroSubtraction(beginIndex, childOffset));
uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
// Gap children (fully left of the traversal) get max-size zero leaves instead of user callbacks.
auto leafCreator = (childIndex >= beginChild) ? onCreateLeaf : _createMaxSizeLeaf();
auto child = _createNewSubtree(localBeginIndex, localEndIndex, leafOffset + childOffset, root->depth() - 1, leafCreator, onBacktrackFromSubtree);
root->addChild(*child);
}
// This is only a backtrack, if we actually visited a leaf here.
if (endIndex > beginIndex) {
onBacktrackFromSubtree(root);
}
}
// Builds a brand-new subtree of the given depth covering leaves
// [beginIndex, endIndex): gap leaves left of beginIndex are created at max
// size with zeroes; traversed leaves come from onCreateLeaf. Only legal on
// writing traversals.
unique_ref LeafTraverser::_createNewSubtree(uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, uint8_t depth, function onCreateLeaf, function onBacktrackFromSubtree) {
ASSERT(!_readOnlyTraversal, "Can't create a new subtree in a read-only traversal");
ASSERT(beginIndex <= endIndex, "Invalid parameters");
if (0 == depth) {
ASSERT(beginIndex <= 1 && endIndex == 1, "With depth 0, we can only traverse one or zero leaves (i.e. traverse one leaf or traverse a gap leaf).");
// beginIndex == 1 means this single leaf is a gap leaf: fill with zeroes.
auto leafCreator = (beginIndex
== 0) ? onCreateLeaf : _createMaxSizeLeaf();
return _nodeStore->createNewLeafNode(leafCreator(leafOffset));
}
uint8_t minNeededDepth = utils::ceilLog(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast(endIndex));
ASSERT(depth >= minNeededDepth, "Given tree depth doesn't fit given number of leaves to create.");
uint32_t leavesPerChild = _maxLeavesForTreeDepth(depth-1);
uint32_t beginChild = beginIndex/leavesPerChild;
uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
vector children;
children.reserve(endChild);
// TODO Remove redundancy of following two for loops by using min/max for calculating the parameters of the recursive call.
// Create gap children (i.e. children before the traversal but after the current size)
for (uint32_t childIndex = 0; childIndex < beginChild; ++childIndex) {
uint32_t childOffset = childIndex * leavesPerChild;
auto child = _createNewSubtree(leavesPerChild, leavesPerChild, leafOffset + childOffset, depth - 1,
[] (uint32_t /*index*/)->Data {ASSERT(false, "We're only creating gap leaves here, not traversing any.");},
[] (DataInnerNode* /*node*/) {});
ASSERT(child->depth() == depth-1, "Created child node has wrong depth");
children.push_back(child->blockId());
}
// Create new children that are traversed
for(uint32_t childIndex = beginChild; childIndex < endChild; ++childIndex) {
uint32_t childOffset = childIndex * leavesPerChild;
// Map the global range into this child's local leaf indices.
uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
auto child = _createNewSubtree(localBeginIndex, localEndIndex, leafOffset + childOffset, depth - 1, onCreateLeaf, onBacktrackFromSubtree);
ASSERT(child->depth() == depth-1, "Created child node has wrong depth");
children.push_back(child->blockId());
}
ASSERT(children.size() > 0, "No children created");
auto newNode = _nodeStore->createNewInnerNode(depth, children);
// This is only a backtrack, if we actually created a leaf here.
if (endIndex > beginIndex) {
onBacktrackFromSubtree(newNode.get());
}
return newNode;
}
// Returns the maximum number of leaves a tree of the given depth can hold,
// i.e. maxChildrenPerInnerNode ^ depth.
// NOTE(review): the static_cast template argument was missing in the extracted
// source and has been reconstructed — confirm against the original repository.
uint32_t LeafTraverser::_maxLeavesForTreeDepth(uint8_t depth) const {
  return utils::intPow(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(depth));
}
// Returns a leaf-creator callback that produces a zero-filled leaf of maximum size,
// ignoring the leaf index. Used for gap leaves that are created but not traversed.
// NOTE(review): the function<> template argument was missing in the extracted source
// and has been reconstructed from usage — confirm against the original repository.
function<Data (uint32_t index)> LeafTraverser::_createMaxSizeLeaf() const {
  ASSERT(!_readOnlyTraversal, "Can't create a new leaf in a read-only traversal");

  // Capture the leaf size by value so the returned lambda doesn't depend on `this`.
  uint64_t maxBytesPerLeaf = _nodeStore->layout().maxBytesPerLeaf();
  return [maxBytesPerLeaf] (uint32_t /*index*/) -> Data {
    return Data(maxBytesPerLeaf).FillWithZeroes();
  };
}
void LeafTraverser::_whileRootHasOnlyOneChildReplaceRootWithItsChild(unique_ref* root) {
DataInnerNode *inner = dynamic_cast(root->get());
if (inner != nullptr && inner->numChildren() == 1) {
ASSERT(!_readOnlyTraversal, "Can't decrease tree depth in a read-only traversal");
auto newRoot = _whileRootHasOnlyOneChildRemoveRootReturnChild(inner->readChild(0).blockId());
*root = _nodeStore->overwriteNodeWith(std::move(*root), *newRoot);
_nodeStore->remove(std::move(newRoot));
}
}
// Recursively descends single-child inner nodes starting at blockId, removing each
// single-child node on the way down, and returns the first node that is either a leaf
// or an inner node with more than one child.
// NOTE(review): template arguments were missing in the extracted source and have been
// reconstructed from usage — confirm against the original repository.
unique_ref<DataNode> LeafTraverser::_whileRootHasOnlyOneChildRemoveRootReturnChild(const blockstore::BlockId &blockId) {
  ASSERT(!_readOnlyTraversal, "Can't decrease tree depth in a read-only traversal");

  auto current = _nodeStore->load(blockId);
  ASSERT(current != none, "Node not found");
  auto inner = dynamic_pointer_move<DataInnerNode>(*current);
  if (inner == none) {
    // Reached a leaf: this is the new root of the collapsed subtree.
    return std::move(*current);
  } else if ((*inner)->numChildren() == 1) {
    // Single-child chain continues: recurse into the child, then delete this node.
    auto result = _whileRootHasOnlyOneChildRemoveRootReturnChild((*inner)->readChild(0).blockId());
    _nodeStore->remove(std::move(*inner));
    return result;
  } else {
    // Inner node with multiple children: stop collapsing here.
    return std::move(*inner);
  }
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/impl/LeafTraverser.h 0000664 0000000 0000000 00000010465 13477012671 0027425 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_LEAFTRAVERSER_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_LEAFTRAVERSER_H_
#include
#include
#include
#include
#include "blobstore/implementations/onblocks/datatreestore/LeafHandle.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataNode;
class DataLeafNode;
class DataInnerNode;
}
namespace datatreestore {
/**
* LeafTraverser can create leaves if they don't exist yet (i.e. endIndex > numLeaves), but
* it cannot increase the tree depth. That is, the tree has to be deep enough to allow
* creating the number of leaves.
*/
class LeafTraverser final {
public:
LeafTraverser(datanodestore::DataNodeStore *nodeStore, bool readOnlyTraversal);
void traverseAndUpdateRoot(
cpputils::unique_ref* root, uint32_t beginIndex, uint32_t endIndex,
std::function onExistingLeaf,
std::function onCreateLeaf,
std::function onBacktrackFromSubtree);
private:
datanodestore::DataNodeStore *_nodeStore;
const bool _readOnlyTraversal;
void _traverseAndUpdateRoot(
cpputils::unique_ref