==> .circleci/config.yml <==
# This config file is a dummy CircleCI config that does nothing. We migrated away from CircleCI to GitHub Actions.
# But our release/0.10 branch still uses CircleCI, so we can't disable the service entirely and need some way
# to disable it only for newer versions. That's what this file is doing.
version: 2.1
jobs:
build:
docker:
- image: circleci/node:11.12.0
steps:
- run:
name: Dummy
command: 'echo Not running any Circle CI'

==> .clang-tidy <==
---
# TODO Enable (some of) the explicitly disabled checks. Possibly needs helper types from gsl library or similar to enable full cppcoreguidelines.
# TODO Enable more checks (google-*, hicpp-*, llvm-*, modernize-*, mpi-*, performance-*, readability-*)
# TODO Maybe just enable * and disable a list instead?
# TODO Check if there are new checks in clang-tidy-9 and later and potentially enable them
Checks: |
clang-diagnostic-*,
clang-analyzer-*,
bugprone-*,
cert-*,
cppcoreguidelines-*,
misc-*,
boost-use-to-string,
-cert-env33-c,
-cert-err58-cpp,
-cert-err60-cpp,
-bugprone-macro-parentheses,
-bugprone-exception-escape,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-special-member-functions,
-cppcoreguidelines-pro-type-cstyle-cast,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-type-vararg,
-cppcoreguidelines-avoid-goto,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-non-private-member-variables-in-classes,
-cppcoreguidelines-avoid-non-const-global-variables,
-clang-analyzer-optin.cplusplus.VirtualCall,
-clang-analyzer-cplusplus.NewDeleteLeaks,
-misc-macro-parentheses,
-misc-non-private-member-variables-in-classes,
-misc-unused-raii
WarningsAsErrors: '*'
HeaderFilterRegex: '/src/|/test/'
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
...
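# To try this configuration on a single file (a sketch; the file path is illustrative,
# and CI uses the repo's run-clang-tidy.sh wrapper instead):
#   mkdir build && cd build
#   cmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
#   clang-tidy -p . ../src/cryfs-cli/main.cpp
# clang-tidy automatically picks up the nearest .clang-tidy file above the source file.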

==> .github/ISSUE_TEMPLATE.md <==
## Expected Behavior
## Actual Behavior
## Steps to Reproduce the Problem
1.
2.
3.
## Specifications
- CryFS Version:
- Operating System (incl. Version):

==> .github/workflows/actions/install_local_dependencies/action.yaml <==
name: 'Install local dependencies'
description: 'Install local dependencies'
runs:
using: "composite"
steps:
- name: Install local dependencies
shell: bash
run: |
set -v
# TODO Cache these dependencies for faster runtime
export NUMCORES=$(nproc); if [ -z "$NUMCORES" ]; then export NUMCORES=$(sysctl -n hw.ncpu); fi
echo Using $NUMCORES cores
echo Download range-v3
cd ~
wget https://github.com/ericniebler/range-v3/archive/0.11.0.tar.gz -O range-v3-0.11.0.tar.gz
if [ $(sha512sum range-v3-0.11.0.tar.gz | awk '{print $1;}') == "9d6cdcbc1e50104206ba731c3bdc9aab3acfcf69cd83f0e0b4de18b88df2a9e73d64e55638421768d4433c542b6619f6e5af6b17cccd3090cf8b4d4efe9863e4" ]; then
echo Correct sha512sum
else
echo Wrong sha512sum
sha512sum range-v3-0.11.0.tar.gz
exit 1
fi
tar -xvf range-v3-0.11.0.tar.gz
cd range-v3-0.11.0/
echo Install range-v3
mkdir build
cd build
cmake .. -DRANGES_HAS_WERROR=off -DRANGE_V3_EXAMPLES=off -DRANGE_V3_TESTS=off
make -j$NUMCORES
sudo make install
cd ~
rm -rf range-v3-0.11.0
rm range-v3-0.11.0.tar.gz
echo Download spdlog
cd ~
wget https://github.com/gabime/spdlog/archive/v1.8.5.tar.gz -O spdlog.tar.gz
if [ $(sha512sum spdlog.tar.gz | awk '{print $1;}') == "77cc9df0c40bbdbfe1f3e5818dccf121918bfceac28f2608f39e5bf944968b7e8e24a6fc29f01bc58a9bae41b8892d49cfb59c196935ec9868884320b50f130c" ]; then
echo Correct sha512sum
else
echo Wrong sha512sum
sha512sum spdlog.tar.gz
exit 1
fi
tar -xvf spdlog.tar.gz
rm spdlog.tar.gz
cd spdlog-1.8.5
echo Install spdlog
mkdir build
cd build
cmake ..
make -j$NUMCORES
sudo make install
echo Download boost
cd ~
wget -O boost.tar.bz2 https://sourceforge.net/projects/boost/files/boost/1.75.0/boost_1_75_0.tar.bz2/download
if [ $(sha512sum boost.tar.bz2 | awk '{print $1;}') == "d86f060245e98dca5c7f3f831c98ea9ccbfa8310f20830dd913d9d4c939fbe7cb94accd35f1128e7c4faf6c27adb6f4bb54e5477a6bde983dfc7aa33c4eed03a" ]; then
echo Correct sha512sum
else
echo Wrong sha512sum
sha512sum boost.tar.bz2
exit 1
fi
echo Extracting boost
tar -xf boost.tar.bz2
rm boost.tar.bz2
cd boost_1_75_0
echo Install boost
./bootstrap.sh --with-libraries=filesystem,system,thread,chrono,program_options
sudo ./b2 link=shared cxxflags=-fPIC --prefix=/usr -d0 -j$NUMCORES install
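# The three download-verify-build blocks above repeat the same pattern. They could
# share a helper like this (an untested sketch; the function name is ours, not part
# of this repo):
#   download_and_verify() { # args: url output_file expected_sha512
#     wget "$1" -O "$2"
#     if [ "$(sha512sum "$2" | awk '{print $1;}')" != "$3" ]; then
#       echo "Wrong sha512sum for $2:"; sha512sum "$2"; exit 1
#     fi
#     echo "Correct sha512sum for $2"
#   }
#   download_and_verify https://github.com/ericniebler/range-v3/archive/0.11.0.tar.gz \
#     range-v3-0.11.0.tar.gz 9d6cdcbc1e50104206ba731c3bdc9aab3acfcf69cd83f0e0b4de18b88df2a9e73d64e55638421768d4433c542b6619f6e5af6b17cccd3090cf8b4d4efe9863e4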

==> .github/workflows/actions/run_build/action.yaml <==
name: 'Build'
description: 'Compile CryFS'
inputs:
cc:
description: "Which C compiler to use for the build"
required: true
cxx:
description: "Which C++ compiler to use for the build"
required: true
build_type:
description: "Which cmake build type to use (e.g. Release, Debug, RelWithDebInfo)"
required: true
extra_cmake_flags:
description: "Extra flags to add to the cmake command"
required: true
extra_cxxflags:
description: "Extra flags to add to the compiler"
required: true
runs:
using: "composite"
steps:
- name: Show build system information
shell: bash
run: |
set -v
echo CMake version:
cmake --version
echo Ninja version:
ninja --version
echo CC: ${{inputs.cc}}
${{inputs.cc}} --version
echo CXX: ${{inputs.cxx}}
${{inputs.cxx}} --version
echo CCache:
ccache --version
ccache -s
- name: Run cmake
shell: bash
run: |
set -v
export CXXFLAGS="$CXXFLAGS ${{inputs.extra_cxxflags}}"
if [[ "${{inputs.cxx}}" == clang* && "${{inputs.build_type}}" == "Debug" ]]; then
# TODO Our linux clang build actually uses libstdc++11 instead of libc++, we need to fix this check
# TODO Add the corresponding libstdc++11 debug macros when building with gcc
echo We are doing a debug build on clang. Adding some more debug flags for libc++
export CXXFLAGS="$CXXFLAGS -D_LIBCPP_DEBUG=1 -D_LIBCPP_ENABLE_NODISCARD=1 -D_LIBCPP_ENABLE_DEPRECATION_WARNINGS=1"
fi
mkdir build
cd build
cmake .. -GNinja -DCMAKE_CXX_COMPILER=${{inputs.cxx}} -DCMAKE_C_COMPILER=${{inputs.cc}} -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=${{inputs.build_type}} -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache ${{inputs.extra_cmake_flags}}
- name: Run ninja
shell: bash
run: |
set -v
cd build
ninja
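# For reference, a local reproduction of this action with example values substituted
# for the inputs (illustrative; the ccache launchers are omitted):
#   mkdir build && cd build
#   cmake .. -GNinja -DCMAKE_CXX_COMPILER=clang++-11 -DCMAKE_C_COMPILER=clang-11 \
#     -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=RelWithDebInfo
#   ninja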

==> .github/workflows/actions/run_tests/action.yaml <==
name: 'Test'
description: 'Run CryFS Tests'
inputs:
gtest_args:
description: "Extra arguments for gtest runners, for example tests to exclude"
required: true
extra_env_vars:
description: "Extra environment variables to set before running tests"
required: true
runs:
using: "composite"
steps:
- name: Run tests
shell: bash
run: |
set -v
echo Running on ${{runner.os}}
cd build
export ${{ inputs.extra_env_vars }}
./test/gitversion/gitversion-test ${{inputs.gtest_args}}
./test/cpp-utils/cpp-utils-test ${{inputs.gtest_args}}
./test/parallelaccessstore/parallelaccessstore-test ${{inputs.gtest_args}}
./test/blockstore/blockstore-test ${{inputs.gtest_args}}
./test/blobstore/blobstore-test ${{inputs.gtest_args}}
./test/cryfs/cryfs-test ${{inputs.gtest_args}}
# TODO Also run on macOS once fixed
if [[ "${{runner.os}}" == "macOS" ]]; then
echo Skipping some tests because they are not fixed for macOS yet
else
# TODO Also run with TSAN once fixed
if [[ "${{matrix.name}}" != "TSAN" ]]; then
./test/fspp/fspp-test ${{inputs.gtest_args}}
fi
./test/cryfs-cli/cryfs-cli-test ${{inputs.gtest_args}}
fi
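# Example of what gtest_args may contain (standard googletest filter syntax; the
# exclusions below are illustrative, CI's real TSAN filter is defined in main.yaml):
#   ./test/cpp-utils/cpp-utils-test --gtest_filter=-BacktraceTest.*:SignalCatcherTest.*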

==> .github/workflows/actions/setup_linux/action.yaml <==
name: 'Setup Linux'
description: 'Setup Linux'
inputs:
os:
description: "Exact os (i.e. ubuntu version) this runs on"
required: true
extra_apt_packages:
description: "Job-specific apt packages to install (e.g. the compiler)"
required: true
runs:
using: "composite"
steps:
- name: Install Linux dependencies
shell: bash
run: |
echo 'Acquire::Retries "20";' | sudo tee -a /etc/apt/apt.conf.d/80-retries
if [[ "${{inputs.os}}" == "ubuntu-22.04" ]]; then
echo Adding apt repositories for newer clang versions on Ubuntu 22.04
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
sudo touch /etc/apt/sources.list.d/clang.list
sudo chmod o+w /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/jammy/ llvm-toolchain-jammy main" >> /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-15 main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/jammy/ llvm-toolchain-jammy-15 main" >> /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" >> /etc/apt/sources.list.d/clang.list
sudo chmod o-w /etc/apt/sources.list.d/clang.list
elif [[ "${{inputs.os}}" == "ubuntu-20.04" ]]; then
echo Adding apt repositories for newer clang versions on Ubuntu 20.04
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
sudo touch /etc/apt/sources.list.d/clang.list
sudo chmod o+w /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main" >> /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" >> /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main" >> /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main" >> /etc/apt/sources.list.d/clang.list
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" >> /etc/apt/sources.list.d/clang.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" >> /etc/apt/sources.list.d/clang.list
sudo chmod o-w /etc/apt/sources.list.d/clang.list
fi
sudo apt-get update
sudo apt-get install ninja-build libcurl4-openssl-dev libfuse-dev ccache ${{inputs.extra_apt_packages}}
- name: Speed up random generator
run: |
set -v
# Use /dev/urandom when /dev/random is accessed to use less entropy
sudo cp -a /dev/urandom /dev/random
shell: bash
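# The repeated "echo ... >> clang.list" lines above could also be generated in a
# loop, e.g. for Ubuntu 22.04 (a sketch, not what CI currently runs):
#   for suite in llvm-toolchain-jammy llvm-toolchain-jammy-15 llvm-toolchain-jammy-16; do
#     echo "deb http://apt.llvm.org/jammy/ $suite main"
#     echo "deb-src http://apt.llvm.org/jammy/ $suite main"
#   done | sudo tee /etc/apt/sources.list.d/clang.list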

==> .github/workflows/actions/setup_macos/action.yaml <==
name: 'Setup macOS'
description: 'Setup macOS'
inputs:
extra_homebrew_packages:
description: "Job-specific homebrew packages to install (e.g. the compiler)"
required: true
runs:
using: "composite"
steps:
- name: Install macOS dependencies
shell: bash
run: |
brew install ninja macfuse libomp ccache md5sha1sum pkg-config ${{inputs.extra_homebrew_packages}}

==> .github/workflows/actions/setup_windows/action.yaml <==
name: 'Setup Windows'
description: 'Setup Windows'
runs:
using: "composite"
steps:
- name: Install Windows dependencies
shell: bash
run: |
choco install -y ninja
choco install -y dokany --version 1.2.2.1001 --installargs INSTALLDEVFILES=1

==> .github/workflows/main.yaml <==
name: CI
on: ['push', 'pull_request']
jobs:
linux_macos:
name: CI (Linux/macOS)
strategy:
fail-fast: false
matrix:
name: [""]
os:
- macos-11
- macos-12
- macos-13
- ubuntu-20.04
- ubuntu-22.04
compiler:
- cxx: g++-7
cc: gcc-7
macos_cxx: g++-7
macos_cc: gcc-7
homebrew_package: gcc@7
apt_package: g++-7
- cxx: g++-8
cc: gcc-8
macos_cxx: g++-8
macos_cc: gcc-8
homebrew_package: gcc@8
apt_package: g++-8
- cxx: g++-9
cc: gcc-9
macos_cxx: g++-9
macos_cc: gcc-9
apt_package: g++-9
homebrew_package: gcc@9
- cxx: g++-10
cc: gcc-10
macos_cxx: g++-10
macos_cc: gcc-10
apt_package: g++-10
homebrew_package: gcc@10
- cxx: g++-11
cc: gcc-11
macos_cxx: g++-11
macos_cc: gcc-11
apt_package: g++-11
homebrew_package: gcc@11
- cxx: g++-12
cc: gcc-12
macos_cxx: g++-12
macos_cc: gcc-12
apt_package: g++-12
homebrew_package: gcc@12
- cxx: g++-13
cc: gcc-13
macos_cxx: g++-13
macos_cc: gcc-13
apt_package: g++-13
homebrew_package: gcc@13
- cxx: clang++-7
cc: clang-7
macos_cxx: /usr/local/opt/llvm@7/bin/clang++
macos_cc: /usr/local/opt/llvm@7/bin/clang
apt_package: clang-7
homebrew_package: llvm@7
- cxx: clang++-8
cc: clang-8
macos_cxx: /usr/local/opt/llvm@8/bin/clang++
macos_cc: /usr/local/opt/llvm@8/bin/clang
apt_package: clang-8
homebrew_package: llvm@8
- cxx: clang++-9
cc: clang-9
macos_cxx: /usr/local/opt/llvm@9/bin/clang++
macos_cc: /usr/local/opt/llvm@9/bin/clang
apt_package: clang-9
homebrew_package: llvm@9
- cxx: clang++-10
cc: clang-10
macos_cxx: /usr/local/opt/llvm@10/bin/clang++
macos_cc: /usr/local/opt/llvm@10/bin/clang
apt_package: clang-10
homebrew_package: llvm@10
- cxx: clang++-11
cc: clang-11
macos_cxx: /usr/local/opt/llvm@11/bin/clang++
macos_cc: /usr/local/opt/llvm@11/bin/clang
apt_package: clang-11 libomp5-11 libomp-11-dev
homebrew_package: llvm@11
- cxx: clang++-12
cc: clang-12
macos_cxx: /usr/local/opt/llvm@12/bin/clang++
macos_cc: /usr/local/opt/llvm@12/bin/clang
apt_package: clang-12 libomp5-12 libomp-12-dev
homebrew_package: llvm@12
- cxx: clang++-13
cc: clang-13
macos_cxx: /usr/local/opt/llvm@13/bin/clang++
macos_cc: /usr/local/opt/llvm@13/bin/clang
apt_package: clang-13 libomp5-13 libomp-13-dev
homebrew_package: llvm@13
- cxx: clang++-14
cc: clang-14
macos_cxx: /usr/local/opt/llvm@14/bin/clang++
macos_cc: /usr/local/opt/llvm@14/bin/clang
apt_package: clang-14 libomp5-14 libomp-14-dev
homebrew_package: llvm@14
- cxx: clang++-15
cc: clang-15
macos_cxx: /usr/local/opt/llvm@15/bin/clang++
macos_cc: /usr/local/opt/llvm@15/bin/clang
apt_package: clang-15 libomp5-15 libomp-15-dev
homebrew_package: llvm@15
# Apple Clang
# - cxx: clang++
# cc: clang
# homebrew_package: ""
build_type:
- Debug
- Release
- RelWithDebInfo
extra_cmake_flags: [""]
extra_cxxflags: [""]
extra_env_vars_for_test: [""]
install_dependencies_manually: [false]
run_build: [true]
run_tests: [true]
run_clang_tidy: [false]
exclude:
# macOS CI doesn't have Clang 7, 8, 9, 10 or GCC 7, 8 anymore
- os: macos-11
compiler: {cxx: clang++-7, cc: clang-7, macos_cxx: /usr/local/opt/llvm@7/bin/clang++, macos_cc: /usr/local/opt/llvm@7/bin/clang, apt_package: clang-7, homebrew_package: llvm@7}
- os: macos-11
compiler: {cxx: clang++-8, cc: clang-8, macos_cxx: /usr/local/opt/llvm@8/bin/clang++, macos_cc: /usr/local/opt/llvm@8/bin/clang, apt_package: clang-8, homebrew_package: llvm@8}
- os: macos-11
compiler: {cxx: clang++-9, cc: clang-9, macos_cxx: /usr/local/opt/llvm@9/bin/clang++, macos_cc: /usr/local/opt/llvm@9/bin/clang, apt_package: clang-9, homebrew_package: llvm@9}
- os: macos-11
compiler: {cxx: clang++-10, cc: clang-10, macos_cxx: /usr/local/opt/llvm@10/bin/clang++, macos_cc: /usr/local/opt/llvm@10/bin/clang, apt_package: clang-10, homebrew_package: llvm@10}
- os: macos-11
compiler: {cxx: g++-7, cc: gcc-7, macos_cxx: g++-7, macos_cc: gcc-7, homebrew_package: gcc@7, apt_package: g++-7}
- os: macos-11
compiler: {cxx: g++-8, cc: gcc-8, macos_cxx: g++-8, macos_cc: gcc-8, homebrew_package: gcc@8, apt_package: g++-8}
- os: macos-12
compiler: {cxx: clang++-7, cc: clang-7, macos_cxx: /usr/local/opt/llvm@7/bin/clang++, macos_cc: /usr/local/opt/llvm@7/bin/clang, apt_package: clang-7, homebrew_package: llvm@7}
- os: macos-12
compiler: {cxx: clang++-8, cc: clang-8, macos_cxx: /usr/local/opt/llvm@8/bin/clang++, macos_cc: /usr/local/opt/llvm@8/bin/clang, apt_package: clang-8, homebrew_package: llvm@8}
- os: macos-12
compiler: {cxx: clang++-9, cc: clang-9, macos_cxx: /usr/local/opt/llvm@9/bin/clang++, macos_cc: /usr/local/opt/llvm@9/bin/clang, apt_package: clang-9, homebrew_package: llvm@9}
- os: macos-12
compiler: {cxx: g++-7, cc: gcc-7, macos_cxx: g++-7, macos_cc: gcc-7, homebrew_package: gcc@7, apt_package: g++-7}
- os: macos-12
compiler: {cxx: g++-8, cc: gcc-8, macos_cxx: g++-8, macos_cc: gcc-8, homebrew_package: gcc@8, apt_package: g++-8}
- os: macos-12
compiler: {cxx: g++-9, cc: gcc-9, macos_cxx: g++-9, macos_cc: gcc-9, homebrew_package: gcc@9, apt_package: g++-9}
- os: macos-12
compiler: {cxx: clang++-10, cc: clang-10, macos_cxx: /usr/local/opt/llvm@10/bin/clang++, macos_cc: /usr/local/opt/llvm@10/bin/clang, apt_package: clang-10, homebrew_package: llvm@10}
- os: macos-13
compiler: {cxx: clang++-7, cc: clang-7, macos_cxx: /usr/local/opt/llvm@7/bin/clang++, macos_cc: /usr/local/opt/llvm@7/bin/clang, apt_package: clang-7, homebrew_package: llvm@7}
- os: macos-13
compiler: {cxx: clang++-10, cc: clang-10, macos_cxx: /usr/local/opt/llvm@10/bin/clang++, macos_cc: /usr/local/opt/llvm@10/bin/clang, apt_package: clang-10, homebrew_package: llvm@10}
# Ubuntu 20.04 doesn't have GCC 12, 13 yet
- os: ubuntu-20.04
compiler: {cxx: g++-12, cc: gcc-12, macos_cxx: g++-12, macos_cc: gcc-12, homebrew_package: gcc@12, apt_package: g++-12}
- os: ubuntu-20.04
compiler: {cxx: g++-13, cc: gcc-13, macos_cxx: g++-13, macos_cc: gcc-13, homebrew_package: gcc@13, apt_package: g++-13}
# Ubuntu 22.04 doesn't have GCC 7, 8 or Clang 7, 8, 9, 10 anymore
- os: ubuntu-22.04
compiler: {cxx: g++-7, cc: gcc-7, macos_cxx: g++-7, macos_cc: gcc-7, homebrew_package: gcc@7, apt_package: g++-7}
- os: ubuntu-22.04
compiler: {cxx: g++-8, cc: gcc-8, macos_cxx: g++-8, macos_cc: gcc-8, homebrew_package: gcc@8, apt_package: g++-8}
- os: ubuntu-22.04
compiler: {cxx: clang++-7, cc: clang-7, macos_cxx: /usr/local/opt/llvm@7/bin/clang++, macos_cc: /usr/local/opt/llvm@7/bin/clang, apt_package: clang-7, homebrew_package: llvm@7}
- os: ubuntu-22.04
compiler: {cxx: clang++-8, cc: clang-8, macos_cxx: /usr/local/opt/llvm@8/bin/clang++, macos_cc: /usr/local/opt/llvm@8/bin/clang, apt_package: clang-8, homebrew_package: llvm@8}
- os: ubuntu-22.04
compiler: {cxx: clang++-9, cc: clang-9, macos_cxx: /usr/local/opt/llvm@9/bin/clang++, macos_cc: /usr/local/opt/llvm@9/bin/clang, apt_package: clang-9, homebrew_package: llvm@9}
- os: ubuntu-22.04
compiler: {cxx: clang++-10, cc: clang-10, macos_cxx: /usr/local/opt/llvm@10/bin/clang++, macos_cc: /usr/local/opt/llvm@10/bin/clang, apt_package: clang-10, homebrew_package: llvm@10}
# Clang 11 on Ubuntu seems to have a bug that fails CI
- os: ubuntu-22.04
compiler: {cxx: clang++-11, cc: clang-11, macos_cxx: /usr/local/opt/llvm@11/bin/clang++, macos_cc: /usr/local/opt/llvm@11/bin/clang, apt_package: "clang-11 libomp5-11 libomp-11-dev", homebrew_package: llvm@11}
build_type: Debug
include:
- name: Local dependencies
os: ubuntu-22.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 libomp5-11 libomp-11-dev
build_type: RelWithDebInfo
extra_cmake_flags: -DDEPENDENCY_CONFIG=../cmake-utils/DependenciesFromLocalSystem.cmake
extra_cxxflags: ""
extra_env_vars_for_test: ""
install_dependencies_manually: true
run_build: true
run_tests: true
- name: Werror gcc
os: ubuntu-22.04
compiler:
cxx: g++-9
cc: gcc-9
apt_package: g++-9
build_type: RelWithDebInfo
extra_cmake_flags: -DUSE_WERROR=on
extra_cxxflags: ""
install_dependencies_manually: false
run_build: true
run_tests: false
- name: Werror clang
os: ubuntu-22.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 libomp5-11 libomp-11-dev
build_type: RelWithDebInfo
extra_cmake_flags: -DUSE_WERROR=on
extra_cxxflags: ""
install_dependencies_manually: false
run_build: true
run_tests: false
- name: No compatibility
os: ubuntu-22.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 libomp5-11 libomp-11-dev
build_type: RelWithDebInfo
extra_cmake_flags: ""
extra_cxxflags: "-DCRYFS_NO_COMPATIBILITY"
extra_env_vars_for_test: ""
install_dependencies_manually: false
run_build: true
run_tests: true
- name: ASAN
# TODO Update to ubuntu-22.04
os: ubuntu-20.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 libomp5-11 libomp-11-dev
build_type: Debug
# OpenMP crashes under asan. Disable OpenMP.
# TODO is it enough to replace this with omp_num_threads: 1 ?
extra_cmake_flags: "-DDISABLE_OPENMP=ON"
extra_cxxflags: "-O1 -fsanitize=address -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-common -fsanitize-address-use-after-scope"
extra_env_vars_for_test: ASAN_OPTIONS="detect_leaks=1 check_initialization_order=1 detect_stack_use_after_return=1 detect_invalid_pointer_pairs=1 atexit=1"
install_dependencies_manually: false
run_build: true
run_tests: true
- name: UBSAN
# TODO Update to ubuntu-22.04
os: ubuntu-20.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 libomp5-11 libomp-11-dev
build_type: Debug
# OpenMP crashes under ubsan. Disable OpenMP.
# TODO is it enough to replace this with omp_num_threads: 1 ?
extra_cmake_flags: "-DDISABLE_OPENMP=ON"
extra_cxxflags: "-O1 -fno-sanitize-recover=undefined,nullability,implicit-conversion,unsigned-integer-overflow,local-bounds,float-divide-by-zero -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-common"
extra_env_vars_for_test: UBSAN_OPTIONS="print_stacktrace=1"
install_dependencies_manually: false
run_build: true
run_tests: true
- name: TSAN
# TODO Update to ubuntu-22.04
os: ubuntu-20.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 libomp5-11 libomp-11-dev
build_type: Debug
extra_cmake_flags: ""
extra_cxxflags: "-O2 -fsanitize=thread -fno-omit-frame-pointer -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-common"
install_dependencies_manually: false
run_build: true
run_tests: true
gtest_args: "--gtest_filter=-LoggingTest.LoggingAlsoWorksAfterFork:AssertTest_*:BacktraceTest.*:SubprocessTest.*:SignalCatcherTest.*_thenDies:SignalHandlerTest.*_thenDies:SignalHandlerTest.givenMultipleSigIntHandlers_whenRaising_thenCatchesCorrectSignal:CliTest_Setup.*:CliTest_IntegrityCheck.*:*/CliTest_WrongEnvironment.*:CliTest_Unmount.*:CliTest.WorksWithCommasInBasedir"
extra_env_vars_for_test: OMP_NUM_THREADS=1
- name: clang-tidy
os: ubuntu-22.04
compiler:
cxx: clang++-11
cc: clang-11
apt_package: clang-11 clang-tidy-11 libomp5-11 libomp-11-dev
build_type: RelWithDebInfo
extra_cmake_flags: ""
extra_cxxflags: ""
install_dependencies_manually: false
run_build: false
run_tests: false
extra_env_vars_for_test: ""
run_clang_tidy: true
runs-on: ${{matrix.os}}
env:
# Setting conan cache dir to a location where our Github Cache Action can find it
CONAN_USER_HOME: "${{ github.workspace }}/conan-cache/"
steps:
- name: Checkout
uses: actions/checkout@v1
#TODO Ideally, all the setup actions would be in their own subaction, but Github doesn't support using third party actions (e.g. cache) from nested actions yet, see https://github.com/actions/runner/issues/862
- name: Setup MacOS
if: ${{ runner.os == 'macOS' }}
uses: ./.github/workflows/actions/setup_macos
with:
extra_homebrew_packages: ${{ matrix.compiler.homebrew_package }}
- name: Setup Linux
if: ${{ runner.os == 'Linux' }}
uses: ./.github/workflows/actions/setup_linux
with:
os: ${{ matrix.os }}
extra_apt_packages: ${{ matrix.compiler.apt_package }}
- name: Install local dependencies
if: ${{ matrix.install_dependencies_manually }}
uses: ./.github/workflows/actions/install_local_dependencies
- name: Find pip cache location
id: pip_cache_dir
run: |
# We need at least pip 20.1 to get the "pip cache dir" command. Ubuntu doesn't have pip 20.1 by default yet, so let's upgrade it
python3 -m pip install -U pip
python3 -m pip --version
echo "::set-output name=pip_cache_dir::$(python3 -m pip cache dir)"
shell: bash
- name: Retrieve pip cache
# Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
continue-on-error: true
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: get
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
aws-secret-access-key: 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
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
- name: Install Conan
shell: bash
run: |
# Using "python3 -m pip" instead of "pip3" to make sure we get the same pip that we queried the cache dir for the Github Cache action
python3 -m pip install conan==1.59
- name: Save pip cache
# note: this access key has write access to the cache. This can't run on PRs.
if: ${{github.event_name == 'push' }}
# Caching sometimes fails nondeterministically (roughly 1% of the times this is run); let's not fail the job for it
continue-on-error: true
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: put
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
artifacts: ${{ steps.pip_cache_dir.outputs.pip_cache_dir }}
#TODO Ideally, the Setup ccache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
- name: Configure ccache
shell: bash
run: |
set -v
ccache --set-config=compiler_check=content
ccache --set-config=max_size=500M
ccache --set-config=cache_dir=${{github.workspace}}/.ccache
ccache --set-config=compression=true
ccache --set-config=sloppiness=include_file_mtime,include_file_ctime
echo CCache config:
ccache -p
echo Clearing ccache statistics
ccache -z
- name: Hash flags
id: hash_flags
run: |
# Write it into a file first so that we fail if the command fails. Unfortunately, bash ignores errors inside $().
echo __${{matrix.extra_cmake_flags}}__${{matrix.extra_cxxflags}}__ | md5sum > /tmp/hash_flags
echo "::set-output name=hash_flags::$(cat /tmp/hash_flags)"
rm /tmp/hash_flags
shell: bash
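# Illustration of the pitfall mentioned above (a standalone sketch, not part of this workflow):
#   set -e
#   echo "hash: $(md5sum /nonexistent)"  # md5sum fails, but echo exits 0, so set -e does not trigger
# Running the command on its own and writing its output to a file first surfaces the failure.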
- name: Retrieve ccache cache
# Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
continue-on-error: true
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: get
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
aws-secret-access-key: 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
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
- name: Show ccache statistics
shell: bash
run: |
set -v
ccache -s
# TODO Ideally, the Setup conan cache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
- name: Retrieve conan cache
# Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
continue-on-error: true
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: get
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
aws-secret-access-key: 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
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
- name: Build (macOS)
if: ${{ matrix.run_build && runner.os == 'macOS' }}
uses: ./.github/workflows/actions/run_build
with:
cxx: ${{ matrix.compiler.macos_cxx }}
cc: ${{ matrix.compiler.macos_cc }}
build_type: ${{ matrix.build_type }}
- name: Build (Linux)
if: ${{ matrix.run_build && runner.os == 'Linux' }}
uses: ./.github/workflows/actions/run_build
with:
cxx: ${{ matrix.compiler.cxx }}
cc: ${{ matrix.compiler.cc }}
build_type: ${{ matrix.build_type }}
extra_cmake_flags: ${{ matrix.extra_cmake_flags }}
extra_cxxflags: ${{ matrix.extra_cxxflags }}
- name: Run clang-tidy
id: clang_tidy
if: ${{ matrix.run_clang_tidy }}
shell: bash
run: |
set -v
mkdir cmake
cd cmake
if ! ../run-clang-tidy.sh -fix ; then
git diff > /tmp/clang-tidy-fixes
echo Found clang tidy fixes:
cat /tmp/clang-tidy-fixes
exit 1
else
echo Did not find any clang-tidy fixes
fi
- name: Upload fixes as artifact
if: ${{ always() && matrix.run_clang_tidy }}
uses: actions/upload-artifact@v2
with:
name: clang-tidy-fixes
path: /tmp/clang-tidy-fixes
- name: Show ccache statistics
shell: bash
run: |
set -v
ccache -s
- name: Reduce ccache size
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
set -v
ccache --evict-older-than 7d
ccache -s
- name: Save ccache cache
# note: this access key has write access to the cache. This can't run on PRs.
if: ${{ github.event_name == 'push' }}
# Caching sometimes fails nondeterministically (roughly 1% of the times this is run); let's not fail the job for it
continue-on-error: true
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: put
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
artifacts: ${{ github.workspace }}/.ccache
- name: Save conan cache
# note: this access key has write access to the cache. This can't run on PRs.
if: ${{ github.event_name == 'push' }}
# Caching sometimes fails nondeterministically (roughly 1% of the times this is run); let's not fail the job for it
continue-on-error: true
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: put
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
artifacts: ${{ env.CONAN_USER_HOME }}
- name: Test
if: ${{ matrix.run_tests }}
uses: ./.github/workflows/actions/run_tests
with:
gtest_args: ${{matrix.gtest_args}}
extra_env_vars: ${{matrix.extra_env_vars_for_test}}
windows:
name: CI (Windows)
strategy:
fail-fast: false
matrix:
name: [""]
os:
- windows-2019
arch:
- Win32
- x64
build_type:
- Debug
- Release
- RelWithDebInfo
runs-on: ${{matrix.os}}
env:
# Setting conan cache dir to a location where our Github Cache Action can find it
CONAN_USER_HOME: "D:/.conan/f/"
CONAN_USER_HOME_SHORT: "D:/.conan/s/"
steps:
- name: Checkout
uses: actions/checkout@v1
#TODO Ideally, all the setup actions would be in their own subaction, but Github doesn't support using third party actions (e.g. cache) from nested actions yet, see https://github.com/actions/runner/issues/862
- name: Setup Windows
uses: ./.github/workflows/actions/setup_windows
- name: Find pip cache location
id: pip_cache_dir
run: |
# We need at least pip 20.1 to get the "pip cache dir" command. Ubuntu doesn't have pip 20.1 by default yet, so let's upgrade it
python3 -m pip install -U pip
python3 -m pip --version
echo "::set-output name=pip_cache_dir::$(python3 -m pip cache dir)"
shell: bash
- name: Retrieve pip cache
# Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
continue-on-error: true
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: get
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
aws-secret-access-key: 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
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
- name: Install Conan
shell: bash
run: |
# Using "python3 -m pip" instead of "pip3" to make sure we get the same pip that we queried the cache dir for the Github Cache action
python3 -m pip install conan==1.59
- name: Save pip cache
# note: this access key has write access to the cache. This can't run on PRs.
if: ${{github.event_name == 'push' }}
# Caching sometimes fails nondeterministically (roughly 1% of the times this is run); let's not fail the job for it
continue-on-error: true
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: put
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
artifacts: ${{ steps.pip_cache_dir.outputs.pip_cache_dir }}
#TODO Ideally, the Setup ccache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
# - name: Configure ccache
# shell: bash
# run: |
# set -v
# ccache --set-config=compiler_check=content
# ccache --set-config=max_size=500M
# ccache --set-config=cache_dir=${{github.workspace}}/.ccache
# ccache --set-config=compression=true
# ccache --set-config=sloppiness=include_file_mtime,include_file_ctime
# echo CCache config:
# ccache -p
# echo Clearing ccache statistics
# ccache -z
# - name: Hash flags
# id: hash_flags
# run: |
# # Write it into file first so we fail if the command fails. Errors inside $() are ignored by bash unfortunately.
# echo __${{matrix.extra_cmake_flags}}__${{matrix.extra_cxxflags}}__ | md5sum > /tmp/hash_flags
# echo "::set-output name=hash_flags::$(cat /tmp/hash_flags)"
# rm /tmp/hash_flags
# shell: bash
# - name: Retrieve ccache cache
# # Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
# continue-on-error: true
# # We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
# uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
# with:
# action: get
# # note: this access key has read-only access to the cache. It's public so it runs on PRs.
# aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
# aws-secret-access-key: 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
# aws-region: eu-west-1
# bucket: ci-cache.cryfs
# key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
# - name: Show ccache statistics
# shell: bash
# run: |
# set -v
# ccache -s
# TODO Ideally, the Setup conan cache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
- name: Retrieve conan cache
# Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
continue-on-error: true
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: get
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
aws-secret-access-key: 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
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
- name: Build
shell: bash
run: |
set -v
# note: The cmake+ninja workflow requires us to set build type in both cmake commands ('cmake' and 'cmake --build'), otherwise the cryfs.exe will depend on debug versions of the visual studio c++ runtime (i.e. msvcp140d.dll)
# note: The CMAKE_SYSTEM_VERSION variable is set to 10.0.18362.0 because as of this writing, appveyor uses 10.0.17763.0 and that has a bug, see https://developercommunity.visualstudio.com/content/problem/343296/sdk-and-experimentalpreprocessor.html
# TODO CMAKE_SYSTEM_VERSION is probably not needed anymore
mkdir build
cd build
cmake .. -G "Visual Studio 16 2019" -DCMAKE_BUILD_TYPE=${{matrix.build_type}} -DBUILD_TESTING=on -DDOKAN_PATH="C:/Program Files/Dokan/DokanLibrary-1.2.2" -A ${{matrix.arch}} -DCMAKE_SYSTEM_VERSION="10.0.18362.0"
cmake --build . --config ${{matrix.build_type}}
# - name: Show ccache statistics
# shell: bash
# run: |
# set -v
# ccache -s
# - name: Reduce ccache size
# if: ${{ runner.os == 'macOS' }}
# shell: bash
# run: |
# set -v
# ccache --evict-older-than 7d
# ccache -s
# - name: Save ccache cache
# # note: this access key has write access to the cache. This can't run on PRs.
# if: ${{ github.event_name == 'push' }}
# # Caching sometimes fails nondeterministically (roughly 1% of the times this is run); let's not fail the job for it
# continue-on-error: true
# uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
# with:
# action: put
# aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
# aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
# aws-region: eu-west-1
# bucket: ci-cache.cryfs
# key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
# artifacts: ${{ github.workspace }}/.ccache
- name: Save conan cache
# note: this access key has write access to the cache. This can't run on PRs.
if: ${{ github.event_name == 'push' }}
# Caching sometimes fails nondeterministically (roughly 1% of the times this is run); let's not fail the job for it
continue-on-error: true
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
with:
action: put
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-west-1
bucket: ci-cache.cryfs
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
artifacts: |
${{ env.CONAN_USER_HOME }}
${{ env.CONAN_USER_HOME_SHORT }}
- name: Test
shell: bash
run: |
set -v
cd build
./test/gitversion/${{matrix.build_type}}/gitversion-test.exe
./test/cpp-utils/${{matrix.build_type}}/cpp-utils-test.exe
# ./test/fspp/${{matrix.build_type}}/fspp-test.exe
./test/parallelaccessstore/${{matrix.build_type}}/parallelaccessstore-test.exe
./test/blockstore/${{matrix.build_type}}/blockstore-test.exe
./test/blobstore/${{matrix.build_type}}/blobstore-test.exe
./test/cryfs/${{matrix.build_type}}/cryfs-test.exe
# TODO Enable cryfs-cli-test on Windows
# ./test/cryfs-cli/${{matrix.build_type}}/cryfs-cli-test.exe
- name: CPack
shell: bash
run: |
set -v
cd build
cpack -C ${{matrix.build_type}} --verbose -G WIX
- name: Upload installers as artifact
uses: actions/upload-artifact@v2
with:
name: cryfs-${{matrix.arch}}-${{matrix.build_type}}.msi
path: build/cryfs-*.msi

==> .gitignore <==
umltest.inner.sh
umltest.status
/build
/cmake
/cmake-build-*
/.idea
*~
/.vs
/.vscode
src/gitversion/*.pyc
src/gitversion/__pycache__
cmake-build-debug
cmake-build-release
cmake-build-test

==> CMakeLists.txt <==
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
# TODO Remove this deprecated policy switch; we already require CMake 3.10 or later
cmake_policy(SET CMP0065 OLD)
# TODO Perf test:
# - try if setting CRYPTOPP_NATIVE_ARCH=ON and adding -march=native to the compile commands for cryfs source files makes a difference
# -> if yes, offer a cmake option to enable both of these
project(cryfs)
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/cmake-utils)
include(utils)
require_gcc_version(7.0)
require_clang_version(7.0)
# Default value is not to build test cases
option(BUILD_TESTING "build test cases" OFF)
option(CRYFS_UPDATE_CHECKS "let cryfs check for updates and security vulnerabilities" ON)
option(DISABLE_OPENMP "allow building without OpenMP libraries. This will cause performance degradations." OFF)
# The following options are helpful for development and/or CI
option(USE_WERROR "build with -Werror flag")
option(USE_CLANG_TIDY "build with clang-tidy checks enabled" OFF)
option(USE_IWYU "build with iwyu checks enabled" OFF)
option(CLANG_TIDY_WARNINGS_AS_ERRORS "treat clang-tidy warnings as errors" OFF)
set(DEPENDENCY_CONFIG "cmake-utils/DependenciesFromConan.cmake" CACHE FILEPATH "cmake configuration file defining how to get dependencies")
if (MSVC)
option(DOKAN_PATH "Location of the Dokan library, e.g. C:\\Program Files\\Dokan\\DokanLibrary-1.1.0" "")
endif()
# Default value is to build in release mode but with debug symbols
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE INTERNAL "CMAKE_BUILD_TYPE")
endif(NOT CMAKE_BUILD_TYPE)
# We don't use LTO because crypto++ has problems with it, see https://github.com/weidai11/cryptopp/issues/1031 and https://www.cryptopp.com/wiki/Link_Time_Optimization
# The MSVC version on AppVeyor CI needs this
if(MSVC)
add_definitions(/bigobj)
endif()
include(${DEPENDENCY_CONFIG})
add_subdirectory(vendor EXCLUDE_FROM_ALL)
add_subdirectory(src)
add_subdirectory(doc)
add_subdirectory(test)
add_subdirectory(cpack)
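# A typical configure-and-build using the options above (values are illustrative
# and mirror what CI does):
#   mkdir build && cd build
#   cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTING=on -DUSE_WERROR=on
#   cmake --build .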

==> CMakeSettings.json <==
{
"configurations": [
{
"name": "x86-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x86" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
},
{
"name": "x86-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"inheritEnvironments": [ "msvc_x86" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
},
{
"name": "x64-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
},
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
"installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.2.2\"",
"buildCommandArgs": "-v",
"ctestCommandArgs": ""
}
]
}

==> ChangeLog.txt <==
Version 0.11.4
---------------
* Fixed build issue with GCC 13 (see https://github.com/cryfs/cryfs/pull/448 )
* Fixed build issue with Python 3.12 (see https://github.com/cryfs/cryfs/issues/459 )
Version 0.11.3
---------------
* Fixed build issue on systems with libfmt 9.0 (see https://github.com/cryfs/cryfs/issues/432 )
* Fixed build issue on Apple Silicon Macs (see https://github.com/cryfs/homebrew-tap/issues/10 )
* Fixed build issue on systems that only have `python3` but no `python` executable (see https://github.com/cryfs/homebrew-tap/issues/12 )
Version 0.11.2
---------------
Bugfix:
* Time to mount a file system was very long because the build didn't correctly use OpenMP. This is now fixed and file systems should open faster again.
Version 0.11.1
---------------
Bugfix:
* Fix building of the range-v3 dependency. The conan remote URL for this dependency changed and we have to use the new URL. See https://github.com/cryfs/cryfs/issues/398
* Update to CryptoPP 8.6. This fixes a rare bug where CryptoPP 8.5 encrypts data wrongly, see https://github.com/weidai11/cryptopp/issues/1069
* cryfs-unmount correctly unmounts paths that contain spaces, see https://github.com/cryfs/cryfs/issues/372
* Updated to DokanY 1.2.2.1001
Version 0.11.0
---------------
Backwards Compatibility:
* Filesystems created with CryFS 0.10.x can be mounted without requiring a migration.
* Filesystems created with CryFS 0.11.x can be mounted by CryFS 0.10.x if you configure it to use a cipher supported by CryFS 0.10.x, e.g. AES-256-GCM. The new default, XChaCha20-Poly1305, is not supported by CryFS 0.10.x.
Security:
* Added the XChaCha20-Poly1305 encryption cipher. For new filesystems, this will be the default, but you're still able to create a filesystem with the previous default of AES-256-GCM
by saying "no" to the "use default settings?" question when creating the file system. Also, old filesystems will not be automatically converted and will keep using AES-256-GCM.
XChaCha20-Poly1305 is significantly slower than AES-256-GCM on modern CPUs, but it is more secure for large filesystems (>64GB).
For AES-256-GCM, it is recommended to encrypt at most 2^32 blocks, which at the CryFS default block size of 16KB would be 64GB. The more the filesystem grows above that, the
more likely it becomes that a nonce gets reused and the two corresponding blocks become decryptable by an adversary. Other blocks would not be affected, but an adversary being
able to access those two blocks (i.e. 32KB of the stored data at the 16KB default block size) is bad enough. See Section 8.3 in https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
XChaCha20-Poly1305 does not suffer from this constraint and stays secure even if the filesystem gets very large.
New platforms:
* CryFS now works on devices with Apple M1 silicon
Build changes:
* Switch to Conan package manager
* Allow an easy way to modify how the dependencies are found. This is mostly helpful for package maintainers. See "Using local dependencies" in the README.
* Build with macFUSE instead of osxfuse on OSX
* Now requires CMake 3.10 or later, and GCC 7 or later, or Clang 7 or later
* Fix a build issue on Gentoo systems
* Fix a build issue when building with boost 1.77
Improvements:
* Display the file system configuration when mounting a file system
* Now shows a better error message when failing to load the config file, distinguishing between "wrong password" and "config file not found".
New features:
* Add support for atime mount options (noatime, strictatime, relatime, atime, nodiratime).
* The new default is now *noatime* (in 0.10.x it was relatime).
Noatime reduces the number of writes necessary and with that reduces the probability of synchronization conflicts,
and the probability of corrupted file systems if a power outage happens while writing.
* Add an --immediate flag to cryfs-unmount that tries to unmount immediately and doesn't wait for processes to release their locks on the file system.
* Add a --create-missing-basedir and --create-missing-mountpoint flag to create the base directory and mount directory respectively, if they don't exist, skipping the confirmation prompt.
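A possible invocation combining these new flags (illustrative; check cryfs --help for the exact syntax of your version):
  cryfs --create-missing-basedir --create-missing-mountpoint basedir mountdir -o noatime
  cryfs-unmount --immediate mountdir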
Other:
* Updated to spdlog 1.8.5
* Updated to ranges-v3 0.11.0
* Updated to boost 1.75
* Updated to crypto++ 8.5
Version 0.10.4
--------------
Fixed bugs:
* Fixed an issue when compiling with GCC 11, see https://github.com/cryfs/cryfs/issues/389
Version 0.10.3
---------------
Fixed bugs:
* A comma in the base directory name would make the file system fail to mount, https://github.com/cryfs/cryfs/issues/326
* Fixed determining the user's homedir: If $HOME and the /etc/passwd entry for the current user contradict each other, now $HOME takes precedence over /etc/passwd.
* Fix Android compilation, https://github.com/cryfs/cryfs/issues/345
* Remove cryfs-stats tool which isn't ready yet and could destroy the file system
* Fixed crash on startup when running in an environment that doesn't have $HOME set (e.g. an empty env), https://github.com/cryfs/cryfs/issues/374
Version 0.10.2
---------------
Fixed bugs:
* Fix occasional crash in mkdir() on Windows
* Fix a race condition when a file descriptor is closed while there's read/write requests for that file being processed.
Improvements:
* Better logging when local state can't be loaded
Other:
* Updated to crypto++ 8.2
Version 0.10.1
---------------
Fixed bugs:
* If file system migration encounters files or folders with the wrong format in the base directory, it now just ignores them instead of crashing.
* When trying to migrate a file system from CryFS 0.9.3 or older, show an error message suggesting to first open it with 0.9.10 because we can't load that anymore.
* The '--unmount-idle' parameter works again
* Fix building with boost 1.67
Compatibility:
* Fixed some incompatibilities with systems using the musl libc
* Use boost::stacktrace instead of libbacktrace to build stack traces. This fixes a segfault issue with platforms using libexecinfo and is generally more portable.
Other:
* Updated to crypto++ 8.1
* Updated to DokanY 1.2.1
* Unit tests can now be run from any directory
Version 0.10.0
---------------
New Features & Improvements:
* Experimental Windows support
* Integrity checks ensure you notice when someone modifies your file system.
* File system nodes (files, directories, symlinks) store a parent pointer to the directory that contains them. This information can be used in later versions to resolve some synchronization conflicts.
* Allow mounting using the system mount tool and /etc/fstab (e.g. mount -t fuse.cryfs basedir mountdir); see the sketch after this list
* Performance improvements
* Use relatime instead of strictatime (further performance improvement)
* Pass fuse options directly to cryfs (i.e. 'cryfs basedir mountdir -o allow_other' instead of 'cryfs basedir mountdir -- -o allow_other')
* CryFS tells the operating system to lock the encryption key to memory, i.e. not swap it to the disk (note: this is best-effort and cannot be guaranteed. Hibernation, for example, will still write the encryption key to the disk).
* New block size options: 4KB and 16KB
* New default block size: 16KB. This should decrease the size of the ciphertext directory for most users.
* Increased scrypt hardness to (N=1048576, r=4, p=8) to make it harder to crack the key while allowing cryfs to take advantage of multicore machines.
* cryfs-unmount tool to unmount filesystems
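A sketch of the fstab usage mentioned above (paths and mount options are placeholders):
  # /etc/fstab line: <basedir> <mountdir> fuse.cryfs <options> 0 0
  /home/user/.cryfs/basedir /home/user/mnt fuse.cryfs noauto,user 0 0
  # or manually:
  mount -t fuse.cryfs /home/user/.cryfs/basedir /home/user/mnt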
Fixed bugs:
* `du` shows correct file system size on Mac OS X.
* On Mac OS X, Finder shows the correct name for the mount directory
Version 0.9.11
--------------
Fixed bugs:
* Fix a race condition when a file descriptor is closed while there's read/write requests for that file being processed.
Version 0.9.10
--------------
Fixed bugs:
* Fixed occasional deadlock (https://github.com/cryfs/cryfs/issues/64)
* Fix for reading empty files out of bounds
* Fixed race condition (https://github.com/cryfs/cryfs/issues/224 and https://github.com/cryfs/cryfs/issues/243)
Version 0.9.9
--------------
Improvements:
* Add --allow-filesystem-upgrade option which will upgrade old file systems without asking the user. This will be especially helpful for GUI tools.
* Add --version option that shows the CryFS version and exits.
* When CryFS fails to load a file system, the process stops with a helpful error code, which can be used by GUI tools to show detailed messages.
* Only migrate a file system if the underlying storage format changed
Version 0.9.8
--------------
Compatibility:
* Runs on Debian with FreeBSD kernel
* Runs on FreeBSD 11.1
* Works with Crypto++ 6.0
Improvements:
* added a man page
Fixed bugs:
* `du` shows correct file system size
* Updated spdlog dependency to fix build on newer systems
Version 0.9.7
--------------
Compatibility:
* Runs on FreeBSD
* Works with Clang++ 3.8 (Debian experimental or newer Ubuntu systems)
* Works with GCC 7
Version 0.9.6
---------------
Fixed bugs:
* Fix potential deadlock
* Fix potential crash
Improvements:
* Allow building with -DCRYFS_UPDATE_CHECKS=off, which will create an executable with disabled update checks (the alternative to disable them in the environment also still works).
* Automatically disable update checks when running in noninteractive mode.
* More detailed error reporting if key derivation fails
Compatibility:
* Compatible with libcurl version >= 7.50.0, and <= 7.21.6 (tested down to 7.19.0)
* Compatible with Crypto++ 5.6.4
* Compatible with compilers running under hardening-wrapper
Version 0.9.5
---------------
Fixed Bugs:
* Fixed a bug that prevented mounting a file system on Mac OS X.
* File system operations correctly update the timestamps (access time, modification time and status change time).
* Reacts correctly to fsync() and fdatasync() syscalls by flushing the corresponding data to the disk.
Improvements:
* When mounting an old file system, CryFS will ask before migrating it to the newest version.
* Operating system tools like the mount command or /proc/self/mountinfo report correct file system type and also report the base directory.
* Compatibility with GCC 6
Version 0.9.4
---------------
Improvements:
* Ciphertext blocks are split into subdirectories (before, all were on the top level) to reduce the number of files per directory. Some unix tools don't work well with directories that have too many entries.
Fixed Bugs:
* Renaming a file to an existing file (i.e. overwriting an existing file) didn't free the allocated memory for the overwritten file
* Renaming a file to an existing file could violate an invariant in the directory layout (directory entries have to be sorted), which could cause files to seemingly disappear.
* Fix a potential deadlock in the cache
Compatibility:
* The generated .deb packages work for any Ubuntu/Debian based distribution, but will not install the package source for automatic updates if it's an unsupported operating system.
Version 0.9.3
---------------
New Features:
* The ciphertext block size is configurable. You can use the "--blocksize" command line argument.
If not specified, CryFS will ask you for a block size when creating a file system.
* It's easier for tools and scripts to use CryFS:
If an environment variable CRYFS_FRONTEND=noninteractive is set, we don't ask for options
(but take default values for everything that's not specified on command line).
Furthermore, in noninteractive mode, we won't ask for password confirmation when creating a file system.
The password only has to be sent once to stdin.
* You can disable the automatic update check by setting CRYFS_NO_UPDATE_CHECK=true in your environment.
Fixed Bugs:
* Building CryFS from the GitHub tarball (i.e. when there is no .git directory present) works.
* A bug in the fstat implementation caused problems with some text editors (e.g. nano) falsely thinking a file changed since they opened it.
* When trying to rename a file to an already existing file name, a bug caused the file to be deleted instead of renamed.
* Rename operation allows overwriting existing files, as specified in the rename(2) man page.
Compatibility:
* The generated .deb packages for Debian also work for the Devuan operating system.
Version 0.9.2
---------------
* Experimental support for installing CryFS on Mac OS X using homebrew
(0.9.2 is not released for Linux)
Version 0.9.1
---------------
* Report file system usage statistics to the operating system (e.g. amount of space used). This information can be queried using the 'df' tool on Linux. See https://github.com/cryfs/cryfs/commit/68acc27e88ff5209ca55ddb4e91f5a449d77fb54
* Use stronger scrypt parameters when generating the config file key from the user password. This makes it a bit more secure, but also takes a bit longer to load a file system. See https://github.com/cryfs/cryfs/commit/7f1493ab9210319cab008e71d4ee8f4d7d920f39
* Fix a bug where deleting a non-empty directory could leave some blocks over. See https://github.com/cryfs/cryfs/commit/df041ac84511e4560c4f099cd8cc089d08e05737
Version 0.9.0
---------------
(warning) file systems created with earlier CryFS versions are incompatible with this release.
* Fully support file access times
* Fix: Password is read from stdin, not from glibc getpass(). This enables external tools (e.g. GUIs) to pass in the password without problems.
* Remove --extpass parameter, because that encourages tool writers to do bad things like storing a password in a file and using --extpass="cat filename".
The password can now be passed in via stdin without problems, so tools should use that.
* Works with zuluMount GUI, https://mhogomchungu.github.io/zuluCrypt/
* Introduce version flags for file system entities to allow future CryFS versions to be backwards-compatible even if the format changes.
* (for developers) New git repository layout. All subrepositories have been merged to one directory.
* (for developers) Using CMake instead of biicode as build system.
Version 0.8.6
---------------
* Fix a deadlock that was caused when a very high load of parallel resize operations was issued, see https://github.com/cryfs/cryfs/issues/3
* Fix a bug that prevented deleting symlinks, see https://github.com/cryfs/cryfs/issues/2
* Gracefully accept modifications to the file access times instead of failing, although they're not stored yet (they will be stored in 0.9.0). This should fix https://github.com/cryfs/cryfs/issues/4
Version 0.8.5
---------------
* Fix package manager warning when installing the .deb package
* Offer a default configuration when creating new filesystems
* If the given base or mount directory doesn't exist, offer to create it
Version 0.8.4
---------------
* Offering .deb packages for Debian and Ubuntu
* Compatibility with 32bit systems
* Support files larger than 4GB
Version 0.8.3
---------------
* Ask for password confirmation when creating a new filesystem
* Check for new CryFS versions and ask the user to update if a new version is available
* Implemented a mechanism that can show warnings about security bugs to users of a certain CryFS version. Let's hope this won't be necessary ;)
* Compatibility with GCC 4.8 (that allows compiling on Ubuntu 14.04 for example)
Version 0.8.2
---------------
* Mount directory, base directory, logfile and config file can be specified as relative paths
* Improved error messages
Version 0.8.1
---------------
* Config File Encryption: Configuration files are encrypted with two ciphers. The user specifies a password, which is then used with the scrypt KDF to generate the two encryption keys.
- Inner level: Encrypts the config data using the user specified cipher.
- Outer level: Encrypts the name of the inner cipher and the inner level ciphertext using aes-256-gcm.
The config file is padded to hide the size of the configuration data (including the name of the cipher used).
* No external config file needed: If the configuration file is not specified as command line parameter, it will be put into the base directory. This way, the filesystem can be mounted with the password only, without specifying a config file on command line.
* Logfiles: Added a --logfile option to specify where logs should be written to. If the option is not specified, CryFS logs to syslog.
* Running in Background: Fixed daemonization. When CryFS is run without the "-f" flag, it will run in the background.
* Better error messages when the base directory does not exist, is not readable, or is not writeable.
* Allow --cipher=xxx to specify cipher on command line. If cryfs is creating a new filesystem, it will use this cipher. If it is opening an existing filesystem, it will check whether this is the cipher used by it.
* --show-ciphers shows a list of all supported ciphers
* --extpass allows using an external program for password input
* --unmount-idle x automatically unmounts the filesystem after x minutes without a filesystem operation.
LICENSE.txt 0000664 0000000 0000000 00000016743 14456142610 0012717 0 ustar 00root root 0000000 0000000 GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
README.md 0000664 0000000 0000000 00000024576 14456142610 0012356 0 ustar 00root root 0000000 0000000 # CryFS [](https://circleci.com/gh/cryfs/cryfs/tree/master) [](https://ci.appveyor.com/project/smessmer/cryfs/branch/master)
CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.
See [https://www.cryfs.org](https://www.cryfs.org).
Install latest release
======================
Linux
------
This only works for Ubuntu 17.04 and later, and Debian Stretch and later.
You can also use CryFS on older versions of these distributions by following the **Building from source** instructions below.
sudo apt install cryfs
The following should work on Arch and Arch-based distros:
sudo pacman -S cryfs
Additionally, the following would work for any Linux distro with the Nix package manager:
nix-env -iA nixpkgs.cryfs
OSX
----
CryFS is distributed via Homebrew, MacPorts, and Nix.
If you use Homebrew:
brew install --cask macfuse
brew install cryfs/tap/cryfs
If you use MacPorts (only available for OSX 10.12 to 10.14 at the moment):
port install cryfs
For Nix, the macOS build for cryfs is available in the Nixpkgs channel 21.05
and later:
brew install --cask macfuse # or download from https://osxfuse.github.io/
nix-env -iA nixpkgs.cryfs
Windows (experimental)
----------------------
CryFS has experimental Windows support since the 0.10 release series. To install it, do:
1. Install [DokanY](https://github.com/dokan-dev/dokany/releases)
2. Install [Microsoft Visual C++ Redistributable for Visual Studio 2019](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads)
3. Install [CryFS](https://www.cryfs.org/#download)
GUI
===
There are some GUI applications with CryFS support. You usually have to install the GUI **and** also CryFS itself for it to work.
- [SiriKali](https://mhogomchungu.github.io/sirikali/)
- [Plasma Vault](https://www.kde.org/announcements/plasma-5.11.0.php) in KDE Plasma >= 5.11
Stability / Production readiness
====================
CryFS 0.10 or later is stable for most use cases, but has a couple of known issues that can corrupt your file system.
They don't happen in normal day-to-day use, but can happen if you don't pay attention or aren't aware of them.
This is why the version number hasn't reached 1.0 yet.
- If you kill the CryFS process while it is in the middle of writing data (either intentionally, or unintentionally by losing power to your PC), your file system could get corrupted.
CryFS does not do journaling. Note that in 0.10.x, read accesses into a CryFS file system can cause writes because file timestamps get updated. So if you're unlucky, your file system
could get corrupted if you lose power while you were reading files as well. Read accesses aren't an issue in CryFS 0.11.x anymore, because it mounts the filesystem with `noatime` by default.
- The same corruption mentioned above can happen when CryFS is trying to write data but your disk ran out of space, causing the write to fail.
- CryFS does not currently support concurrent access, i.e. accessing a file system from multiple devices at the same time.
CryFS works very well for storing data in a cloud and using it from multiple devices, but you need to make sure that only one CryFS process is active at any point in time, and you also need
to make sure that the cloud synchronization client (e.g. Dropbox) finishes its synchronization before you switch devices. There are some ideas on how concurrent access could be supported in
future versions, but it's a hard problem to solve. If you do happen to access the file system from multiple devices at the same time, it will likely go well most of the time, but it can corrupt your file system.
- In addition to the scenarios above that can corrupt your file system, note that there is currently no fsck-like tool for CryFS that could recover your data. Although such a tool is, in theory, possible,
it hasn't been implemented yet, and a corrupted file system will most likely cause a loss of your data.
If the scenarios mentioned above don't apply to you, then you can consider CryFS 0.10.x as stable. The 0.9.x versions are not recommended anymore.
Building from source
====================
Requirements
------------
- Git (for getting the source code)
- GCC version >= 7 or Clang >= 7
- CMake version >= 3.10
- pkg-config (on Unix)
- Conan package manager (version 1.x)
- libcurl4 (including development headers)
- SSL development libraries (including development headers, e.g. libssl-dev)
- libFUSE version >= 2.8.6 (including development headers), on Mac OS X instead install macFUSE from https://osxfuse.github.io/
- Python >= 3.5
- OpenMP
You can use the following commands to install these requirements
# Ubuntu
$ sudo apt install git g++ cmake make pkg-config libcurl4-openssl-dev libssl-dev libfuse-dev python python3-pip
$ sudo pip3 install conan==1.59
# Fedora
$ sudo dnf install git gcc-c++ cmake make pkgconf libcurl-devel openssl-devel fuse-devel python python3-pip
$ sudo pip3 install conan==1.59
# Macintosh
$ brew install cmake pkg-config openssl libomp macfuse
$ sudo pip3 install conan==1.59
Build & Install
---------------
1. Clone repository
$ git clone https://github.com/cryfs/cryfs.git cryfs
$ cd cryfs
2. Build
$ mkdir build && cd build
$ cmake ..
$ make
3. Install
$ sudo make install
You can pass the following variables to the *cmake* command (using *-Dvariablename=value*):
- **-DCMAKE_BUILD_TYPE**=[Release|Debug]: Whether to run code optimization or add debug symbols. Default: Release
- **-DBUILD_TESTING**=[on|off]: Whether to build the test cases (can take a long time). Default: off
- **-DCRYFS_UPDATE_CHECKS**=off: Build a CryFS that doesn't check online for updates and security vulnerabilities.
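For example, to configure a debug build with the test cases enabled (any of the variables above can be combined the same way):

    $ mkdir build && cd build
    $ cmake .. -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTING=on
    $ make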
Building on Windows (experimental)
----------------------------------
1. Install conan. If you want to use "pip install conan", you may have to install Python first.
2. Install DokanY 1.2.2. Other versions may not work.
3. Run CMake to generate Visual Studio 2019 project files (this may not be necessary, but it makes sure everything works as expected and you can see potential errors happening during this step)
$ mkdir build && cd build
$ cmake .. -G "Visual Studio 16 2019" -DDOKAN_PATH=[dokan library location, e.g. "C:\Program Files\Dokan\DokanLibrary-1.2.2"]
4. Potentially modify CMakeSettings.json file to fit your needs
5. Open the cryfs source folder with Visual Studio 2019, or alternatively build on command line using
$ cd build && cmake --build . --config RelWithDebInfo
Troubleshooting
---------------
On most systems, CMake should find the libraries automatically. However, that doesn't always work.
1. **Fuse library not found**
Pass in the library path with
PKG_CONFIG_PATH=/path-to-fuse-or-macFUSE/lib/pkgconfig cmake ..
2. **Fuse headers not found**
Pass in the include path with
PKG_CONFIG_PATH=/path-to-fuse-or-macFUSE/lib/pkgconfig cmake ..
3. **Openssl headers not found**
Pass in the include path with
cmake .. -DCMAKE_C_FLAGS="-I/path/to/openssl/include"
4. **OpenMP not found (osx)**
Either build it without OpenMP
cmake .. -DDISABLE_OPENMP=on
but this will cause slower file system mount times (performance after mounting will be unaffected).
Or keep OpenMP enabled: if you installed it with Homebrew or MacPorts, it will be autodetected.
If that doesn't work for some reason (or you want to use a different installation than the autodetected one),
pass in these flags:
cmake .. -DOpenMP_CXX_FLAGS='-Xpreprocessor -fopenmp -I/path/to/openmp/include' -DOpenMP_CXX_LIB_NAMES=omp -DOpenMP_omp_LIBRARY=/path/to/libomp.dylib
Using local dependencies
-------------------------------
Starting with CryFS 0.11, Conan is used for dependency management.
When you build CryFS, Conan downloads the exact version of each dependency library that was also used for development.
All dependencies are linked statically, so there should be no incompatibility with locally installed libraries.
This is the recommended way because it has the highest probability of working correctly.
However, some distributions prefer software packages to be built against dependencies dynamically and against locally installed versions of libraries.
So if you're building a package for such a distribution, you have the option of doing that, at the cost of potential incompatibilities.
If you follow this workflow, please make sure to extensively test your build of CryFS.
You're using a setup that wasn't tested by the CryFS developers.
To use local dependencies, you need to tell the CryFS build how to get these dependencies.
You can do this by writing a small CMake configuration file and passing it to the CryFS build using `-DDEPENDENCY_CONFIG=filename`.
This configuration file needs to define a cmake target for each of the dependencies.
Here's an [example config file](cmake-utils/DependenciesFromConan.cmake) that gets the dependencies from conan.
And here's another [example config file](cmake-utils/DependenciesFromLocalSystem.cmake) that works for getting dependencies that are locally installed in Ubuntu.
You can create your own configuration file to tell the build how to get its dependencies and, for example, mix and match: get some dependencies from Conan and others from the local system.
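As a rough sketch (not officially supported; the file name my-dependencies.cmake is just an example), a custom dependency config that takes everything from the local system could look like the following, mirroring the shipped DependenciesFromLocalSystem.cmake:

    # my-dependencies.cmake -- pass it to the build with -DDEPENDENCY_CONFIG=../my-dependencies.cmake
    # The build expects one CMake interface target per dependency, named as below.

    # boost from the local system
    find_package(Boost 1.65.1 REQUIRED COMPONENTS filesystem system thread chrono program_options)
    add_library(CryfsDependencies_boost INTERFACE)
    target_link_libraries(CryfsDependencies_boost INTERFACE Boost::boost Boost::filesystem Boost::thread Boost::chrono Boost::program_options)

    # range-v3 from the local system
    find_package(range-v3 REQUIRED)
    add_library(CryfsDependencies_range-v3 INTERFACE)
    target_link_libraries(CryfsDependencies_range-v3 INTERFACE range-v3::range-v3)

    # spdlog from the local system
    find_package(spdlog REQUIRED)
    add_library(CryfsDependencies_spdlog INTERFACE)
    target_link_libraries(CryfsDependencies_spdlog INTERFACE spdlog::spdlog)

See the shipped file for platform details it additionally handles (shared vs. static Boost, linking rt on Linux) and remember the ordering caveat above if you mix in Conan.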
Creating .deb and .rpm packages
-------------------------------
It is recommended to install CryFS using packages, because that allows for an easy way to uninstall it again once you don't need it anymore.
If you want to create a .rpm package, you need to install rpmbuild.
1. Clone repository
$ git clone https://github.com/cryfs/cryfs.git cryfs
$ cd cryfs
2. Build
$ mkdir cmake && cd cmake
$ cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTING=off
$ make package
Disclaimer
----------------------
In the event of a password leak, you are strongly advised to create a new filesystem and copy all the data over from the previous one. Then, remove all copies of the compromised filesystem and config file (e.g., from the "previous versions" feature of your cloud system) to prevent access to the key (and, as a result, your data) using the leaked password.
appveyor.yml 0000664 0000000 0000000 00000000236 14456142610 0013452 0 ustar 00root root 0000000 0000000 image:
- Visual Studio 2019
platform:
- x64
configuration:
- Release
build_script:
- cmd: echo Appveyor CI is disabled since we now have Github Actions
archive.sh 0000775 0000000 0000000 00000000420 14456142610 0013035 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Usage: ./archive.sh <tag> <gpg-homedir>
TAG=$1
GPGHOMEDIR=$2
# Create a gzip-compressed source archive for the given tag and sign it
git archive --format=tgz "$TAG" > "cryfs-$TAG.tar.gz"
gpg --homedir "$GPGHOMEDIR" --armor --detach-sign "cryfs-$TAG.tar.gz"
# Create an xz-compressed source archive for the given tag and sign it
git archive --format=tar "$TAG" | xz -9 > "cryfs-$TAG.tar.xz"
gpg --homedir "$GPGHOMEDIR" --armor --detach-sign "cryfs-$TAG.tar.xz"
cmake-utils/ 0000775 0000000 0000000 00000000000 14456142610 0013277 5 ustar 00root root 0000000 0000000 cmake-utils/DependenciesFromConan.cmake 0000664 0000000 0000000 00000001243 14456142610 0020472 0 ustar 00root root 0000000 0000000 include(cmake-utils/conan.cmake)
conan_cmake_autodetect(settings)
conan_cmake_install(
PATH_OR_REFERENCE ${CMAKE_CURRENT_SOURCE_DIR}/conanfile.py
BUILD missing
SETTINGS ${settings})
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup(TARGETS SKIP_STD NO_OUTPUT_DIRS)
add_library(CryfsDependencies_range-v3 INTERFACE)
target_link_libraries(CryfsDependencies_range-v3 INTERFACE CONAN_PKG::range-v3)
add_library(CryfsDependencies_spdlog INTERFACE)
target_link_libraries(CryfsDependencies_spdlog INTERFACE CONAN_PKG::spdlog)
add_library(CryfsDependencies_boost INTERFACE)
target_link_libraries(CryfsDependencies_boost INTERFACE CONAN_PKG::boost)
cmake-utils/DependenciesFromLocalSystem.cmake 0000664 0000000 0000000 00000005406 14456142610 0021700 0 ustar 00root root 0000000 0000000 # This configuration file can be used to build CryFS against local dependencies instead of using Conan.
#
# Example:
# $ mkdir build && cd build && cmake .. -DDEPENDENCY_CONFIG=../cmake-utils/DependenciesFromLocalSystem.cmake
#
# Note that this is only provided as an example and not officially supported. Please still open issues
# on GitHub if it doesn't work though.
#
# There's another file in this directory, DependenciesFromConan.cmake, which, well, gets the dependencies from
# Conan instead of from the local system. This is the default. You can also create your own file to tell the build
# how to get its dependencies, for example you can mix and match, get some dependencies from Conan and others
# from the local system. If you mix and match Conan and local dependencies, please call conan_basic_setup()
# **after** running all find_package() for your local dependencies, otherwise find_package() might also find
# the versions from Conan.
#
# Note that if you use dependencies from the local system, you're very likely using different versions of the
# dependencies than were used in the development of CryFS. The official version of each dependency required is
# listed in conanfile.py. Different versions might work but are untested. Please intensively test your CryFS build
# if you build it with different versions of the dependencies.
function(check_target_is_not_from_conan TARGET)
get_target_property(INCLUDE_DIRS ${TARGET} INTERFACE_INCLUDE_DIRECTORIES)
if("${INCLUDE_DIRS}" MATCHES "conan")
message(WARNING "It seems setting up the local ${TARGET} dependency didn't work correctly and it got the version from Conan instead. Please set up cmake so that it sets up conan after all local dependencies are defined.")
endif()
endfunction()
# Setup range-v3 dependency
find_package(range-v3 REQUIRED)
check_target_is_not_from_conan(range-v3::range-v3)
add_library(CryfsDependencies_range-v3 INTERFACE)
target_link_libraries(CryfsDependencies_range-v3 INTERFACE range-v3::range-v3)
# Setup boost dependency
set(Boost_USE_STATIC_LIBS OFF)
find_package(Boost 1.65.1
REQUIRED
COMPONENTS filesystem system thread chrono program_options)
check_target_is_not_from_conan(Boost::boost)
add_library(CryfsDependencies_boost INTERFACE)
target_link_libraries(CryfsDependencies_boost INTERFACE Boost::boost Boost::filesystem Boost::thread Boost::chrono Boost::program_options)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# Also link to rt, because boost thread needs that.
target_link_libraries(CryfsDependencies_boost INTERFACE rt)
endif()
# Setup spdlog dependency
find_package(spdlog REQUIRED)
check_target_is_not_from_conan(spdlog::spdlog)
add_library(CryfsDependencies_spdlog INTERFACE)
target_link_libraries(CryfsDependencies_spdlog INTERFACE spdlog::spdlog)
cmake-utils/TargetArch.cmake 0000664 0000000 0000000 00000015516 14456142610 0016335 0 ustar 00root root 0000000 0000000 # This file is taken from https://github.com/axr/solar-cmake/blob/73cfea0db0284c5e2010aca23989046e5bda95c9/TargetArch.cmake
# License:
# Copyright (c) 2012 Petroules Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Based on the Qt 5 processor detection code, so should be very accurate
# https://qt.gitorious.org/qt/qtbase/blobs/master/src/corelib/global/qprocessordetection.h
# Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64)
# Regarding POWER/PowerPC, just as is noted in the Qt source,
# "There are many more known variants/revisions that we do not handle/detect."
set(archdetect_c_code "
#if defined(__arm__) || defined(__TARGET_ARCH_ARM)
#if defined(__ARM_ARCH_7__) \\
|| defined(__ARM_ARCH_7A__) \\
|| defined(__ARM_ARCH_7R__) \\
|| defined(__ARM_ARCH_7M__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 7)
#error cmake_ARCH armv7
#elif defined(__ARM_ARCH_6__) \\
|| defined(__ARM_ARCH_6J__) \\
|| defined(__ARM_ARCH_6T2__) \\
|| defined(__ARM_ARCH_6Z__) \\
|| defined(__ARM_ARCH_6K__) \\
|| defined(__ARM_ARCH_6ZK__) \\
|| defined(__ARM_ARCH_6M__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 6)
#error cmake_ARCH armv6
#elif defined(__ARM_ARCH_5TEJ__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 5)
#error cmake_ARCH armv5
#else
#error cmake_ARCH arm
#endif
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
#error cmake_ARCH i386
#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64)
#error cmake_ARCH x86_64
#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
#error cmake_ARCH ia64
#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \\
|| defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \\
|| defined(_M_MPPC) || defined(_M_PPC)
#if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
#error cmake_ARCH ppc64
#else
#error cmake_ARCH ppc
#endif
#endif
#error cmake_ARCH unknown
")
# Set ppc_support to TRUE before including this file or ppc and ppc64
# will be treated as invalid architectures since they are no longer supported by Apple
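# Example usage (variable name is illustrative):
#   include(cmake-utils/TargetArch.cmake)
#   target_architecture(MY_ARCH)  # sets MY_ARCH to e.g. x86_64, i386, armv7, ppc, ppc64, ia64 or unknown
#   message(STATUS "Target architecture: ${MY_ARCH}")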
function(target_architecture output_var)
if(APPLE AND CMAKE_OSX_ARCHITECTURES)
# On OS X we use CMAKE_OSX_ARCHITECTURES *if* it was set
# First let's normalize the order of the values
# Note that it's not possible to compile PowerPC applications if you are using
# the OS X SDK version 10.6 or later - you'll need 10.4/10.5 for that, so we
# disable it by default
# See this page for more information:
# http://stackoverflow.com/questions/5333490/how-can-we-restore-ppc-ppc64-as-well-as-full-10-4-10-5-sdk-support-to-xcode-4
# Architecture defaults to i386 or ppc on OS X 10.5 and earlier, depending on the CPU type detected at runtime.
# On OS X 10.6+ the default is x86_64 if the CPU supports it, i386 otherwise.
foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES})
if("${osx_arch}" STREQUAL "ppc" AND ppc_support)
set(osx_arch_ppc TRUE)
elseif("${osx_arch}" STREQUAL "i386")
set(osx_arch_i386 TRUE)
elseif("${osx_arch}" STREQUAL "x86_64")
set(osx_arch_x86_64 TRUE)
elseif("${osx_arch}" STREQUAL "ppc64" AND ppc_support)
set(osx_arch_ppc64 TRUE)
else()
message(FATAL_ERROR "Invalid OS X arch name: ${osx_arch}")
endif()
endforeach()
# Now add all the architectures in our normalized order
if(osx_arch_ppc)
list(APPEND ARCH ppc)
endif()
if(osx_arch_i386)
list(APPEND ARCH i386)
endif()
if(osx_arch_x86_64)
list(APPEND ARCH x86_64)
endif()
if(osx_arch_ppc64)
list(APPEND ARCH ppc64)
endif()
else()
file(WRITE "${CMAKE_BINARY_DIR}/arch.c" "${archdetect_c_code}")
enable_language(C)
# Detect the architecture in a rather creative way...
# This compiles a small C program which is a series of ifdefs that selects a
# particular #error preprocessor directive whose message string contains the
# target architecture. The program will always fail to compile (both because
# the file is not a valid C program, and obviously because of the presence of the
# #error preprocessor directives... but by exploiting the preprocessor in this
# way, we can detect the correct target architecture even when cross-compiling,
# since the program itself never needs to be run (only the compiler/preprocessor)
try_run(
run_result_unused
compile_result_unused
"${CMAKE_BINARY_DIR}"
"${CMAKE_BINARY_DIR}/arch.c"
COMPILE_OUTPUT_VARIABLE ARCH
CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES}
)
# Parse the architecture name from the compiler output
string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}")
# Get rid of the value marker leaving just the architecture name
string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}")
# If we are compiling with an unknown architecture this variable should
# already be set to "unknown" but in the case that it's empty (i.e. due
# to a typo in the code), then set it to unknown
if (NOT ARCH)
set(ARCH unknown)
endif()
endif()
set(${output_var} "${ARCH}" PARENT_SCOPE)
endfunction() cmake-utils/conan.cmake 0000664 0000000 0000000 00000106570 14456142610 0015410 0 ustar 00root root 0000000 0000000 # Taken from https://github.com/conan-io/cmake-conan/blob/v0.16.1/conan.cmake
# The MIT License (MIT)
# Copyright (c) 2018 JFrog
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file comes from: https://github.com/conan-io/cmake-conan. Please refer
# to this repository for issues and documentation.
# Its purpose is to wrap and launch Conan C/C++ Package Manager when cmake is called.
# It will take CMake current settings (os, compiler, compiler version, architecture)
# and translate them to conan settings for installing and retrieving dependencies.
# It is intended to facilitate developers building projects that have conan dependencies,
# but it is only necessary on the end-user side. It is not necessary to create conan
# packages; in fact, it shouldn't be used for that. Check the project documentation.
# version: 0.16.1
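# A typical invocation, as used by this repository in cmake-utils/DependenciesFromConan.cmake:
#   conan_cmake_autodetect(settings)
#   conan_cmake_install(
#       PATH_OR_REFERENCE ${CMAKE_CURRENT_SOURCE_DIR}/conanfile.py
#       BUILD missing
#       SETTINGS ${settings})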
include(CMakeParseArguments)
function(_get_msvc_ide_version result)
set(${result} "" PARENT_SCOPE)
if(NOT MSVC_VERSION VERSION_LESS 1400 AND MSVC_VERSION VERSION_LESS 1500)
set(${result} 8 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1500 AND MSVC_VERSION VERSION_LESS 1600)
set(${result} 9 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1600 AND MSVC_VERSION VERSION_LESS 1700)
set(${result} 10 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1700 AND MSVC_VERSION VERSION_LESS 1800)
set(${result} 11 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1800 AND MSVC_VERSION VERSION_LESS 1900)
set(${result} 12 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1900 AND MSVC_VERSION VERSION_LESS 1910)
set(${result} 14 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1910 AND MSVC_VERSION VERSION_LESS 1920)
set(${result} 15 PARENT_SCOPE)
elseif(NOT MSVC_VERSION VERSION_LESS 1920 AND MSVC_VERSION VERSION_LESS 1930)
set(${result} 16 PARENT_SCOPE)
else()
message(FATAL_ERROR "Conan: Unknown MSVC compiler version [${MSVC_VERSION}]")
endif()
endfunction()
macro(_conan_detect_build_type)
conan_parse_arguments(${ARGV})
if(ARGUMENTS_BUILD_TYPE)
set(_CONAN_SETTING_BUILD_TYPE ${ARGUMENTS_BUILD_TYPE})
elseif(CMAKE_BUILD_TYPE)
set(_CONAN_SETTING_BUILD_TYPE ${CMAKE_BUILD_TYPE})
else()
message(FATAL_ERROR "Please specify in command line CMAKE_BUILD_TYPE (-DCMAKE_BUILD_TYPE=Release)")
endif()
string(TOUPPER ${_CONAN_SETTING_BUILD_TYPE} _CONAN_SETTING_BUILD_TYPE_UPPER)
if (_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "DEBUG")
set(_CONAN_SETTING_BUILD_TYPE "Debug")
elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "RELEASE")
set(_CONAN_SETTING_BUILD_TYPE "Release")
elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "RELWITHDEBINFO")
set(_CONAN_SETTING_BUILD_TYPE "RelWithDebInfo")
elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "MINSIZEREL")
set(_CONAN_SETTING_BUILD_TYPE "MinSizeRel")
endif()
endmacro()
macro(_conan_check_system_name)
#handle -s os setting
if(CMAKE_SYSTEM_NAME AND NOT CMAKE_SYSTEM_NAME STREQUAL "Generic")
#use default conan os setting if CMAKE_SYSTEM_NAME is not defined
set(CONAN_SYSTEM_NAME ${CMAKE_SYSTEM_NAME})
if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
set(CONAN_SYSTEM_NAME Macos)
endif()
if(${CMAKE_SYSTEM_NAME} STREQUAL "QNX")
set(CONAN_SYSTEM_NAME Neutrino)
endif()
set(CONAN_SUPPORTED_PLATFORMS Windows Linux Macos Android iOS FreeBSD WindowsStore WindowsCE watchOS tvOS FreeBSD SunOS AIX Arduino Emscripten Neutrino)
list (FIND CONAN_SUPPORTED_PLATFORMS "${CONAN_SYSTEM_NAME}" _index)
if (${_index} GREATER -1)
#check if the cmake system is a conan supported one
set(_CONAN_SETTING_OS ${CONAN_SYSTEM_NAME})
else()
message(FATAL_ERROR "cmake system ${CONAN_SYSTEM_NAME} is not supported by conan. Use one of ${CONAN_SUPPORTED_PLATFORMS}")
endif()
endif()
endmacro()
macro(_conan_check_language)
get_property(_languages GLOBAL PROPERTY ENABLED_LANGUAGES)
if (";${_languages};" MATCHES ";CXX;")
set(LANGUAGE CXX)
set(USING_CXX 1)
elseif (";${_languages};" MATCHES ";C;")
set(LANGUAGE C)
set(USING_CXX 0)
else ()
message(FATAL_ERROR "Conan: Neither C or C++ was detected as a language for the project. Unabled to detect compiler version.")
endif()
endmacro()
macro(_conan_detect_compiler)
conan_parse_arguments(${ARGV})
if(ARGUMENTS_ARCH)
set(_CONAN_SETTING_ARCH ${ARGUMENTS_ARCH})
endif()
if (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL GNU)
# using GCC
# TODO: Handle other params
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(COMPILER_VERSION ${MAJOR}.${MINOR})
if(${MAJOR} GREATER 4)
set(COMPILER_VERSION ${MAJOR})
endif()
set(_CONAN_SETTING_COMPILER gcc)
set(_CONAN_SETTING_COMPILER_VERSION ${COMPILER_VERSION})
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL Intel)
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(COMPILER_VERSION ${MAJOR}.${MINOR})
set(_CONAN_SETTING_COMPILER intel)
set(_CONAN_SETTING_COMPILER_VERSION ${COMPILER_VERSION})
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL AppleClang)
# using AppleClang
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(_CONAN_SETTING_COMPILER apple-clang)
set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}.${MINOR})
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL Clang)
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(_CONAN_SETTING_COMPILER clang)
set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}.${MINOR})
if(APPLE)
cmake_policy(GET CMP0025 APPLE_CLANG_POLICY)
if(NOT APPLE_CLANG_POLICY STREQUAL NEW)
message(STATUS "Conan: APPLE and Clang detected. Assuming apple-clang compiler. Set CMP0025 to avoid it")
set(_CONAN_SETTING_COMPILER apple-clang)
endif()
endif()
if(${_CONAN_SETTING_COMPILER} STREQUAL clang AND ${MAJOR} GREATER 7)
set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR})
endif()
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif(${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL MSVC)
set(_VISUAL "Visual Studio")
_get_msvc_ide_version(_VISUAL_VERSION)
if("${_VISUAL_VERSION}" STREQUAL "")
message(FATAL_ERROR "Conan: Visual Studio not recognized")
else()
set(_CONAN_SETTING_COMPILER ${_VISUAL})
set(_CONAN_SETTING_COMPILER_VERSION ${_VISUAL_VERSION})
endif()
if(NOT _CONAN_SETTING_ARCH)
if (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "64")
set(_CONAN_SETTING_ARCH x86_64)
elseif (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "^ARM")
message(STATUS "Conan: Using default ARM architecture from MSVC")
set(_CONAN_SETTING_ARCH armv6)
elseif (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "86")
set(_CONAN_SETTING_ARCH x86)
else ()
message(FATAL_ERROR "Conan: Unknown MSVC architecture [${MSVC_${LANGUAGE}_ARCHITECTURE_ID}]")
endif()
endif()
conan_cmake_detect_vs_runtime(_vs_runtime ${ARGV})
message(STATUS "Conan: Detected VS runtime: ${_vs_runtime}")
set(_CONAN_SETTING_COMPILER_RUNTIME ${_vs_runtime})
if (CMAKE_GENERATOR_TOOLSET)
set(_CONAN_SETTING_COMPILER_TOOLSET ${CMAKE_VS_PLATFORM_TOOLSET})
elseif(CMAKE_VS_PLATFORM_TOOLSET AND (CMAKE_GENERATOR STREQUAL "Ninja"))
set(_CONAN_SETTING_COMPILER_TOOLSET ${CMAKE_VS_PLATFORM_TOOLSET})
endif()
else()
message(FATAL_ERROR "Conan: compiler setup not recognized")
endif()
endmacro()
function(conan_cmake_settings result)
#message(STATUS "COMPILER " ${CMAKE_CXX_COMPILER})
#message(STATUS "COMPILER " ${CMAKE_CXX_COMPILER_ID})
#message(STATUS "VERSION " ${CMAKE_CXX_COMPILER_VERSION})
#message(STATUS "FLAGS " ${CMAKE_LANG_FLAGS})
#message(STATUS "LIB ARCH " ${CMAKE_CXX_LIBRARY_ARCHITECTURE})
#message(STATUS "BUILD TYPE " ${CMAKE_BUILD_TYPE})
#message(STATUS "GENERATOR " ${CMAKE_GENERATOR})
#message(STATUS "GENERATOR WIN64 " ${CMAKE_CL_64})
message(STATUS "Conan: Automatic detection of conan settings from cmake")
conan_parse_arguments(${ARGV})
_conan_detect_build_type(${ARGV})
_conan_check_system_name()
_conan_check_language()
_conan_detect_compiler(${ARGV})
# If profile is defined it is used
if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND ARGUMENTS_DEBUG_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_DEBUG_PROFILE})
elseif(CMAKE_BUILD_TYPE STREQUAL "Release" AND ARGUMENTS_RELEASE_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_RELEASE_PROFILE})
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" AND ARGUMENTS_RELWITHDEBINFO_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_RELWITHDEBINFO_PROFILE})
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel" AND ARGUMENTS_MINSIZEREL_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_MINSIZEREL_PROFILE})
elseif(ARGUMENTS_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_PROFILE})
endif()
foreach(ARG ${_APPLIED_PROFILES})
set(_SETTINGS ${_SETTINGS} -pr=${ARG})
endforeach()
foreach(ARG ${ARGUMENTS_PROFILE_BUILD})
conan_check(VERSION 1.24.0 REQUIRED DETECT_QUIET)
set(_SETTINGS ${_SETTINGS} -pr:b=${ARG})
endforeach()
if(NOT _SETTINGS OR ARGUMENTS_PROFILE_AUTO STREQUAL "ALL")
set(ARGUMENTS_PROFILE_AUTO arch build_type compiler compiler.version
compiler.runtime compiler.libcxx compiler.toolset)
endif()
# remove any manually specified settings from the autodetected settings
foreach(ARG ${ARGUMENTS_SETTINGS})
string(REGEX MATCH "[^=]*" MANUAL_SETTING "${ARG}")
message(STATUS "Conan: ${MANUAL_SETTING} was added as an argument. Not using the autodetected one.")
list(REMOVE_ITEM ARGUMENTS_PROFILE_AUTO "${MANUAL_SETTING}")
endforeach()
# Automatic from CMake
foreach(ARG ${ARGUMENTS_PROFILE_AUTO})
string(TOUPPER ${ARG} _arg_name)
string(REPLACE "." "_" _arg_name ${_arg_name})
if(_CONAN_SETTING_${_arg_name})
set(_SETTINGS ${_SETTINGS} -s ${ARG}=${_CONAN_SETTING_${_arg_name}})
endif()
endforeach()
foreach(ARG ${ARGUMENTS_SETTINGS})
set(_SETTINGS ${_SETTINGS} -s ${ARG})
endforeach()
message(STATUS "Conan: Settings= ${_SETTINGS}")
set(${result} ${_SETTINGS} PARENT_SCOPE)
endfunction()
function(conan_cmake_detect_unix_libcxx result)
# Take into account any -stdlib in compile options
get_directory_property(compile_options DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMPILE_OPTIONS)
string(GENEX_STRIP "${compile_options}" compile_options)
# Take into account any _GLIBCXX_USE_CXX11_ABI in compile definitions
get_directory_property(defines DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMPILE_DEFINITIONS)
string(GENEX_STRIP "${defines}" defines)
foreach(define ${defines})
if(define MATCHES "_GLIBCXX_USE_CXX11_ABI")
if(define MATCHES "^-D")
set(compile_options ${compile_options} "${define}")
else()
set(compile_options ${compile_options} "-D${define}")
endif()
endif()
endforeach()
# add additional compiler options ala cmRulePlaceholderExpander::ExpandRuleVariable
set(EXPAND_CXX_COMPILER ${CMAKE_CXX_COMPILER})
if(CMAKE_CXX_COMPILER_ARG1)
# CMake splits CXX="foo bar baz" into CMAKE_CXX_COMPILER="foo", CMAKE_CXX_COMPILER_ARG1="bar baz"
# without this, ccache, winegcc, or other wrappers might lose all their arguments
separate_arguments(SPLIT_CXX_COMPILER_ARG1 NATIVE_COMMAND ${CMAKE_CXX_COMPILER_ARG1})
list(APPEND EXPAND_CXX_COMPILER ${SPLIT_CXX_COMPILER_ARG1})
endif()
if(CMAKE_CXX_COMPILE_OPTIONS_TARGET AND CMAKE_CXX_COMPILER_TARGET)
# without --target= we may be calling the wrong underlying GCC
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_TARGET}${CMAKE_CXX_COMPILER_TARGET}")
endif()
if(CMAKE_CXX_COMPILE_OPTIONS_EXTERNAL_TOOLCHAIN AND CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN)
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_EXTERNAL_TOOLCHAIN}${CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN}")
endif()
if(CMAKE_CXX_COMPILE_OPTIONS_SYSROOT)
# without --sysroot= we may find the wrong #include
if(CMAKE_SYSROOT_COMPILE)
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_SYSROOT}${CMAKE_SYSROOT_COMPILE}")
elseif(CMAKE_SYSROOT)
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_SYSROOT}${CMAKE_SYSROOT}")
endif()
endif()
separate_arguments(SPLIT_CXX_FLAGS NATIVE_COMMAND ${CMAKE_CXX_FLAGS})
if(CMAKE_OSX_SYSROOT)
set(xcode_sysroot_option "--sysroot=${CMAKE_OSX_SYSROOT}")
endif()
execute_process(
COMMAND ${CMAKE_COMMAND} -E echo "#include <cstddef>"
COMMAND ${EXPAND_CXX_COMPILER} ${SPLIT_CXX_FLAGS} -x c++ ${xcode_sysroot_option} ${compile_options} -E -dM -
OUTPUT_VARIABLE string_defines
)
if(string_defines MATCHES "#define __GLIBCXX__")
# Allow -D_GLIBCXX_USE_CXX11_ABI=ON/OFF as argument to cmake
if(DEFINED _GLIBCXX_USE_CXX11_ABI)
if(_GLIBCXX_USE_CXX11_ABI)
set(${result} libstdc++11 PARENT_SCOPE)
return()
else()
set(${result} libstdc++ PARENT_SCOPE)
return()
endif()
endif()
if(string_defines MATCHES "#define _GLIBCXX_USE_CXX11_ABI 1\n")
set(${result} libstdc++11 PARENT_SCOPE)
else()
# Either the compiler is missing the define because it is old, and so
# it can't use the new abi, or the compiler was configured to use the
# old abi by the user or distro (e.g. devtoolset on RHEL/CentOS)
set(${result} libstdc++ PARENT_SCOPE)
endif()
else()
set(${result} libc++ PARENT_SCOPE)
endif()
endfunction()
function(conan_cmake_detect_vs_runtime result)
conan_parse_arguments(${ARGV})
if(ARGUMENTS_BUILD_TYPE)
set(build_type "${ARGUMENTS_BUILD_TYPE}")
elseif(CMAKE_BUILD_TYPE)
set(build_type "${CMAKE_BUILD_TYPE}")
else()
message(FATAL_ERROR "Please specify in command line CMAKE_BUILD_TYPE (-DCMAKE_BUILD_TYPE=Release)")
endif()
if(build_type)
string(TOUPPER "${build_type}" build_type)
endif()
set(variables CMAKE_CXX_FLAGS_${build_type} CMAKE_C_FLAGS_${build_type} CMAKE_CXX_FLAGS CMAKE_C_FLAGS)
foreach(variable ${variables})
if(NOT "${${variable}}" STREQUAL "")
string(REPLACE " " ";" flags "${${variable}}")
foreach (flag ${flags})
if("${flag}" STREQUAL "/MD" OR "${flag}" STREQUAL "/MDd" OR "${flag}" STREQUAL "/MT" OR "${flag}" STREQUAL "/MTd")
string(SUBSTRING "${flag}" 1 -1 runtime)
set(${result} "${runtime}" PARENT_SCOPE)
return()
endif()
endforeach()
endif()
endforeach()
if("${build_type}" STREQUAL "DEBUG")
set(${result} "MDd" PARENT_SCOPE)
else()
set(${result} "MD" PARENT_SCOPE)
endif()
endfunction()
function(_collect_settings result)
set(ARGUMENTS_PROFILE_AUTO arch build_type compiler compiler.version
compiler.runtime compiler.libcxx compiler.toolset)
foreach(ARG ${ARGUMENTS_PROFILE_AUTO})
string(TOUPPER ${ARG} _arg_name)
string(REPLACE "." "_" _arg_name ${_arg_name})
if(_CONAN_SETTING_${_arg_name})
set(detected_settings ${detected_settings} ${ARG}=${_CONAN_SETTING_${_arg_name}})
endif()
endforeach()
set(${result} ${detected_settings} PARENT_SCOPE)
endfunction()
function(conan_cmake_autodetect detected_settings)
_conan_detect_build_type()
_conan_check_system_name()
_conan_check_language()
_conan_detect_compiler()
_collect_settings(collected_settings)
set(${detected_settings} ${collected_settings} PARENT_SCOPE)
endfunction()
macro(conan_parse_arguments)
set(options BASIC_SETUP CMAKE_TARGETS UPDATE KEEP_RPATHS NO_LOAD NO_OUTPUT_DIRS OUTPUT_QUIET NO_IMPORTS SKIP_STD)
set(oneValueArgs CONANFILE ARCH BUILD_TYPE INSTALL_FOLDER CONAN_COMMAND)
set(multiValueArgs DEBUG_PROFILE RELEASE_PROFILE RELWITHDEBINFO_PROFILE MINSIZEREL_PROFILE
PROFILE REQUIRES OPTIONS IMPORTS SETTINGS BUILD ENV GENERATORS PROFILE_AUTO
INSTALL_ARGS CONFIGURATION_TYPES PROFILE_BUILD BUILD_REQUIRES)
cmake_parse_arguments(ARGUMENTS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
endmacro()
function(old_conan_cmake_install)
# Calls "conan install"
# Argument BUILD is equivalent to --build={missing, PkgName,...} or
# --build when argument is 'BUILD all' (which builds all packages from source)
# Argument CONAN_COMMAND, to specify the conan path, e.g. in case of running from source
# cmake does not identify conan as a command, even if it is +x and in the path
conan_parse_arguments(${ARGV})
if(CONAN_CMAKE_MULTI)
set(ARGUMENTS_GENERATORS ${ARGUMENTS_GENERATORS} cmake_multi)
else()
set(ARGUMENTS_GENERATORS ${ARGUMENTS_GENERATORS} cmake)
endif()
set(CONAN_BUILD_POLICY "")
foreach(ARG ${ARGUMENTS_BUILD})
if(${ARG} STREQUAL "all")
set(CONAN_BUILD_POLICY ${CONAN_BUILD_POLICY} --build)
break()
else()
set(CONAN_BUILD_POLICY ${CONAN_BUILD_POLICY} --build=${ARG})
endif()
endforeach()
if(ARGUMENTS_CONAN_COMMAND)
set(CONAN_CMD ${ARGUMENTS_CONAN_COMMAND})
else()
conan_check(REQUIRED)
endif()
set(CONAN_OPTIONS "")
if(ARGUMENTS_CONANFILE)
if(IS_ABSOLUTE ${ARGUMENTS_CONANFILE})
set(CONANFILE ${ARGUMENTS_CONANFILE})
else()
set(CONANFILE ${CMAKE_CURRENT_SOURCE_DIR}/${ARGUMENTS_CONANFILE})
endif()
else()
set(CONANFILE ".")
endif()
foreach(ARG ${ARGUMENTS_OPTIONS})
set(CONAN_OPTIONS ${CONAN_OPTIONS} -o=${ARG})
endforeach()
if(ARGUMENTS_UPDATE)
set(CONAN_INSTALL_UPDATE --update)
endif()
if(ARGUMENTS_NO_IMPORTS)
set(CONAN_INSTALL_NO_IMPORTS --no-imports)
endif()
set(CONAN_INSTALL_FOLDER "")
if(ARGUMENTS_INSTALL_FOLDER)
set(CONAN_INSTALL_FOLDER -if=${ARGUMENTS_INSTALL_FOLDER})
endif()
foreach(ARG ${ARGUMENTS_GENERATORS})
set(CONAN_GENERATORS ${CONAN_GENERATORS} -g=${ARG})
endforeach()
foreach(ARG ${ARGUMENTS_ENV})
set(CONAN_ENV_VARS ${CONAN_ENV_VARS} -e=${ARG})
endforeach()
set(conan_args install ${CONANFILE} ${settings} ${CONAN_ENV_VARS} ${CONAN_GENERATORS} ${CONAN_BUILD_POLICY} ${CONAN_INSTALL_UPDATE} ${CONAN_INSTALL_NO_IMPORTS} ${CONAN_OPTIONS} ${CONAN_INSTALL_FOLDER} ${ARGUMENTS_INSTALL_ARGS})
string (REPLACE ";" " " _conan_args "${conan_args}")
message(STATUS "Conan executing: ${CONAN_CMD} ${_conan_args}")
if(ARGUMENTS_OUTPUT_QUIET)
execute_process(COMMAND ${CONAN_CMD} ${conan_args}
RESULT_VARIABLE return_code
OUTPUT_VARIABLE conan_output
ERROR_VARIABLE conan_output
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
else()
execute_process(COMMAND ${CONAN_CMD} ${conan_args}
RESULT_VARIABLE return_code
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif()
if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan install failed='${return_code}'")
endif()
endfunction()
function(conan_cmake_install)
if(DEFINED CONAN_COMMAND)
set(CONAN_CMD ${CONAN_COMMAND})
else()
conan_check(REQUIRED)
endif()
set(installOptions UPDATE NO_IMPORTS OUTPUT_QUIET ERROR_QUIET)
set(installOneValueArgs PATH_OR_REFERENCE REFERENCE REMOTE LOCKFILE LOCKFILE_OUT LOCKFILE_NODE_ID INSTALL_FOLDER)
set(installMultiValueArgs GENERATOR BUILD ENV ENV_HOST ENV_BUILD OPTIONS_HOST OPTIONS OPTIONS_BUILD PROFILE
PROFILE_HOST PROFILE_BUILD SETTINGS SETTINGS_HOST SETTINGS_BUILD)
cmake_parse_arguments(ARGS "${installOptions}" "${installOneValueArgs}" "${installMultiValueArgs}" ${ARGN})
foreach(arg ${installOptions})
if(ARGS_${arg})
set(${arg} ${${arg}} ${ARGS_${arg}})
endif()
endforeach()
foreach(arg ${installOneValueArgs})
if(DEFINED ARGS_${arg})
if("${arg}" STREQUAL "REMOTE")
set(flag "--remote")
elseif("${arg}" STREQUAL "LOCKFILE")
set(flag "--lockfile")
elseif("${arg}" STREQUAL "LOCKFILE_OUT")
set(flag "--lockfile-out")
elseif("${arg}" STREQUAL "LOCKFILE_NODE_ID")
set(flag "--lockfile-node-id")
elseif("${arg}" STREQUAL "INSTALL_FOLDER")
set(flag "--install-folder")
endif()
set(${arg} ${${arg}} ${flag} ${ARGS_${arg}})
endif()
endforeach()
foreach(arg ${installMultiValueArgs})
if(DEFINED ARGS_${arg})
if("${arg}" STREQUAL "GENERATOR")
set(flag "--generator")
elseif("${arg}" STREQUAL "BUILD")
set(flag "--build")
elseif("${arg}" STREQUAL "ENV")
set(flag "--env")
elseif("${arg}" STREQUAL "ENV_HOST")
set(flag "--env:host")
elseif("${arg}" STREQUAL "ENV_BUILD")
set(flag "--env:build")
elseif("${arg}" STREQUAL "OPTIONS")
set(flag "--options")
elseif("${arg}" STREQUAL "OPTIONS_HOST")
set(flag "--options:host")
elseif("${arg}" STREQUAL "OPTIONS_BUILD")
set(flag "--options:build")
elseif("${arg}" STREQUAL "PROFILE")
set(flag "--profile")
elseif("${arg}" STREQUAL "PROFILE_HOST")
set(flag "--profile:host")
elseif("${arg}" STREQUAL "PROFILE_BUILD")
set(flag "--profile:build")
elseif("${arg}" STREQUAL "SETTINGS")
set(flag "--settings")
elseif("${arg}" STREQUAL "SETTINGS_HOST")
set(flag "--settings:host")
elseif("${arg}" STREQUAL "SETTINGS_BUILD")
set(flag "--settings:build")
endif()
list(LENGTH ARGS_${arg} numargs)
foreach(item ${ARGS_${arg}})
if(${item} STREQUAL "all" AND ${arg} STREQUAL "BUILD")
set(${arg} "--build")
break()
endif()
set(${arg} ${${arg}} ${flag} ${item})
endforeach()
endif()
endforeach()
if(DEFINED UPDATE)
set(UPDATE --update)
endif()
if(DEFINED NO_IMPORTS)
set(NO_IMPORTS --no-imports)
endif()
set(install_args install ${PATH_OR_REFERENCE} ${REFERENCE} ${UPDATE} ${NO_IMPORTS} ${REMOTE} ${LOCKFILE} ${LOCKFILE_OUT} ${LOCKFILE_NODE_ID} ${INSTALL_FOLDER}
${GENERATOR} ${BUILD} ${ENV} ${ENV_HOST} ${ENV_BUILD} ${OPTIONS} ${OPTIONS_HOST} ${OPTIONS_BUILD}
${PROFILE} ${PROFILE_HOST} ${PROFILE_BUILD} ${SETTINGS} ${SETTINGS_HOST} ${SETTINGS_BUILD})
string(REPLACE ";" " " _install_args "${install_args}")
message(STATUS "Conan executing: ${CONAN_CMD} ${_install_args}")
if(ARGS_OUTPUT_QUIET)
set(OUTPUT_OPT OUTPUT_QUIET)
endif()
if(ARGS_ERROR_QUIET)
set(ERROR_OPT ERROR_QUIET)
endif()
execute_process(COMMAND ${CONAN_CMD} ${install_args}
RESULT_VARIABLE return_code
${OUTPUT_OPT}
${ERROR_OPT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
if(NOT "${return_code}" STREQUAL "0")
if (ARGS_ERROR_QUIET)
message(WARNING "Conan install failed='${return_code}'")
else()
message(FATAL_ERROR "Conan install failed='${return_code}'")
endif()
endif()
endfunction()
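# A minimal usage sketch for conan_cmake_install (the package reference and
# remote name below are illustrative assumptions, not defined by this file):
#   conan_cmake_configure(REQUIRES fmt/8.1.1 GENERATORS cmake)
#   conan_cmake_install(PATH_OR_REFERENCE .
#                       BUILD missing
#                       REMOTE conancenter)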
function(conan_cmake_setup_conanfile)
conan_parse_arguments(${ARGV})
if(ARGUMENTS_CONANFILE)
get_filename_component(_CONANFILE_NAME ${ARGUMENTS_CONANFILE} NAME)
# configure_file will make sure cmake re-runs when conanfile is updated
configure_file(${ARGUMENTS_CONANFILE} ${CMAKE_CURRENT_BINARY_DIR}/${_CONANFILE_NAME}.junk COPYONLY)
file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/${_CONANFILE_NAME}.junk)
else()
conan_cmake_generate_conanfile(ON ${ARGV})
endif()
endfunction()
function(conan_cmake_configure)
conan_cmake_generate_conanfile(OFF ${ARGV})
endfunction()
# Generate, writing in disk a conanfile.txt with the requires, options, and imports
# specified as arguments
# This will be considered as temporary file, generated in CMAKE_CURRENT_BINARY_DIR)
function(conan_cmake_generate_conanfile DEFAULT_GENERATOR)
conan_parse_arguments(${ARGV})
set(_FN "${CMAKE_CURRENT_BINARY_DIR}/conanfile.txt")
file(WRITE ${_FN} "")
if(DEFINED ARGUMENTS_REQUIRES)
file(APPEND ${_FN} "[requires]\n")
foreach(REQUIRE ${ARGUMENTS_REQUIRES})
file(APPEND ${_FN} ${REQUIRE} "\n")
endforeach()
endif()
if (DEFAULT_GENERATOR OR DEFINED ARGUMENTS_GENERATORS)
file(APPEND ${_FN} "[generators]\n")
if (DEFAULT_GENERATOR)
file(APPEND ${_FN} "cmake\n")
endif()
if (DEFINED ARGUMENTS_GENERATORS)
foreach(GENERATOR ${ARGUMENTS_GENERATORS})
file(APPEND ${_FN} ${GENERATOR} "\n")
endforeach()
endif()
endif()
if(DEFINED ARGUMENTS_BUILD_REQUIRES)
file(APPEND ${_FN} "[build_requires]\n")
foreach(BUILD_REQUIRE ${ARGUMENTS_BUILD_REQUIRES})
file(APPEND ${_FN} ${BUILD_REQUIRE} "\n")
endforeach()
endif()
if(DEFINED ARGUMENTS_IMPORTS)
file(APPEND ${_FN} "[imports]\n")
foreach(IMPORTS ${ARGUMENTS_IMPORTS})
file(APPEND ${_FN} ${IMPORTS} "\n")
endforeach()
endif()
if(DEFINED ARGUMENTS_OPTIONS)
file(APPEND ${_FN} "[options]\n")
foreach(OPTION ${ARGUMENTS_OPTIONS})
file(APPEND ${_FN} ${OPTION} "\n")
endforeach()
endif()
endfunction()
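# For example (illustrative), conan_cmake_generate_conanfile(OFF REQUIRES boost/1.75.0 GENERATORS cmake)
# writes a conanfile.txt along the lines of:
#   [requires]
#   boost/1.75.0
#   [generators]
#   cmake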
macro(conan_load_buildinfo)
if(CONAN_CMAKE_MULTI)
set(_CONANBUILDINFO conanbuildinfo_multi.cmake)
else()
set(_CONANBUILDINFO conanbuildinfo.cmake)
endif()
if(ARGUMENTS_INSTALL_FOLDER)
set(_CONANBUILDINFOFOLDER ${ARGUMENTS_INSTALL_FOLDER})
else()
set(_CONANBUILDINFOFOLDER ${CMAKE_CURRENT_BINARY_DIR})
endif()
# Checks for the existence of conanbuildinfo.cmake, and loads it
# important that it is macro, so variables defined at parent scope
if(EXISTS "${_CONANBUILDINFOFOLDER}/${_CONANBUILDINFO}")
message(STATUS "Conan: Loading ${_CONANBUILDINFO}")
include(${_CONANBUILDINFOFOLDER}/${_CONANBUILDINFO})
else()
message(FATAL_ERROR "${_CONANBUILDINFO} doesn't exist in ${CMAKE_CURRENT_BINARY_DIR}")
endif()
endmacro()
macro(conan_cmake_run)
conan_parse_arguments(${ARGV})
if(ARGUMENTS_CONFIGURATION_TYPES AND NOT CMAKE_CONFIGURATION_TYPES)
message(WARNING "CONFIGURATION_TYPES should only be specified for multi-configuration generators")
elseif(ARGUMENTS_CONFIGURATION_TYPES AND ARGUMENTS_BUILD_TYPE)
message(WARNING "CONFIGURATION_TYPES and BUILD_TYPE arguments should not be defined at the same time.")
endif()
if(CMAKE_CONFIGURATION_TYPES AND NOT CMAKE_BUILD_TYPE AND NOT CONAN_EXPORTED
AND NOT ARGUMENTS_BUILD_TYPE)
set(CONAN_CMAKE_MULTI ON)
if (NOT ARGUMENTS_CONFIGURATION_TYPES)
set(ARGUMENTS_CONFIGURATION_TYPES "Release;Debug")
endif()
message(STATUS "Conan: Using cmake-multi generator")
else()
set(CONAN_CMAKE_MULTI OFF)
endif()
if(NOT CONAN_EXPORTED)
conan_cmake_setup_conanfile(${ARGV})
if(CONAN_CMAKE_MULTI)
foreach(CMAKE_BUILD_TYPE ${ARGUMENTS_CONFIGURATION_TYPES})
set(ENV{CONAN_IMPORT_PATH} ${CMAKE_BUILD_TYPE})
conan_cmake_settings(settings ${ARGV})
old_conan_cmake_install(SETTINGS ${settings} ${ARGV})
endforeach()
set(CMAKE_BUILD_TYPE)
else()
conan_cmake_settings(settings ${ARGV})
old_conan_cmake_install(SETTINGS ${settings} ${ARGV})
endif()
endif()
if (NOT ARGUMENTS_NO_LOAD)
conan_load_buildinfo()
endif()
if(ARGUMENTS_BASIC_SETUP)
foreach(_option CMAKE_TARGETS KEEP_RPATHS NO_OUTPUT_DIRS SKIP_STD)
if(ARGUMENTS_${_option})
if(${_option} STREQUAL "CMAKE_TARGETS")
list(APPEND _setup_options "TARGETS")
else()
list(APPEND _setup_options ${_option})
endif()
endif()
endforeach()
conan_basic_setup(${_setup_options})
endif()
endmacro()
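# Typical invocation (a sketch; the conanfile name and build policy are whatever
# the consuming project uses):
#   conan_cmake_run(CONANFILE conanfile.py
#                   BUILD missing
#                   BASIC_SETUP)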
macro(conan_check)
# Checks conan availability in PATH
# Arguments REQUIRED, DETECT_QUIET and VERSION are optional
# Example usage:
# conan_check(VERSION 1.0.0 REQUIRED)
set(options REQUIRED DETECT_QUIET)
set(oneValueArgs VERSION)
cmake_parse_arguments(CONAN "${options}" "${oneValueArgs}" "" ${ARGN})
if(NOT CONAN_DETECT_QUIET)
message(STATUS "Conan: checking conan executable")
endif()
find_program(CONAN_CMD conan)
if(NOT CONAN_CMD AND CONAN_REQUIRED)
message(FATAL_ERROR "Conan executable not found! Please install conan.")
endif()
if(NOT CONAN_DETECT_QUIET)
message(STATUS "Conan: Found program ${CONAN_CMD}")
endif()
execute_process(COMMAND ${CONAN_CMD} --version
RESULT_VARIABLE return_code
OUTPUT_VARIABLE CONAN_VERSION_OUTPUT
ERROR_VARIABLE CONAN_VERSION_OUTPUT)
if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan --version failed='${return_code}'")
endif()
if(NOT CONAN_DETECT_QUIET)
message(STATUS "Conan: Version found ${CONAN_VERSION_OUTPUT}")
endif()
if(DEFINED CONAN_VERSION)
string(REGEX MATCH ".*Conan version ([0-9]+\\.[0-9]+\\.[0-9]+)" FOO
"${CONAN_VERSION_OUTPUT}")
if(${CMAKE_MATCH_1} VERSION_LESS ${CONAN_VERSION})
message(FATAL_ERROR "Conan outdated. Installed: ${CMAKE_MATCH_1}, \
required: ${CONAN_VERSION}. Consider updating via 'pip \
install conan==${CONAN_VERSION}'.")
endif()
endif()
endmacro()
function(conan_add_remote)
# Adds a remote
# Arguments URL and NAME are required, INDEX, COMMAND and VERIFY_SSL are optional
# Example usage:
# conan_add_remote(NAME bincrafters INDEX 1
# URL https://api.bintray.com/conan/bincrafters/public-conan
# VERIFY_SSL True)
set(oneValueArgs URL NAME INDEX COMMAND VERIFY_SSL)
cmake_parse_arguments(CONAN "" "${oneValueArgs}" "" ${ARGN})
if(DEFINED CONAN_INDEX)
set(CONAN_INDEX_ARG -i ${CONAN_INDEX})
endif()
if(DEFINED CONAN_COMMAND)
set(CONAN_CMD ${CONAN_COMMAND})
else()
conan_check(REQUIRED)
endif()
set(CONAN_VERIFY_SSL_ARG "True")
if(DEFINED CONAN_VERIFY_SSL)
set(CONAN_VERIFY_SSL_ARG ${CONAN_VERIFY_SSL})
endif()
message(STATUS "Conan: Adding ${CONAN_NAME} remote repository (${CONAN_URL}) verify ssl (${CONAN_VERIFY_SSL_ARG})")
execute_process(COMMAND ${CONAN_CMD} remote add ${CONAN_NAME} ${CONAN_INDEX_ARG} -f ${CONAN_URL} ${CONAN_VERIFY_SSL_ARG}
RESULT_VARIABLE return_code)
if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan remote failed='${return_code}'")
endif()
endfunction()
macro(conan_config_install)
# install a full configuration from a local or remote zip file
# Argument ITEM is required, arguments TYPE, SOURCE, TARGET and VERIFY_SSL are optional
# Example usage:
# conan_config_install(ITEM https://github.com/conan-io/cmake-conan.git
# TYPE git SOURCE source-folder TARGET target-folder VERIFY_SSL false)
set(oneValueArgs ITEM TYPE SOURCE TARGET VERIFY_SSL)
set(multiValueArgs ARGS)
cmake_parse_arguments(CONAN "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
find_program(CONAN_CMD conan)
if(NOT CONAN_CMD AND CONAN_REQUIRED)
message(FATAL_ERROR "Conan executable not found!")
endif()
if(DEFINED CONAN_VERIFY_SSL)
set(CONAN_VERIFY_SSL_ARG "--verify-ssl=${CONAN_VERIFY_SSL}")
endif()
if(DEFINED CONAN_TYPE)
set(CONAN_TYPE_ARG "--type=${CONAN_TYPE}")
endif()
if(DEFINED CONAN_ARGS)
set(CONAN_ARGS_ARGS "--args=\"${CONAN_ARGS}\"")
endif()
if(DEFINED CONAN_SOURCE)
set(CONAN_SOURCE_ARGS "--source-folder=${CONAN_SOURCE}")
endif()
if(DEFINED CONAN_TARGET)
set(CONAN_TARGET_ARGS "--target-folder=${CONAN_TARGET}")
endif()
set (CONAN_CONFIG_INSTALL_ARGS ${CONAN_VERIFY_SSL_ARG}
${CONAN_TYPE_ARG}
${CONAN_ARGS_ARGS}
${CONAN_SOURCE_ARGS}
${CONAN_TARGET_ARGS})
message(STATUS "Conan: Installing config from ${CONAN_ITEM}")
execute_process(COMMAND ${CONAN_CMD} config install ${CONAN_ITEM} ${CONAN_CONFIG_INSTALL_ARGS}
RESULT_VARIABLE return_code)
if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan config failed='${return_code}'")
endif()
endmacro() cmake-utils/utils.cmake 0000664 0000000 0000000 00000013576 14456142610 0015455 0 ustar 00root root 0000000 0000000 include(CheckCXXCompilerFlag)
###################################################
# Activate C++14
#
# Uses: target_activate_cpp14(buildtarget)
###################################################
function(target_activate_cpp14 TARGET)
if(MSVC)
# Required by range-v3, see its README.md
set_property(TARGET ${TARGET} PROPERTY CXX_STANDARD 17)
else()
set_property(TARGET ${TARGET} PROPERTY CXX_STANDARD 14)
endif()
set_property(TARGET ${TARGET} PROPERTY CXX_STANDARD_REQUIRED ON)
# Ideally, we'd like to use libc++ on linux as well, but:
# - http://stackoverflow.com/questions/37096062/get-a-basic-c-program-to-compile-using-clang-on-ubuntu-16
# - https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=808086
# so only use it on Apple systems...
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE)
target_compile_options(${TARGET} PUBLIC -stdlib=libc++)
endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE)
endfunction(target_activate_cpp14)
# Find clang-tidy executable (for use in target_enable_style_warnings)
if (USE_CLANG_TIDY)
find_program(
CLANG_TIDY_EXE
NAMES "clang-tidy"
DOC "Path to clang-tidy executable"
)
if(NOT CLANG_TIDY_EXE)
message(FATAL_ERROR "clang-tidy not found. Please install clang-tidy or run without -DUSE_CLANG_TIDY=on.")
else()
set(CLANG_TIDY_OPTIONS "-system-headers=0")
if (CLANG_TIDY_WARNINGS_AS_ERRORS)
set(CLANG_TIDY_OPTIONS "${CLANG_TIDY_OPTIONS}" "-warnings-as-errors=*")
endif()
message(STATUS "Clang-tidy is enabled. Executable: ${CLANG_TIDY_EXE} Arguments: ${CLANG_TIDY_OPTIONS}")
set(CLANG_TIDY_CLI "${CLANG_TIDY_EXE}" "${CLANG_TIDY_OPTIONS}")
endif()
endif()
# Find iwyu (for use in target_enable_style_warnings)
if (USE_IWYU)
find_program(
IWYU_EXE NAMES
include-what-you-use
iwyu
)
if(NOT IWYU_EXE)
message(FATAL_ERROR "include-what-you-use not found. Please install iwyu or run without -DUSE_IWYU=on.")
else()
message(STATUS "iwyu found: ${IWYU_EXE}")
set(DO_IWYU "${IWYU_EXE}")
endif()
endif()
#################################################
# Enable style compiler warnings
#
# Uses: target_enable_style_warnings(buildtarget)
#################################################
function(target_enable_style_warnings TARGET)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
# TODO
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
target_compile_options(${TARGET} PRIVATE -Wall -Wextra -Wold-style-cast -Wcast-align -Wno-unused-command-line-argument) # TODO consider -Wpedantic -Wchkp -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-include-dirs -Wnoexcept -Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wundef -Wno-unused -Wno-variadic-macros -Wno-parentheses -fdiagnostics-show-option -Wconversion and others?
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
target_compile_options(${TARGET} PRIVATE -Wall -Wextra -Wold-style-cast -Wcast-align -Wno-maybe-uninitialized) # TODO consider -Wpedantic -Wchkp -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-include-dirs -Wnoexcept -Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wundef -Wno-unused -Wno-variadic-macros -Wno-parentheses -fdiagnostics-show-option -Wconversion and others?
endif()
if (USE_WERROR)
message(STATUS "Building ${TARGET} with -Werror")
target_compile_options(${TARGET} PRIVATE -Werror)
endif()
# Enable clang-tidy
if(USE_CLANG_TIDY)
set_target_properties(
${TARGET} PROPERTIES
CXX_CLANG_TIDY "${CLANG_TIDY_CLI}"
)
endif()
if(USE_IWYU)
set_target_properties(
${TARGET} PROPERTIES
CXX_INCLUDE_WHAT_YOU_USE "${DO_IWYU}"
)
endif()
endfunction(target_enable_style_warnings)
##################################################
# Add boost to the project
#
# Uses:
# target_add_boost(buildtarget)
##################################################
function(target_add_boost TARGET)
target_link_libraries(${TARGET} PUBLIC CryfsDependencies_boost)
target_compile_definitions(${TARGET} PUBLIC BOOST_THREAD_VERSION=4)
endfunction(target_add_boost)
##################################################
# Specify that a specific minimal version of gcc is required
#
# Uses:
# require_gcc_version(4.9)
##################################################
function(require_gcc_version VERSION)
if (CMAKE_COMPILER_IS_GNUCXX)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
if (GCC_VERSION VERSION_LESS ${VERSION})
message(FATAL_ERROR "Needs at least gcc version ${VERSION}, found gcc ${GCC_VERSION}")
endif (GCC_VERSION VERSION_LESS ${VERSION})
endif (CMAKE_COMPILER_IS_GNUCXX)
endfunction(require_gcc_version)
##################################################
# Specify that a specific minimal version of clang is required
#
# Uses:
# require_clang_version(3.5)
##################################################
function(require_clang_version VERSION)
if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${VERSION})
message(FATAL_ERROR "Needs at least clang version ${VERSION}, found clang ${CMAKE_CXX_COMPILER_VERSION}")
endif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${VERSION})
endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
endfunction(require_clang_version)
include(cmake-utils/TargetArch.cmake)
function(get_target_architecture output_var)
target_architecture(local_output_var)
set(${output_var} ${local_output_var} PARENT_SCOPE)
endfunction()
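# Usage sketch (the exact architecture string, e.g. "x86_64", is whatever
# TargetArch.cmake detects):
#   get_target_architecture(MY_ARCH)
#   message(STATUS "Building for ${MY_ARCH}")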
conanfile.py 0000664 0000000 0000000 00000003125 14456142610 0013372 0 ustar 00root root 0000000 0000000 from conans import ConanFile, CMake
class CryFSConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = [
"range-v3/0.11.0",
"spdlog/1.8.5",
"boost/1.75.0",
]
generators = "cmake"
default_options = {
"boost:system_no_deprecated": True,
"boost:asio_no_deprecated": True,
"boost:filesystem_no_deprecated": True,
"boost:without_atomic": False, # needed by boost thread
"boost:without_chrono": False, # needed by CryFS
"boost:without_container": False, # needed by boost thread
"boost:without_context": True,
"boost:without_contract": True,
"boost:without_coroutine": True,
"boost:without_date_time": False, # needed by boost thread
"boost:without_exception": False, # needed by boost thread
"boost:without_fiber": True,
"boost:without_filesystem": False, # needed by CryFS
"boost:without_graph": True,
"boost:without_graph_parallel": True,
"boost:without_iostreams": True,
"boost:without_json": True,
"boost:without_locale": True,
"boost:without_log": True,
"boost:without_math": True,
"boost:without_mpi": True,
"boost:without_nowide": True,
"boost:without_program_options": False, # needed by CryFS
"boost:without_python": True,
"boost:without_random": True,
"boost:without_regex": True,
"boost:without_serialization": False, # needed by boost date_time
"boost:without_stacktrace": True,
"boost:without_system": False, # needed by CryFS
"boost:without_test": True,
"boost:without_thread": False, # needed by CryFS
"boost:without_timer": True,
"boost:without_type_erasure": True,
"boost:without_wave": True,
}
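# Note: each "boost:without_<module>" flag above toggles building of one Boost
# submodule in the conan boost recipe; everything not needed by CryFS is
# disabled to cut build time and binary size.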
cpack/ 0000775 0000000 0000000 00000000000 14456142610 0012142 5 ustar 00root root 0000000 0000000 cpack/CMakeLists.txt 0000664 0000000 0000000 00000007573 14456142610 0014716 0 ustar 00root root 0000000 0000000 # appends a build number from the APPVEYOR_BUILD_NUMBER environment variable as fourth component to a version number,
# i.e. "0.10" becomes "0.10.0.[buildnumber]", "1" becomes "1.0.0.[buildnumber]".
function(append_build_number VERSION_NUMBER OUTPUT)
set(STRIPPED_VERSION_NUMBER "${VERSION_NUMBER}")
string(REPLACE "." ";" VERSION_COMPONENTS ${STRIPPED_VERSION_NUMBER})
list(LENGTH VERSION_COMPONENTS NUM_VERSION_COMPONENTS)
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 0)
message(FATAL_ERROR "Didn't find any version components")
endif()
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 1)
string(APPEND STRIPPED_VERSION_NUMBER ".0")
endif()
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 2)
string(APPEND STRIPPED_VERSION_NUMBER ".0")
endif()
if (NOT $ENV{APPVEYOR_BUILD_NUMBER} STREQUAL "")
string(APPEND STRIPPED_VERSION_NUMBER ".$ENV{APPVEYOR_BUILD_NUMBER}")
endif()
set(${OUTPUT} "${STRIPPED_VERSION_NUMBER}" PARENT_SCOPE)
endfunction()
if("${CMAKE_VERSION}" VERSION_LESS "3.3")
# Earlier cmake versions generate .deb packages for which the package manager says they're bad quality
# and asks the user whether they really want to install it. CMake 3.3 fixes this.
message(WARNING "Distribution package generation is only supported for CMake version >= 3.3. You're using ${CMAKE_VERSION}. You will be able to build and install CryFS, but you won't be able to generate .deb packages.")
else()
# Fix debfiles permissions. Unfortunately, git doesn't store file permissions.
# When installing the .deb package and these files have the wrong permissions, the package manager complains.
execute_process(COMMAND /bin/bash -c "chmod 0755 ${CMAKE_CURRENT_SOURCE_DIR}/debfiles/*")
set(CPACK_PACKAGE_NAME "cryfs")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Encrypt your files and store them in the cloud.")
set(CPACK_PACKAGE_DESCRIPTION "CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.")
set(CPACK_PACKAGE_CONTACT "Sebastian Messmer ")
set(CPACK_PACKAGE_VENDOR "Sebastian Messmer")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/../LICENSE.txt")
get_git_version(GITVERSION_VERSION_STRING)
if(WIN32 AND NOT UNIX)
set(CPACK_GENERATOR WIX)
string(REGEX REPLACE "^([0-9\\.]+)([-+][0-9\\.a-zA-Z+-]+)?$" "\\1" STRIPPED_VERSION_NUMBER "${GITVERSION_VERSION_STRING}")
append_build_number(${STRIPPED_VERSION_NUMBER} WIX_VERSION_NUMBER)
message(STATUS "WIX package version is ${WIX_VERSION_NUMBER}")
set(CPACK_PACKAGE_VERSION "${WIX_VERSION_NUMBER}")
set(CPACK_WIX_UPGRADE_GUID "8b872ce1-557d-48e6-ac57-9f5e574feabf")
set(CPACK_WIX_PRODUCT_GUID "26116061-4f99-4c44-a178-2153fa396308")
#set(CPACK_WIX_PRODUCT_ICON "...")
set(CPACK_WIX_PROPERTY_ARPURLINFOABOUT "https://www.cryfs.org")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CryFS/${GITVERSION_VERSION_STRING}")
set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_SOURCE_DIR}/wix/change_path_env.xml")
else()
set(CPACK_GENERATOR TGZ DEB RPM)
set(CPACK_PACKAGE_VERSION "${GITVERSION_VERSION_STRING}")
set(CPACK_STRIP_FILES OFF)
set(CPACK_SOURCE_STRIP_FILES OFF)
endif()
set(CPACK_PACKAGE_EXECUTABLES "cryfs" "CryFS")
set(CPACK_DEBIAN_PACKAGE_SECTION "utils")
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
# Needs gnupg2, lsb-release for postinst script
set(CPACK_DEBIAN_PACKAGE_DEPENDS "fuse, gnupg2, lsb-release")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.cryfs.org")
set(CPACK_RPM_PACKAGE_LICENSE "LGPLv3")
set(CPACK_RPM_PACKAGE_DESCRIPTION ${CPACK_PACKAGE_DESCRIPTION})
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/usr/bin;/usr/share/man;/usr/share/man/man1")
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postinst;${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postrm")
include(CPack)
endif()
cpack/debfiles/ 0000775 0000000 0000000 00000000000 14456142610 0013717 5 ustar 00root root 0000000 0000000 cpack/debfiles/postinst 0000775 0000000 0000000 00000011316 14456142610 0015532 0 ustar 00root root 0000000 0000000 #!/bin/bash
# This script is called after the cryfs .deb package is installed.
# It sets up the package source so the user gets automatic updates for cryfs.
# DEVELOPER WARNING: There is a lot of redundancy between this file and the install.sh script in the cryfs-web repository. Please port modifications there as well!
set -e
DEBIAN_REPO_URL="http://apt.cryfs.org/debian"
UBUNTU_REPO_URL="http://apt.cryfs.org/ubuntu"
DISTRIBUTION=`lsb_release -s -i`
DISTRIBUTION_VERSION=`lsb_release -s -c`
containsElement () {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
return 1
}
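# Example: containsElement "bionic" "xenial" "bionic" "focal" returns 0 (found),
# while containsElement "sid" "xenial" "bionic" returns 1.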
get_repo_url () {
if [[ "$DISTRIBUTION" == "Debian" ]] || [[ "$DISTRIBUTION" == "Devuan" ]]; then
echo $DEBIAN_REPO_URL
elif [[ "$DISTRIBUTION" == "Ubuntu" ]]; then
echo $UBUNTU_REPO_URL
else
echo "Not adding package source because $DISTRIBUTION is not supported. Please keep CryFS manually up to date." 1>&2
exit 0
fi
}
get_apt_config () {
apt-config dump|grep "$1 "|sed -e "s/^$1\ \"\([^\"]*\)\"\;/\1/g"
}
sources_list_dir () {
root=$(get_apt_config "Dir")
etc=$(get_apt_config "Dir::Etc")
sourceparts=$(get_apt_config "Dir::Etc::sourceparts")
echo "$root/$etc/$sourceparts"
}
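# With stock apt defaults (Dir "/", Dir::Etc "etc/apt", Dir::Etc::sourceparts
# "sources.list.d") this prints "//etc/apt/sources.list.d"; the doubled slash
# is harmless to the filesystem.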
add_repository () {
dir=$(sources_list_dir)
repo_url=$(get_repo_url)
echo "deb $repo_url $DISTRIBUTION_VERSION main" > $dir/cryfs.list
}
install_key () {
# Key from http://www.cryfs.org/apt.key
apt-key add - > /dev/null <&2
exit 1
;;
esac
set +e
exit 0
cpack/debfiles/postrm 0000775 0000000 0000000 00000001546 14456142610 0015177 0 ustar 00root root 0000000 0000000 #!/bin/bash
# This script is called after the cryfs .deb package is uninstalled.
# It removes the package source that was used to get automatic updates.
set -e
get_apt_config () {
apt-config dump|grep "$1 "|sed -e "s/^$1\ \"\([^\"]*\)\"\;/\1/g"
}
sources_list_dir () {
root=$(get_apt_config "Dir")
etc=$(get_apt_config "Dir::Etc")
sourceparts=$(get_apt_config "Dir::Etc::sourceparts")
echo $root$etc$sourceparts
}
remove_repository () {
dir=$(sources_list_dir)
rm -f $dir/cryfs.list
}
remove_key () {
# Don't fail if key was already removed
apt-key rm 549E65B2 > /dev/null 2>&1 || true
}
case "$1" in
purge)
remove_repository
remove_key
;;
abort-install|abort-upgrade|remove|upgrade|failed-upgrade)
;;
*)
echo "postrm called with unknown argument '$1'" >&2
exit 1
;;
esac
set +e
exit 0
cpack/wix/ 0000775 0000000 0000000 00000000000 14456142610 0012751 5 ustar 00root root 0000000 0000000 cpack/wix/change_path_env.xml 0000664 0000000 0000000 00000000327 14456142610 0016606 0 ustar 00root root 0000000 0000000
doc/ 0000775 0000000 0000000 00000000000 14456142610 0011626 5 ustar 00root root 0000000 0000000 doc/CMakeLists.txt 0000664 0000000 0000000 00000001041 14456142610 0014362 0 ustar 00root root 0000000 0000000 project (doc)
IF (WIN32)
MESSAGE(STATUS "This is Windows. Will not install man page")
ELSE (WIN32)
INCLUDE(GNUInstallDirs)
find_program(GZIP gzip)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
COMMAND ${GZIP} -c ${CMAKE_CURRENT_SOURCE_DIR}/man/cryfs.1 > ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
# re-run gzip when the man page source changes
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/man/cryfs.1
)
add_custom_target(man ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1
CONFIGURATIONS Release
)
ENDIF(WIN32)
doc/man/ 0000775 0000000 0000000 00000000000 14456142610 0012401 5 ustar 00root root 0000000 0000000 doc/man/cryfs.1 0000664 0000000 0000000 00000017204 14456142610 0013615 0 ustar 00root root 0000000 0000000 .\" cryfs(1) man page
.
.TH cryfs 1
.
.
.
.SH NAME
cryfs \- cryptographic filesystem for the cloud
.
.
.
.SH SYNOPSIS
.\" mount/create syntax
.B cryfs
[\fB\-c\fR \fIfile\fR]
[\fB\-f\fR]
[\fIoptions\fR]
.I basedir mountpoint
.br
.\" show-ciphers syntax
.B cryfs \-\-help\fR|\fB\-\-version\fR|\fB\-\-show-ciphers
.
.
.
.SH DESCRIPTION
.
.B CryFS
encrypts your files, so you can safely store them anywhere.
.PP
.
The goal of CryFS is not only to keep file contents confidential, but also
file sizes, metadata and directory structure.
CryFS uses
.B encrypted same-size blocks
to store both the files themselves and the blocks' relations to one another.
These blocks are stored as individual files in the base directory,
which can then be synchronized with cloud services such as Dropbox.
.PP
.
The blocks are encrypted using a random key, which is stored in a
.B configuration file
encrypted by the user's passphrase.
By default, it will be stored together with the data in the base directory,
but you can choose a different location if you do not want it in your cloud
or when using a weak passphrase.
.
.
.
.SH USING CRYFS
.
.SS Selecting base and mount directories
.
While you can access your files through your
.B mount directory,
CryFS actually places them in your
.B base directory
after encrypting.
CryFS will encrypt and decrypt your files 'on the fly' as they are accessed,
so files will never be stored on the disk in unencrypted form.
.PP
.
You can choose any empty directory as your base, but your mount directory
should be outside of any cloud storage, as your cloud may try to sync your
(temporarily mounted) unencrypted files as well.
.
.SS Setup and usage of your encrypted directory
.
.TP
Creating and mounting your encrypted storage use the same command-line syntax:
.B cryfs
.I basedir mountpoint
.PP
.
If CryFS detects an encrypted storage in the given base directory, you will
be asked for the passphrase to unlock and mount it. Otherwise, CryFS will
help you with creating one, just follow the on-screen instructions.
.PP
.
.TP
After you are done working with your encrypted files, unmount your storage \
with the command
.B cryfs-unmount
.I mountpoint
.
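.PP
For example, with an illustrative base directory and mountpoint:
.PP
.nf
cryfs ~/cloud/encrypted ~/secure
cryfs-unmount ~/secure
.fi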
.
.SS Changing your passphrase
.
As the encryption key to your CryFS storage is stored in your configuration
file, it would be possible to re-encrypt it using a different passphrase
(although this feature has not been implemented yet).
.PP
.
However, this does not change the actual encryption key of your storage, so
someone with access to the old passphrase and configuration file (for example
through the file history of your cloud or your file system) could still access
your files, even those created after the password change.
.PP
.
For this reason, the recommended way to change your passphrase is to create a
new CryFS storage with the new passphrase and move your files from the old to
the new one.
.
.
.
.SH OPTIONS
.
.SS Getting help
.
.TP
\fB\-h\fR, \fB\-\-help\fR
.
Show a help message containing short descriptions for all options.
.
.
.TP
\fB\-\-show\-ciphers\fR
.
Show a list of all supported encryption ciphers.
.
.
.TP
\fB\-\-version\fR
.
Show the CryFS version number.
.
.
.SS Encryption parameters
.
.TP
\fB\-\-blocksize\fR \fIarg\fR
.
Set the block size to \fIarg\fR bytes. Defaults to
.BR 32768 .
.br
\" Intentional space
.br
A higher block size may help reduce the file count in your base directory
(especially when storing large files), but it will also waste more space when
storing smaller files.
.
.
.TP
\fB\-\-cipher\fR \fIarg\fR
.
Use \fIarg\fR as the cipher for the encryption. Defaults to
.BR aes-256-gcm .
.
.
.TP
\fB\-c\fR \fIfile\fR, \fB\-\-config\fR \fIfile\fR
.
Use \fIfile\fR as configuration file for this CryFS storage instead of
\fIbasedir\fR/cryfs.config
.
.
.SS General options
.
.TP
\fB\-f\fR, \fB\-\-foreground\fI
.
Run CryFS in the foreground. Stop it with CTRL-C.
.
.
.TP
\fB\-\-allow-filesystem-upgrade\fI
.
Allow upgrading the file system if it was created with an old CryFS version. After the upgrade, older CryFS versions might not be able to use the file system anymore.
.
.
.TP
\fB\-\-allow-integrity-violations\fI
.
By default, CryFS checks for integrity violations, i.e. will notice if an adversary modified or rolled back the file system. Using this flag, you can disable the integrity checks. This can for example be helpful for loading an old snapshot of your file system without CryFS thinking an adversary rolled it back.
.
.
.TP
\fB\-\-allow-replaced-filesystem\fI
.
By default, CryFS remembers file systems it has seen in this base directory and checks that it didn't get replaced by an attacker with an entirely different file system since the last time it was loaded. However, if you do want to replace the file system with an entirely new one, you can pass in this option to disable the check.
.
.
.TP
\fB\-\-create-missing-basedir\fI
.
Creates the base directory if it does not exist yet, skipping the confirmation prompt that would normally ask whether to create it.
.
.
.TP
\fB\-\-create-missing-mountpoint\fI
.
Creates the mountpoint if it does not exist yet, skipping the confirmation prompt that would normally ask whether to create it.
.
.
.TP
\fB\-\-missing-block-is-integrity-violation\fR=true
.
When CryFS encounters a missing ciphertext block, it cannot (yet) know whether it was deleted by an unauthorized adversary or by a second authorized client. This is one of the restrictions of the integrity checks currently in place. You can enable this flag to treat missing ciphertext blocks as integrity violations, but then your file system will not be usable by multiple clients anymore. By default, this flag is disabled.
.
.
.TP
\fB\-\-logfile\fR \fIfile\fR
.
Write status information to \fIfile\fR. If no logfile is given, CryFS will
write it to syslog in background mode, or to stdout in foreground mode.
.
.
.TP
\fB\-\-unmount\-idle\fR \fIarg\fR
.
Unmount automatically after \fIarg\fR minutes of inactivity.
.
.
.
.SH FUSE Options
.
.TP
\fB\-o\fR \fIoption\fR, \fB\-\-fuse\-option\fR \fIoption\fR
.
Pass through options to the FUSE filesystem driver.
.TP
For example:
.TP
\fB\-o\fR \fIallow_other\fR
This option overrides the security measure restricting file
access to the filesystem owner, so that all users (including
root) can access the files.
.TP
\fB\-o\fR \fIallow_root\fR
This option is similar to allow_other but file access is
limited to the filesystem owner and root. This option and
allow_other are mutually exclusive.
.
.
.
.SH ENVIRONMENT
.
.TP
\fBCRYFS_FRONTEND\fR=noninteractive
.
With this option set, CryFS will only ask for the encryption passphrase once.
Instead of asking the user for parameters not specified on the command line,
it will just use the default values. CryFS will also not ask you to confirm
your passphrase when creating a new CryFS storage.
.br
\" Intentional space
.br
Set this environment variable when automating CryFS using external tools or
shell scripts.
.
.
.TP
\fBCRYFS_NO_UPDATE_CHECK\fR=true
.
By default, CryFS connects to the internet to check for known security
vulnerabilities and new versions. This option disables this.
.
.
.TP
\fBCRYFS_LOCAL_STATE_DIR\fR=[path]
.
Sets the directory cryfs uses to store local state. This local state
is used to recognize known file systems and run integrity checks
(i.e. check that they haven't been modified by an attacker).
Default value: ${HOME}/.cryfs
.
.
.
.SH SEE ALSO
.
.BR mount.fuse (1),
.BR fusermount (1)
.PP
.
For more information about the design of CryFS, visit
.B https://www.cryfs.org
.PP
.
Visit the development repository at
.B https://github.com/cryfs/cryfs
for the source code and the full list of contributors to CryFS.
.
.
.
.SH AUTHORS
.
CryFS was created by Sebastian Messmer and contributors.
This man page was written by Maximilian Wende.
run-clang-tidy.sh 0000775 0000000 0000000 00000002037 14456142610 0014257 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
# Examples:
# mkdir cmake && cd cmake && ../run-clang-tidy.sh
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -fix
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -export-fixes fixes.yaml
set -e
CXX=clang++-11
CC=clang-11
SCRIPT=run-clang-tidy-11.py
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
echo Using ${NUMCORES} cores
# Run cmake in current working directory, but on source that is in the same directory as this script file
cmake -DBUILD_TESTING=on -DCMAKE_CXX_COMPILER=${CXX} -DCMAKE_C_COMPILER=${CC} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"
# Filter all third party code from the compilation database
cat compile_commands.json|jq "map(select(.file | test(\"^$(realpath ${0%/*})/(src|test)/.*$\")))" > compile_commands2.json
rm compile_commands.json
mv compile_commands2.json compile_commands.json
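# The jq expression above keeps only compile commands whose "file" lies under the
# project's src/ or test/ directories, dropping e.g. conan-provided third-party sources.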
${SCRIPT} -j${NUMCORES} -quiet -header-filter "$(realpath ${0%/*})/(src|test)/.*" "$@"
run-iwyu.sh 0000775 0000000 0000000 00000002045 14456142610 0013220 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
# Examples:
# mkdir cmake && cd cmake && ../run-iwyu.sh
# mkdir cmake && cd cmake && ../run-iwyu.sh -fix
set -e
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
echo Using ${NUMCORES} cores
# Run cmake in current working directory, but on source that is in the same directory as this script file
cmake -DBUILD_TESTING=on -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"
# Filter all third party code from the compilation database
cat compile_commands.json|jq "map(select(.file | test(\"^$(realpath ${0%/*})/(src|test)/.*$\")))" > compile_commands2.json
rm compile_commands.json
mv compile_commands2.json compile_commands.json
if [ "$1" = "-fix" ]; then
TMPFILE=/tmp/iwyu.`cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8`.out
function cleanup {
rm ${TMPFILE}
}
trap cleanup EXIT
iwyu_tool -j${NUMCORES} -p. "${@:2}" | tee ${TMPFILE}
fix_include < ${TMPFILE}
else
iwyu_tool -j${NUMCORES} -p. "$@"
fi
src/ 0000775 0000000 0000000 00000000000 14456142610 0011650 5 ustar 00root root 0000000 0000000 src/CMakeLists.txt 0000664 0000000 0000000 00000000515 14456142610 0014411 0 ustar 00root root 0000000 0000000 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory(gitversion)
add_subdirectory(cpp-utils)
add_subdirectory(fspp)
add_subdirectory(parallelaccessstore)
add_subdirectory(blockstore)
add_subdirectory(blobstore)
add_subdirectory(cryfs)
add_subdirectory(cryfs-cli)
add_subdirectory(cryfs-unmount)
add_subdirectory(stats)
src/blobstore/ 0000775 0000000 0000000 00000000000 14456142610 0013643 5 ustar 00root root 0000000 0000000 src/blobstore/CMakeLists.txt 0000664 0000000 0000000 00000002374 14456142610 0016411 0 ustar 00root root 0000000 0000000 project (blobstore)
set(SOURCES
implementations/onblocks/parallelaccessdatatreestore/ParallelAccessDataTreeStoreAdapter.cpp
implementations/onblocks/parallelaccessdatatreestore/DataTreeRef.cpp
implementations/onblocks/parallelaccessdatatreestore/ParallelAccessDataTreeStore.cpp
implementations/onblocks/utils/Math.cpp
implementations/onblocks/BlobStoreOnBlocks.cpp
implementations/onblocks/datanodestore/DataNode.cpp
implementations/onblocks/datanodestore/DataLeafNode.cpp
implementations/onblocks/datanodestore/DataInnerNode.cpp
implementations/onblocks/datanodestore/DataNodeStore.cpp
implementations/onblocks/datatreestore/impl/algorithms.cpp
implementations/onblocks/datatreestore/impl/CachedValue.cpp
implementations/onblocks/datatreestore/impl/LeafTraverser.cpp
implementations/onblocks/datatreestore/LeafHandle.cpp
implementations/onblocks/datatreestore/DataTree.cpp
implementations/onblocks/datatreestore/DataTreeStore.cpp
implementations/onblocks/BlobOnBlocks.cpp
)
add_library(${PROJECT_NAME} STATIC ${SOURCES})
target_link_libraries(${PROJECT_NAME} PUBLIC cpp-utils blockstore)
target_add_boost(${PROJECT_NAME})
target_enable_style_warnings(${PROJECT_NAME})
target_activate_cpp14(${PROJECT_NAME})
src/blobstore/implementations/ 0000775 0000000 0000000 00000000000 14456142610 0017053 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/ 0000775 0000000 0000000 00000000000 14456142610 0020665 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/BlobOnBlocks.cpp 0000664 0000000 0000000 00000003115 14456142610 0023702 0 ustar 00root root 0000000 0000000 #include "parallelaccessdatatreestore/DataTreeRef.h"
#include "BlobOnBlocks.h"
#include "datanodestore/DataLeafNode.h"
#include "datanodestore/DataNodeStore.h"
#include "utils/Math.h"
#include
#include
#include "datatreestore/LeafHandle.h"
using cpputils::unique_ref;
using cpputils::Data;
using blockstore::BlockId;
namespace blobstore {
namespace onblocks {
using parallelaccessdatatreestore::DataTreeRef;
BlobOnBlocks::BlobOnBlocks(unique_ref<DataTreeRef> datatree)
: _datatree(std::move(datatree)) {
}
BlobOnBlocks::~BlobOnBlocks() {
} // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )
uint64_t BlobOnBlocks::size() const {
return _datatree->numBytes();
}
void BlobOnBlocks::resize(uint64_t numBytes) {
_datatree->resizeNumBytes(numBytes);
}
Data BlobOnBlocks::readAll() const {
return _datatree->readAllBytes();
}
void BlobOnBlocks::read(void *target, uint64_t offset, uint64_t count) const {
return _datatree->readBytes(target, offset, count);
}
uint64_t BlobOnBlocks::tryRead(void *target, uint64_t offset, uint64_t count) const {
return _datatree->tryReadBytes(target, offset, count);
}
void BlobOnBlocks::write(const void *source, uint64_t offset, uint64_t count) {
_datatree->writeBytes(source, offset, count);
}
void BlobOnBlocks::flush() {
_datatree->flush();
}
uint32_t BlobOnBlocks::numNodes() const {
return _datatree->numNodes();
}
const BlockId &BlobOnBlocks::blockId() const {
return _datatree->blockId();
}
unique_ref<DataTreeRef> BlobOnBlocks::releaseTree() {
return std::move(_datatree);
}
}
}
src/blobstore/implementations/onblocks/BlobOnBlocks.h 0000664 0000000 0000000 00000003310 14456142610 0023344 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_
#include "../../interface/Blob.h"
#include "datatreestore/LeafHandle.h"
#include
#include
#include
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataLeafNode;
}
namespace parallelaccessdatatreestore {
class DataTreeRef;
}
class BlobOnBlocks final: public Blob {
public:
BlobOnBlocks(cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> datatree);
~BlobOnBlocks();
const blockstore::BlockId &blockId() const override;
uint64_t size() const override;
void resize(uint64_t numBytes) override;
cpputils::Data readAll() const override;
void read(void *target, uint64_t offset, uint64_t size) const override;
uint64_t tryRead(void *target, uint64_t offset, uint64_t size) const override;
void write(const void *source, uint64_t offset, uint64_t size) override;
void flush() override;
uint32_t numNodes() const override;
cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> releaseTree();
private:
uint64_t _tryRead(void *target, uint64_t offset, uint64_t size) const;
void _read(void *target, uint64_t offset, uint64_t count) const;
void _traverseLeaves(uint64_t offsetBytes, uint64_t sizeBytes, std::function onExistingLeaf, std::function onCreateLeaf) const;
cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> _datatree;
DISALLOW_COPY_AND_ASSIGN(BlobOnBlocks);
};
}
}
#endif
src/blobstore/implementations/onblocks/BlobStoreOnBlocks.cpp 0000664 0000000 0000000 00000004547 14456142610 0024731 0 ustar 00root root 0000000 0000000 #include "parallelaccessdatatreestore/DataTreeRef.h"
#include "parallelaccessdatatreestore/ParallelAccessDataTreeStore.h"
#include
#include "datanodestore/DataLeafNode.h"
#include "datanodestore/DataNodeStore.h"
#include "datatreestore/DataTreeStore.h"
#include "datatreestore/DataTree.h"
#include "BlobStoreOnBlocks.h"
#include "BlobOnBlocks.h"
#include
#include
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::BlockStore;
using blockstore::parallelaccess::ParallelAccessBlockStore;
using blockstore::BlockId;
using cpputils::dynamic_pointer_move;
using boost::optional;
using boost::none;
namespace blobstore {
namespace onblocks {
using datanodestore::DataNodeStore;
using datatreestore::DataTreeStore;
using parallelaccessdatatreestore::ParallelAccessDataTreeStore;
BlobStoreOnBlocks::BlobStoreOnBlocks(unique_ref<BlockStore> blockStore, uint64_t physicalBlocksizeBytes)
: _dataTreeStore(make_unique_ref<ParallelAccessDataTreeStore>(make_unique_ref<DataTreeStore>(make_unique_ref<DataNodeStore>(make_unique_ref<ParallelAccessBlockStore>(std::move(blockStore)), physicalBlocksizeBytes)))) {
}
BlobStoreOnBlocks::~BlobStoreOnBlocks() {
}
unique_ref<Blob> BlobStoreOnBlocks::create() {
return make_unique_ref<BlobOnBlocks>(_dataTreeStore->createNewTree());
}
optional<unique_ref<Blob>> BlobStoreOnBlocks::load(const BlockId &blockId) {
auto tree = _dataTreeStore->load(blockId);
if (tree == none) {
return none;
}
return optional<unique_ref<Blob>>(make_unique_ref<BlobOnBlocks>(std::move(*tree)));
}
void BlobStoreOnBlocks::remove(unique_ref<Blob> blob) {
auto _blob = dynamic_pointer_move<BlobOnBlocks>(blob);
ASSERT(_blob != none, "Passed Blob in BlobStoreOnBlocks::remove() is not a BlobOnBlocks.");
_dataTreeStore->remove((*_blob)->releaseTree());
}
void BlobStoreOnBlocks::remove(const BlockId &blockId) {
_dataTreeStore->remove(blockId);
}
uint64_t BlobStoreOnBlocks::virtualBlocksizeBytes() const {
return _dataTreeStore->virtualBlocksizeBytes();
}
uint64_t BlobStoreOnBlocks::numBlocks() const {
return _dataTreeStore->numNodes();
}
uint64_t BlobStoreOnBlocks::estimateSpaceForNumBlocksLeft() const {
return _dataTreeStore->estimateSpaceForNumNodesLeft();
}
}
}
src/blobstore/implementations/onblocks/BlobStoreOnBlocks.h 0000664 0000000 0000000 00000002750 14456142610 0024370 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_BLOCKED_BLOBSTOREONBLOCKS_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_BLOCKED_BLOBSTOREONBLOCKS_H_
#include "../../interface/BlobStore.h"
#include "BlobOnBlocks.h"
#include
namespace blobstore {
namespace onblocks {
namespace parallelaccessdatatreestore {
class ParallelAccessDataTreeStore;
}
//TODO Make blobstore able to cope with incomplete data (some blocks missing, because they're not synchronized yet) and write test cases for that
class BlobStoreOnBlocks final: public BlobStore {
public:
BlobStoreOnBlocks(cpputils::unique_ref<blockstore::BlockStore> blockStore, uint64_t physicalBlocksizeBytes);
~BlobStoreOnBlocks();
cpputils::unique_ref<Blob> create() override;
boost::optional<cpputils::unique_ref<Blob>> load(const blockstore::BlockId &blockId) override;
void remove(cpputils::unique_ref<Blob> blob) override;
void remove(const blockstore::BlockId &blockId) override;
//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
//virtual means "space we can use" as opposed to "space it takes on the disk" (i.e. virtual is without headers, checksums, ...)
uint64_t virtualBlocksizeBytes() const override;
uint64_t numBlocks() const override;
uint64_t estimateSpaceForNumBlocksLeft() const override;
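// (illustration: if a physical block is 32768 bytes and headers/checksums take
// N bytes, virtualBlocksizeBytes() reports the remaining 32768 - N of payload)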
private:
cpputils::unique_ref<parallelaccessdatatreestore::ParallelAccessDataTreeStore> _dataTreeStore;
DISALLOW_COPY_AND_ASSIGN(BlobStoreOnBlocks);
};
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/ 0000775 0000000 0000000 00000000000 14456142610 0023521 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/datanodestore/DataInnerNode.cpp 0000664 0000000 0000000 00000006441 14456142610 0026705 0 ustar 00root root 0000000 0000000 #include "DataInnerNode.h"
#include "DataNodeStore.h"
#include
using blockstore::Block;
using blockstore::BlockStore;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::BlockId;
using std::vector;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataInnerNode::DataInnerNode(DataNodeView view)
: DataNode(std::move(view)) {
ASSERT(depth() > 0, "Inner node can't have depth 0. Is this a leaf maybe?");
if (node().FormatVersion() != FORMAT_VERSION_HEADER) {
throw std::runtime_error("This node format (" + std::to_string(node().FormatVersion()) + ") is not supported. Was it created with a newer version of CryFS?");
}
}
DataInnerNode::~DataInnerNode() {
}
unique_ref<DataInnerNode> DataInnerNode::InitializeNewNode(unique_ref<Block> block, const DataNodeLayout &layout, uint8_t depth, const vector<BlockId> &children) {
ASSERT(children.size() >= 1, "An inner node must have at least one child");
Data data = _serializeChildren(children);
return make_unique_ref<DataInnerNode>(DataNodeView::initialize(std::move(block), layout, DataNode::FORMAT_VERSION_HEADER, depth, children.size(), std::move(data)));
}
unique_ref<DataInnerNode> DataInnerNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, uint8_t depth, const vector<BlockId> &children) {
ASSERT(children.size() >= 1, "An inner node must have at least one child");
Data data = _serializeChildren(children);
return make_unique_ref<DataInnerNode>(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, depth, children.size(), std::move(data)));
}
Data DataInnerNode::_serializeChildren(const vector<BlockId> &children) {
Data data(sizeof(ChildEntry) * children.size());
uint32_t i = 0;
for (const BlockId &child : children) {
child.ToBinary(data.dataOffset(i * BlockId::BINARY_LENGTH));
++i;
}
return data;
}
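// Layout sketch: the serialized data is just the children's BlockIds back to back,
// e.g. for 3 children: [id0][id1][id2], each BlockId::BINARY_LENGTH bytes long.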
uint32_t DataInnerNode::numChildren() const {
return node().Size();
}
DataInnerNode::ChildEntry DataInnerNode::readChild(unsigned int index) const {
ASSERT(index < numChildren(), "Accessing child out of range");
return ChildEntry(BlockId::FromBinary(static_cast<const uint8_t*>(node().data()) + index * sizeof(ChildEntry)));
}
void DataInnerNode::_writeChild(unsigned int index, const ChildEntry& child) {
ASSERT(index < numChildren(), "Accessing child out of range");
node().write(child.blockId().data().data(), index * sizeof(ChildEntry), sizeof(ChildEntry));
}
DataInnerNode::ChildEntry DataInnerNode::readLastChild() const {
return readChild(numChildren() - 1);
}
void DataInnerNode::_writeLastChild(const ChildEntry& child) {
_writeChild(numChildren() - 1, child);
}
void DataInnerNode::addChild(const DataNode &child) {
ASSERT(numChildren() < maxStoreableChildren(), "Adding more children than we can store");
ASSERT(child.depth() == depth()-1, "The child that should be added has wrong depth");
node().setSize(node().Size()+1);
_writeLastChild(ChildEntry(child.blockId()));
}
void DataInnerNode::removeLastChild() {
ASSERT(node().Size() > 1, "There is no child to remove");
_writeLastChild(ChildEntry(BlockId::Null()));
node().setSize(node().Size()-1);
}
uint32_t DataInnerNode::maxStoreableChildren() const {
return node().layout().maxChildrenPerInnerNode();
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataInnerNode.h 0000664 0000000 0000000 00000002547 14456142610 0026355 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_H_
#include "DataNode.h"
#include "DataInnerNode_ChildEntry.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataInnerNode final: public DataNode {
public:
static cpputils::unique_ref<DataInnerNode> InitializeNewNode(cpputils::unique_ref<blockstore::Block> block, const DataNodeLayout &layout, uint8_t depth, const std::vector<blockstore::BlockId> &children);
static cpputils::unique_ref<DataInnerNode> CreateNewNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint8_t depth, const std::vector<blockstore::BlockId> &children);
using ChildEntry = DataInnerNode_ChildEntry;
DataInnerNode(DataNodeView block);
~DataInnerNode();
uint32_t maxStoreableChildren() const;
ChildEntry readChild(unsigned int index) const;
ChildEntry readLastChild() const;
uint32_t numChildren() const;
void addChild(const DataNode &child_blockId);
void removeLastChild();
private:
void _writeChild(unsigned int index, const ChildEntry& child);
void _writeLastChild(const ChildEntry& child);
static cpputils::Data _serializeChildren(const std::vector<blockstore::BlockId> &children);
DISALLOW_COPY_AND_ASSIGN(DataInnerNode);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataInnerNode_ChildEntry.h 0000664 0000000 0000000 00000001533 14456142610 0030474 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_CHILDENTRY_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_CHILDENTRY_H_
#include
namespace blobstore{
namespace onblocks{
namespace datanodestore{
struct DataInnerNode_ChildEntry final {
public:
DataInnerNode_ChildEntry(const blockstore::BlockId &blockId): _blockId(blockId) {}
const blockstore::BlockId& blockId() const {
return _blockId;
}
DataInnerNode_ChildEntry(const DataInnerNode_ChildEntry&) = delete;
DataInnerNode_ChildEntry& operator=(const DataInnerNode_ChildEntry&) = delete;
DataInnerNode_ChildEntry(DataInnerNode_ChildEntry&&) = default;
DataInnerNode_ChildEntry& operator=(DataInnerNode_ChildEntry&&) = default;
private:
blockstore::BlockId _blockId;
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataLeafNode.cpp 0000664 0000000 0000000 00000005460 14456142610 0026501 0 ustar 00root root 0000000 0000000 #include "DataLeafNode.h"
#include "DataInnerNode.h"
#include
using cpputils::Data;
using blockstore::BlockId;
using blockstore::BlockStore;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataLeafNode::DataLeafNode(DataNodeView view)
: DataNode(std::move(view)) {
ASSERT(node().Depth() == 0, "Leaf node must have depth 0. Is it an inner node instead?");
ASSERT(numBytes() <= maxStoreableBytes(), "Leaf says it stores more bytes than it has space for");
if (node().FormatVersion() != FORMAT_VERSION_HEADER) {
throw std::runtime_error("This node format is not supported. Was it created with a newer version of CryFS?");
}
}
DataLeafNode::~DataLeafNode() {
}
unique_ref<DataLeafNode> DataLeafNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, Data data) {
ASSERT(data.size() <= layout.maxBytesPerLeaf(), "Data passed in is too large for one leaf.");
uint32_t size = data.size();
return make_unique_ref<DataLeafNode>(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, std::move(data)));
}
unique_ref<DataLeafNode> DataLeafNode::OverwriteNode(BlockStore *blockStore, const DataNodeLayout &layout, const BlockId &blockId, Data data) {
ASSERT(data.size() == layout.maxBytesPerLeaf(), "Data passed in must have exactly the size of one leaf.");
uint32_t size = data.size();
return make_unique_ref<DataLeafNode>(DataNodeView::overwrite(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, blockId, std::move(data)));
}
void DataLeafNode::read(void *target, uint64_t offset, uint64_t size) const {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Read out of valid area"); // Also check offset, because the addition could lead to overflows
std::memcpy(target, static_cast<const uint8_t*>(node().data()) + offset, size);
}
void DataLeafNode::write(const void *source, uint64_t offset, uint64_t size) {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Write out of valid area"); // Also check offset, because the addition could lead to overflows
node().write(source, offset, size);
}
uint32_t DataLeafNode::numBytes() const {
return node().Size();
}
void DataLeafNode::resize(uint32_t new_size) {
ASSERT(new_size <= maxStoreableBytes(), "Trying to resize to a size larger than the maximal size");
uint32_t old_size = node().Size();
if (new_size < old_size) {
fillDataWithZeroesFromTo(new_size, old_size);
}
node().setSize(new_size);
}
void DataLeafNode::fillDataWithZeroesFromTo(uint64_t begin, uint64_t end) {
Data ZEROES(end-begin);
ZEROES.FillWithZeroes();
node().write(ZEROES.data(), begin, end-begin);
}
uint64_t DataLeafNode::maxStoreableBytes() const {
return node().layout().maxBytesPerLeaf();
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataLeafNode.h 0000664 0000000 0000000 00000002270 14456142610 0026142 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATALEAFNODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATALEAFNODE_H_
#include "DataNode.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataInnerNode;
class DataLeafNode final: public DataNode {
public:
static cpputils::unique_ref<DataLeafNode> CreateNewNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, cpputils::Data data);
static cpputils::unique_ref<DataLeafNode> OverwriteNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, const blockstore::BlockId &blockId, cpputils::Data data);
DataLeafNode(DataNodeView block);
~DataLeafNode();
//Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
uint64_t maxStoreableBytes() const;
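// (e.g. a 5 GiB blob exceeds the ~4.29e9 maximum of uint32_t, so byte offsets
// derived from it must not be truncated to 32 bit)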
void read(void *target, uint64_t offset, uint64_t size) const;
void write(const void *source, uint64_t offset, uint64_t size);
uint32_t numBytes() const;
void resize(uint32_t size);
private:
void fillDataWithZeroesFromTo(uint64_t begin, uint64_t end);
DISALLOW_COPY_AND_ASSIGN(DataLeafNode);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataNode.cpp 0000664 0000000 0000000 00000002201 14456142610 0025677 0 ustar 00root root 0000000 0000000 #include "DataInnerNode.h"
#include "DataLeafNode.h"
#include "DataNode.h"
#include "DataNodeStore.h"
#include
using blockstore::BlockId;
using cpputils::unique_ref;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
constexpr uint16_t DataNode::FORMAT_VERSION_HEADER;
DataNode::DataNode(DataNodeView node)
: _node(std::move(node)) {
}
DataNode::~DataNode() {
}
DataNodeView &DataNode::node() {
return const_cast<DataNodeView&>(const_cast<const DataNode*>(this)->node());
}
const DataNodeView &DataNode::node() const {
return _node;
}
const BlockId &DataNode::blockId() const {
return _node.blockId();
}
uint8_t DataNode::depth() const {
return _node.Depth();
}
unique_ref<DataInnerNode> DataNode::convertToNewInnerNode(unique_ref<DataNode> node, const DataNodeLayout &layout, const DataNode &first_child) {
auto block = node->_node.releaseBlock();
blockstore::utils::fillWithZeroes(block.get());
return DataInnerNode::InitializeNewNode(std::move(block), layout, first_child.depth()+1, {first_child.blockId()});
}
void DataNode::flush() const {
_node.flush();
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataNode.h 0000664 0000000 0000000 00000001771 14456142610 0025357 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODE_H_
#include "DataNodeView.h"
#include
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataInnerNode;
class DataNode {
public:
virtual ~DataNode();
const blockstore::BlockId &blockId() const;
uint8_t depth() const;
static cpputils::unique_ref<DataInnerNode> convertToNewInnerNode(cpputils::unique_ref<DataNode> node, const DataNodeLayout &layout, const DataNode &first_child);
void flush() const;
protected:
// The FORMAT_VERSION_HEADER is stored in each node so that future versions can detect the on-disk format and stay compatible.
static constexpr uint16_t FORMAT_VERSION_HEADER = 0;
DataNode(DataNodeView block);
DataNodeView &node();
const DataNodeView &node() const;
friend class DataNodeStore;
private:
DataNodeView _node;
DISALLOW_COPY_AND_ASSIGN(DataNode);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataNodeStore.cpp 0000664 0000000 0000000 00000011671 14456142610 0026727 0 ustar 00root root 0000000 0000000 #include "DataInnerNode.h"
#include "DataLeafNode.h"
#include "DataNodeStore.h"
#include
#include
#include
#include
using blockstore::BlockStore;
using blockstore::Block;
using blockstore::BlockId;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using cpputils::dynamic_pointer_move;
using std::runtime_error;
using boost::optional;
using boost::none;
using std::vector;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataNodeStore::DataNodeStore(unique_ref<BlockStore> blockstore, uint64_t physicalBlocksizeBytes)
: _blockstore(std::move(blockstore)), _layout(_blockstore->blockSizeFromPhysicalBlockSize(physicalBlocksizeBytes)) {
}
DataNodeStore::~DataNodeStore() {
}
unique_ref<DataNode> DataNodeStore::load(unique_ref<Block> block) {
DataNodeView node(std::move(block));
if (node.Depth() == 0) {
return make_unique_ref<DataLeafNode>(std::move(node));
} else if (node.Depth() <= MAX_DEPTH) {
return make_unique_ref<DataInnerNode>(std::move(node));
} else {
throw runtime_error("Tree is too deep. Data corruption?");
}
}
unique_ref<DataInnerNode> DataNodeStore::createNewInnerNode(uint8_t depth, const vector<BlockId> &children) {
ASSERT(children.size() >= 1, "Inner node must have at least one child");
return DataInnerNode::CreateNewNode(_blockstore.get(), _layout, depth, children);
}
unique_ref<DataLeafNode> DataNodeStore::createNewLeafNode(Data data) {
return DataLeafNode::CreateNewNode(_blockstore.get(), _layout, std::move(data));
}
unique_ref<DataLeafNode> DataNodeStore::overwriteLeaf(const BlockId &blockId, Data data) {
return DataLeafNode::OverwriteNode(_blockstore.get(), _layout, blockId, std::move(data));
}
optional<unique_ref<DataNode>> DataNodeStore::load(const BlockId &blockId) {
auto block = _blockstore->load(blockId);
if (block == none) {
return none;
} else {
ASSERT((*block)->size() == _layout.blocksizeBytes(), "Loading block of wrong size");
return load(std::move(*block));
}
}
unique_ref<DataNode> DataNodeStore::createNewNodeAsCopyFrom(const DataNode &source) {
ASSERT(source.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
auto newBlock = blockstore::utils::copyToNewBlock(_blockstore.get(), source.node().block());
return load(std::move(newBlock));
}
unique_ref<DataNode> DataNodeStore::overwriteNodeWith(unique_ref<DataNode> target, const DataNode &source) {
ASSERT(target->node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Target node has wrong layout. Is it from the same DataNodeStore?");
ASSERT(source.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
auto targetBlock = target->node().releaseBlock();
cpputils::destruct(std::move(target)); // Call destructor
blockstore::utils::copyTo(targetBlock.get(), source.node().block());
return DataNodeStore::load(std::move(targetBlock));
}
void DataNodeStore::remove(unique_ref<DataNode> node) {
BlockId blockId = node->blockId();
cpputils::destruct(std::move(node));
remove(blockId);
}
void DataNodeStore::remove(const BlockId &blockId) {
_blockstore->remove(blockId);
}
void DataNodeStore::removeSubtree(unique_ref<DataNode> node) {
auto leaf = dynamic_pointer_move<DataLeafNode>(node);
if (leaf != none) {
remove(std::move(*leaf));
return;
}
auto inner = dynamic_pointer_move<DataInnerNode>(node);
ASSERT(inner != none, "Is neither a leaf nor an inner node");
for (uint32_t i = 0; i < (*inner)->numChildren(); ++i) {
removeSubtree((*inner)->depth()-1, (*inner)->readChild(i).blockId());
}
remove(std::move(*inner));
}
// NOLINTNEXTLINE(misc-no-recursion)
void DataNodeStore::removeSubtree(uint8_t depth, const BlockId &blockId) {
if (depth == 0) {
remove(blockId);
} else {
auto node = load(blockId);
ASSERT(node != none, "Node for removeSubtree not found");
auto inner = dynamic_pointer_move<DataInnerNode>(*node);
ASSERT(inner != none, "Is not an inner node, but depth was not zero");
ASSERT((*inner)->depth() == depth, "Wrong depth given");
for (uint32_t i = 0; i < (*inner)->numChildren(); ++i) {
removeSubtree(depth-1, (*inner)->readChild(i).blockId());
}
remove(std::move(*inner));
}
}
uint64_t DataNodeStore::numNodes() const {
return _blockstore->numBlocks();
}
uint64_t DataNodeStore::estimateSpaceForNumNodesLeft() const {
return _blockstore->estimateNumFreeBytes() / _layout.blocksizeBytes();
}
uint64_t DataNodeStore::virtualBlocksizeBytes() const {
return _layout.blocksizeBytes();
}
DataNodeLayout DataNodeStore::layout() const {
return _layout;
}
void DataNodeStore::forEachNode(std::function<void (const BlockId &blockId)> callback) const {
_blockstore->forEachBlock(std::move(callback));
}
}
}
}
src/blobstore/implementations/onblocks/datanodestore/DataNodeStore.h 0000664 0000000 0000000 00000004172 14456142610 0026372 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODESTORE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODESTORE_H_
#include <cpp-utils/macros.h>
#include <cpp-utils/pointer/unique_ref.h>
#include "DataNodeView.h"
#include <blockstore/utils/BlockId.h>
namespace blockstore{
class Block;
class BlockStore;
}
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNode;
class DataLeafNode;
class DataInnerNode;
class DataNodeStore final {
public:
DataNodeStore(cpputils::unique_ref<blockstore::BlockStore> blockstore, uint64_t physicalBlocksizeBytes);
~DataNodeStore();
static constexpr uint8_t MAX_DEPTH = 10;
DataNodeLayout layout() const;
boost::optional<cpputils::unique_ref<DataNode>> load(const blockstore::BlockId &blockId);
static cpputils::unique_ref<DataNode> load(cpputils::unique_ref<blockstore::Block> block);
cpputils::unique_ref<DataLeafNode> createNewLeafNode(cpputils::Data data);
cpputils::unique_ref<DataInnerNode> createNewInnerNode(uint8_t depth, const std::vector<blockstore::BlockId> &children);
cpputils::unique_ref<DataNode> createNewNodeAsCopyFrom(const DataNode &source);
cpputils::unique_ref<DataNode> overwriteNodeWith(cpputils::unique_ref<DataNode> target, const DataNode &source);
cpputils::unique_ref<DataLeafNode> overwriteLeaf(const blockstore::BlockId &blockId, cpputils::Data data);
void remove(cpputils::unique_ref<DataNode> node);
void remove(const blockstore::BlockId &blockId);
void removeSubtree(uint8_t depth, const blockstore::BlockId &blockId);
void removeSubtree(cpputils::unique_ref<DataNode> node);
//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
uint64_t virtualBlocksizeBytes() const;
uint64_t numNodes() const;
uint64_t estimateSpaceForNumNodesLeft() const;
//TODO Test overwriteNodeWith(), createNodeAsCopyFrom(), removeSubtree()
void forEachNode(std::function<void (const blockstore::BlockId &blockId)> callback) const;
private:
cpputils::unique_ref<blockstore::BlockStore> _blockstore;
const DataNodeLayout _layout;
DISALLOW_COPY_AND_ASSIGN(DataNodeStore);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datanodestore/DataNodeView.h 0000664 0000000 0000000 00000013715 14456142610 0026213 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODEVIEW_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODEVIEW_H_
#include <blockstore/interface/Block.h>
#include "../BlobStoreOnBlocks.h"
#include "DataInnerNode_ChildEntry.h"
#include <blockstore/interface/BlockStore.h>
#include <cpp-utils/data/Data.h>
#include <cpp-utils/data/SerializationHelper.h>
#include <stdexcept>
namespace blobstore {
namespace onblocks {
namespace datanodestore {
//TODO Move DataNodeLayout into own file
class DataNodeLayout final {
public:
constexpr DataNodeLayout(uint64_t blocksizeBytes)
:_blocksizeBytes(
(HEADERSIZE_BYTES + 2*sizeof(DataInnerNode_ChildEntry) <= blocksizeBytes)
? blocksizeBytes
: throw std::logic_error("Blocksize too small, not enough space to store two children in an inner node")) {
}
//Total size of the header
static constexpr uint32_t HEADERSIZE_BYTES = 8;
//Where in the header is the format version field (used to allow compatibility with future versions of CryFS)
static constexpr uint32_t FORMAT_VERSION_OFFSET_BYTES = 0; //format version uses 2 bytes
//Where in the header is the depth field
static constexpr uint32_t DEPTH_OFFSET_BYTES = 3; // depth uses 1 byte
//Where in the header is the size field (for inner nodes: number of children, for leafs: content data size)
static constexpr uint32_t SIZE_OFFSET_BYTES = 4; // size uses 4 bytes
//Size of a block (header + data region)
constexpr uint64_t blocksizeBytes() const {
return _blocksizeBytes;
}
//Number of bytes in the data region of a node
constexpr uint64_t datasizeBytes() const {
return _blocksizeBytes - HEADERSIZE_BYTES;
}
//Maximum number of children an inner node can store
constexpr uint64_t maxChildrenPerInnerNode() const {
return datasizeBytes() / sizeof(DataInnerNode_ChildEntry);
}
//Maximum number of bytes a leaf can store
constexpr uint64_t maxBytesPerLeaf() const {
return datasizeBytes();
}
private:
uint32_t _blocksizeBytes;
};
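// Worked example (illustrative only, not part of the original source; assumes a
// 16 KiB block and sizeof(DataInnerNode_ChildEntry) == 16): DataNodeLayout(16384) gives
//   datasizeBytes()           == 16384 - 8  == 16376
//   maxChildrenPerInnerNode() == 16376 / 16 == 1023
//   maxBytesPerLeaf()         == 16376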
class DataNodeView final {
public:
DataNodeView(cpputils::unique_ref<blockstore::Block> block): _block(std::move(block)) {
}
~DataNodeView() {}
static DataNodeView create(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
ASSERT(data.size() <= layout.datasizeBytes(), "Data is too large for node");
cpputils::Data serialized = serialize_(layout, formatVersion, depth, size, std::move(data));
ASSERT(serialized.size() == layout.blocksizeBytes(), "Wrong block size");
auto block = blockStore->create(serialized);
return DataNodeView(std::move(block));
}
static DataNodeView initialize(cpputils::unique_ref<blockstore::Block> block, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
ASSERT(data.size() <= DataNodeLayout(block->size()).datasizeBytes(), "Data is too large for node");
cpputils::Data serialized = serialize_(layout, formatVersion, depth, size, std::move(data));
ASSERT(serialized.size() == block->size(), "Block has wrong size");
block->write(serialized.data(), 0, serialized.size());
return DataNodeView(std::move(block));
}
static DataNodeView overwrite(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, const blockstore::BlockId &blockId, cpputils::Data data) {
ASSERT(data.size() <= layout.datasizeBytes(), "Data is too large for node");
cpputils::Data serialized = serialize_(layout, formatVersion, depth, size, std::move(data));
auto block = blockStore->overwrite(blockId, std::move(serialized));
return DataNodeView(std::move(block));
}
DataNodeView(DataNodeView &&rhs) = default;
uint16_t FormatVersion() const {
return cpputils::deserializeWithOffset<uint16_t>(_block->data(), DataNodeLayout::FORMAT_VERSION_OFFSET_BYTES);
}
void setFormatVersion(uint16_t value) {
_block->write(&value, DataNodeLayout::FORMAT_VERSION_OFFSET_BYTES, sizeof(value));
}
uint8_t Depth() const {
return cpputils::deserializeWithOffset<uint8_t>(_block->data(), DataNodeLayout::DEPTH_OFFSET_BYTES);
}
void setDepth(uint8_t value) {
_block->write(&value, DataNodeLayout::DEPTH_OFFSET_BYTES, sizeof(value));
}
uint32_t Size() const {
return cpputils::deserializeWithOffset<uint32_t>(_block->data(), DataNodeLayout::SIZE_OFFSET_BYTES);
}
void setSize(uint32_t value) {
_block->write(&value, DataNodeLayout::SIZE_OFFSET_BYTES, sizeof(value));
}
const void *data() const {
return static_cast<const uint8_t*>(_block->data()) + DataNodeLayout::HEADERSIZE_BYTES;
}
void write(const void *source, uint64_t offset, uint64_t size) {
_block->write(source, offset + DataNodeLayout::HEADERSIZE_BYTES, size);
}
DataNodeLayout layout() const {
return DataNodeLayout(_block->size());
}
cpputils::unique_ref<blockstore::Block> releaseBlock() {
return std::move(_block);
}
const blockstore::Block &block() const {
return *_block;
}
const blockstore::BlockId &blockId() const {
return _block->blockId();
}
void flush() const {
_block->flush();
}
private:
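// serialize_ lays out a node according to the offset constants in DataNodeLayout:
// bytes 0-1 format version, byte 2 unused, byte 3 depth, bytes 4-7 size,
// bytes 8..blocksize-1 data region (zero-padded behind the payload).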
static cpputils::Data serialize_(const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
cpputils::Data result(layout.blocksizeBytes());
cpputils::serialize(result.dataOffset(layout.FORMAT_VERSION_OFFSET_BYTES), formatVersion);
cpputils::serialize(result.dataOffset(layout.DEPTH_OFFSET_BYTES), depth);
cpputils::serialize(result.dataOffset(layout.SIZE_OFFSET_BYTES), size);
std::memcpy(result.dataOffset(layout.HEADERSIZE_BYTES), data.data(), data.size());
std::memset(result.dataOffset(layout.HEADERSIZE_BYTES+data.size()), 0, layout.datasizeBytes()-data.size());
return result;
}
cpputils::unique_ref<blockstore::Block> _block;
DISALLOW_COPY_AND_ASSIGN(DataNodeView);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/ 0000775 0000000 0000000 00000000000 14456142610 0023533 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/datatreestore/DataTree.cpp 0000664 0000000 0000000 00000035320 14456142610 0025733 0 ustar 00root root 0000000 0000000 #include "DataTree.h"
#include "../datanodestore/DataNodeStore.h"
#include "../datanodestore/DataInnerNode.h"
#include "../datanodestore/DataLeafNode.h"
#include "../utils/Math.h"
#include "impl/algorithms.h"
#include <algorithm>
#include <cmath>
#include <cpp-utils/assert/assert.h>
#include <cpp-utils/pointer/cast.h>
#include "impl/LeafTraverser.h"
#include <cpp-utils/pointer/optional_ownership_ptr.h>
#include <cpp-utils/logging/logging.h>
using blockstore::BlockId;
using blobstore::onblocks::datanodestore::DataNodeStore;
using blobstore::onblocks::datanodestore::DataNode;
using blobstore::onblocks::datanodestore::DataInnerNode;
using blobstore::onblocks::datanodestore::DataLeafNode;
using std::function;
using boost::shared_mutex;
using boost::shared_lock;
using boost::unique_lock;
using boost::none;
using boost::optional;
using cpputils::optional_ownership_ptr;
using cpputils::unique_ref;
using cpputils::Data;
using namespace cpputils::logging;
//TODO shared_lock currently not enough for traverse because of root replacement. Can be fixed while keeping shared?
namespace blobstore {
namespace onblocks {
namespace datatreestore {
DataTree::DataTree(DataNodeStore *nodeStore, unique_ref<DataNode> rootNode)
: _treeStructureMutex(), _nodeStore(nodeStore), _rootNode(std::move(rootNode)), _blockId(_rootNode->blockId()), _sizeCache() {
}
DataTree::~DataTree() {
}
const BlockId &DataTree::blockId() const {
return _blockId;
}
void DataTree::flush() const {
// By grabbing a lock, we ensure that all modifying functions don't run currently and are therefore flushed.
// It's only a shared lock, because this doesn't modify the tree structure.
shared_lock<shared_mutex> lock(_treeStructureMutex);
// We also have to flush the root node
_rootNode->flush();
}
unique_ref<DataNode> DataTree::releaseRootNode() {
// Lock also ensures that the root node is currently set (traversing unsets it temporarily)
// It's a unique lock because this "modifies" tree structure by changing _rootNode.
unique_lock<shared_mutex> lock(_treeStructureMutex);
return std::move(_rootNode);
}
uint32_t DataTree::numNodes() const {
uint32_t numNodesCurrentLevel = numLeaves();
uint32_t totalNumNodes = numNodesCurrentLevel;
for(size_t level = 0; level < _rootNode->depth(); ++level) {
numNodesCurrentLevel = blobstore::onblocks::utils::ceilDivision(numNodesCurrentLevel, static_cast<uint32_t>(_nodeStore->layout().maxChildrenPerInnerNode()));
totalNumNodes += numNodesCurrentLevel;
}
return totalNumNodes;
}
uint32_t DataTree::numLeaves() const {
shared_lock<shared_mutex> lock(_treeStructureMutex);
return _getOrComputeSizeCache().numLeaves;
}
uint64_t DataTree::numBytes() const {
shared_lock<shared_mutex> lock(_treeStructureMutex);
return _numBytes();
}
uint64_t DataTree::_numBytes() const {
return _getOrComputeSizeCache().numBytes;
}
DataTree::SizeCache DataTree::_getOrComputeSizeCache() const {
return _sizeCache.getOrCompute([this] () {
return _computeSizeCache(*_rootNode);
});
}
uint32_t DataTree::forceComputeNumLeaves() const {
_sizeCache.clear();
return numLeaves();
}
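// _computeSizeCache only needs to load the nodes on the rightmost path of the tree:
// every child of an inner node except the last one is a complete subtree holding
// _leavesPerFullChild() leaves of maxBytesPerLeaf() bytes each, so only the last child
// has to be recursed into. Example (illustrative, assuming 16376-byte leaves): a
// depth-1 root with 3 children contains 2 full leaves of 16376 bytes each, plus
// whatever number of bytes the third leaf reports.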
// NOLINTNEXTLINE(misc-no-recursion)
DataTree::SizeCache DataTree::_computeSizeCache(const DataNode &node) const {
const DataLeafNode *leaf = dynamic_cast<const DataLeafNode*>(&node);
if (leaf != nullptr) {
return {1, leaf->numBytes()};
}
const DataInnerNode &inner = dynamic_cast<const DataInnerNode&>(node);
uint32_t numLeavesInLeftChildren = static_cast<uint32_t>(inner.numChildren()-1) * _leavesPerFullChild(inner);
uint64_t numBytesInLeftChildren = numLeavesInLeftChildren * _nodeStore->layout().maxBytesPerLeaf();
auto lastChild = _nodeStore->load(inner.readLastChild().blockId());
ASSERT(lastChild != none, "Couldn't load last child");
SizeCache sizeInRightChild = _computeSizeCache(**lastChild);
return SizeCache {
numLeavesInLeftChildren + sizeInRightChild.numLeaves,
numBytesInLeftChildren + sizeInRightChild.numBytes
};
}
void DataTree::_traverseLeavesByLeafIndices(uint32_t beginIndex, uint32_t endIndex, bool readOnlyTraversal,
function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf,
function<Data (uint32_t index)> onCreateLeaf,
function<void (DataInnerNode *node)> onBacktrackFromSubtree) const {
if(endIndex <= beginIndex) {
return;
}
// TODO no const cast
LeafTraverser(_nodeStore, readOnlyTraversal).traverseAndUpdateRoot(&const_cast<DataTree*>(this)->_rootNode, beginIndex, endIndex, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
void DataTree::_traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeBytes, bool readOnlyTraversal, function<void (uint64_t leafOffset, LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, function<Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const {
if (sizeBytes == 0) {
return;
}
uint64_t endByte = beginByte + sizeBytes;
uint64_t _maxBytesPerLeaf = maxBytesPerLeaf();
uint32_t firstLeaf = beginByte / _maxBytesPerLeaf;
uint32_t endLeaf = utils::ceilDivision(endByte, _maxBytesPerLeaf);
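// Example of the index math (illustrative values only): with _maxBytesPerLeaf == 16376,
// beginByte == 20000 and sizeBytes == 10000, we get endByte == 30000,
// firstLeaf == 20000/16376 == 1 and endLeaf == ceil(30000/16376) == 2, i.e. the
// traversal touches only leaf 1, with dataBegin == 3624 and dataEnd == 13624.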
bool blobIsGrowingFromThisTraversal = false;
auto _onExistingLeaf = [&onExistingLeaf, beginByte, endByte, endLeaf, _maxBytesPerLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex, bool isRightBorderLeaf, LeafHandle leafHandle) {
uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
// If we are traversing exactly until the last leaf, then the last leaf wasn't resized by the traversal and might have a wrong size. We have to fix it.
if (isRightBorderLeaf) {
ASSERT(leafIndex == endLeaf-1, "If we traversed further right, this wouldn't be the right border leaf.");
auto leaf = leafHandle.node();
if (leaf->numBytes() < dataEnd) {
leaf->resize(dataEnd);
blobIsGrowingFromThisTraversal = true;
}
}
onExistingLeaf(indexOfFirstLeafByte, std::move(leafHandle), dataBegin, dataEnd-dataBegin);
};
auto _onCreateLeaf = [&onCreateLeaf, _maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal, readOnlyTraversal] (uint32_t leafIndex) -> Data {
ASSERT(!readOnlyTraversal, "Cannot create leaves in a read-only traversal");
blobIsGrowingFromThisTraversal = true;
uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
ASSERT(leafIndex == endLeaf-1 || dataEnd == _maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
ASSERT(data.size() == dataEnd-dataBegin, "Returned leaf data with wrong size");
// If this leaf is created but only partly in the traversed region (i.e. dataBegin > leafBegin), we have to fill the data before the traversed region with zeroes.
if (dataBegin != 0) {
Data actualData(dataBegin + data.size());
std::memset(actualData.data(), 0, dataBegin);
std::memcpy(actualData.dataOffset(dataBegin), data.data(), data.size());
data = std::move(actualData);
}
return data;
};
auto _onBacktrackFromSubtree = [] (DataInnerNode* /*node*/) {};
_traverseLeavesByLeafIndices(firstLeaf, endLeaf, readOnlyTraversal, _onExistingLeaf, _onCreateLeaf, _onBacktrackFromSubtree);
ASSERT(!readOnlyTraversal || !blobIsGrowingFromThisTraversal, "Blob grew from traversal that didn't allow growing (i.e. reading)");
if (blobIsGrowingFromThisTraversal) {
_sizeCache.update([endLeaf, endByte] (optional<SizeCache>* cache) {
*cache = SizeCache{endLeaf, endByte};
});
}
}
uint32_t DataTree::_leavesPerFullChild(const DataInnerNode &root) const {
return utils::intPow(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(root.depth())-1);
}
void DataTree::resizeNumBytes(uint64_t newNumBytes) {
std::unique_lock<boost::shared_mutex> lock(_treeStructureMutex);
uint32_t newNumLeaves = std::max(UINT64_C(1), utils::ceilDivision(newNumBytes, _nodeStore->layout().maxBytesPerLeaf()));
uint32_t newLastLeafSize = newNumBytes - (newNumLeaves-1) * _nodeStore->layout().maxBytesPerLeaf();
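// Example (illustrative values only): with maxBytesPerLeaf == 16376 and newNumBytes == 40000,
// newNumLeaves == ceil(40000/16376) == 3 and newLastLeafSize == 40000 - 2*16376 == 7248.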
uint32_t maxChildrenPerInnerNode = _nodeStore->layout().maxChildrenPerInnerNode();
auto onExistingLeaf = [newLastLeafSize] (uint32_t /*index*/, bool /*isRightBorderLeaf*/, LeafHandle leafHandle) {
auto leaf = leafHandle.node();
// This is only called, if the new last leaf was already existing
if (leaf->numBytes() != newLastLeafSize) {
leaf->resize(newLastLeafSize);
}
};
auto onCreateLeaf = [newLastLeafSize] (uint32_t /*index*/) -> Data {
// This is only called, if the new last leaf was not existing yet
return Data(newLastLeafSize).FillWithZeroes();
};
auto onBacktrackFromSubtree = [this, newNumLeaves, maxChildrenPerInnerNode] (DataInnerNode* node) {
// This is only called for the right border nodes of the new tree.
// When growing size, the following is a no-op. When shrinking, we're deleting the children that aren't needed anymore.
uint32_t maxLeavesPerChild = utils::intPow(static_cast<uint64_t>(maxChildrenPerInnerNode), (static_cast<uint64_t>(node->depth())-1));
uint32_t neededNodesOnChildLevel = utils::ceilDivision(newNumLeaves, maxLeavesPerChild);
uint32_t neededSiblings = utils::ceilDivision(neededNodesOnChildLevel, maxChildrenPerInnerNode);
uint32_t neededChildrenForRightBorderNode = neededNodesOnChildLevel - (neededSiblings-1) * maxChildrenPerInnerNode;
ASSERT(neededChildrenForRightBorderNode <= node->numChildren(), "Node has too few children");
// All children to the right of the new right-border-node are removed including their subtree.
while(node->numChildren() > neededChildrenForRightBorderNode) {
_nodeStore->removeSubtree(node->depth()-1, node->readLastChild().blockId());
node->removeLastChild();
}
};
_traverseLeavesByLeafIndices(newNumLeaves - 1, newNumLeaves, false, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
_sizeCache.update([newNumLeaves, newNumBytes] (boost::optional<SizeCache>* cache) {
*cache = SizeCache{newNumLeaves, newNumBytes};
});
}
uint64_t DataTree::maxBytesPerLeaf() const {
return _nodeStore->layout().maxBytesPerLeaf();
}
uint8_t DataTree::depth() const {
shared_lock<shared_mutex> lock(_treeStructureMutex);
return _rootNode->depth();
}
void DataTree::readBytes(void *target, uint64_t offset, uint64_t count) const {
shared_lock<shared_mutex> lock(_treeStructureMutex);
const uint64_t _size = _numBytes();
if(offset > _size || offset + count > _size) {
throw std::runtime_error("BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
}
const uint64_t read = _tryReadBytes(target, offset, count);
if (read != count) {
throw std::runtime_error("BlobOnBlocks::read() couldn't read all requested bytes. Use BlobOnBlocks::tryRead() if this should be allowed.");
}
}
Data DataTree::readAllBytes() const {
shared_lock<shared_mutex> lock(_treeStructureMutex);
//TODO Querying numBytes can be inefficient. Is this possible without a call to size()?
uint64_t count = _numBytes();
Data result(count);
_doReadBytes(result.data(), 0, count);
return result;
}
uint64_t DataTree::tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
shared_lock<shared_mutex> lock(_treeStructureMutex);
auto result = _tryReadBytes(target, offset, count);
return result;
}
uint64_t DataTree::_tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
//TODO Quite inefficient to call size() here, because that has to traverse the tree
const uint64_t _size = _numBytes();
const uint64_t realCount = std::max(INT64_C(0), std::min(static_cast<int64_t>(count), static_cast<int64_t>(_size)-static_cast<int64_t>(offset)));
_doReadBytes(target, offset, realCount);
return realCount;
}
void DataTree::_doReadBytes(void *target, uint64_t offset, uint64_t count) const {
auto onExistingLeaf = [target, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Writing to target out of bounds");
//TODO Simplify formula, make it easier to understand
leaf.node()->read(static_cast<uint8_t*>(target) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
};
auto onCreateLeaf = [] (uint64_t /*beginByte*/, uint32_t /*count*/) -> Data {
ASSERT(false, "Reading shouldn't create new leaves.");
};
_traverseLeavesByByteIndices(offset, count, true, onExistingLeaf, onCreateLeaf);
}
void DataTree::writeBytes(const void *source, uint64_t offset, uint64_t count) {
unique_lock<shared_mutex> lock(_treeStructureMutex);
auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
if (leafDataOffset == 0 && leafDataSize == leaf.nodeStore()->layout().maxBytesPerLeaf()) {
Data leafData(leafDataSize);
std::memcpy(leafData.data(), static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset, leafDataSize);
leaf.nodeStore()->overwriteLeaf(leaf.blockId(), std::move(leafData));
} else {
//TODO Simplify formula, make it easier to understand
leaf.node()->write(static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
}
};
auto onCreateLeaf = [source, offset, count] (uint64_t beginByte, uint32_t numBytes) -> Data {
ASSERT(beginByte >= offset && beginByte-offset <= count && beginByte-offset+numBytes <= count, "Reading from source out of bounds");
Data result(numBytes);
//TODO Simplify formula, make it easier to understand
std::memcpy(result.data(), static_cast<const uint8_t*>(source) + beginByte - offset, numBytes);
return result;
};
_traverseLeavesByByteIndices(offset, count, false, onExistingLeaf, onCreateLeaf);
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/DataTree.h 0000664 0000000 0000000 00000007164 14456142610 0025405 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREE_H_
#include <functional>
#include <cpp-utils/macros.h>
#include <cpp-utils/pointer/unique_ref.h>
#include "../datanodestore/DataNodeView.h"
//TODO Replace boost with std::shared_mutex once we can rely on C++17 support
#include <boost/thread/shared_mutex.hpp>
#include <blockstore/utils/BlockId.h>
#include "LeafHandle.h"
#include "impl/CachedValue.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataInnerNode;
class DataLeafNode;
class DataNode;
}
namespace datatreestore {
//TODO It is strange that DataLeafNode is still part in the public interface of DataTree. This should be separated somehow.
class DataTree final {
public:
DataTree(datanodestore::DataNodeStore *nodeStore, cpputils::unique_ref<datanodestore::DataNode> rootNode);
~DataTree();
const blockstore::BlockId &blockId() const;
//Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
uint64_t maxBytesPerLeaf() const;
uint64_t tryReadBytes(void *target, uint64_t offset, uint64_t count) const;
void readBytes(void *target, uint64_t offset, uint64_t count) const;
cpputils::Data readAllBytes() const;
void writeBytes(const void *source, uint64_t offset, uint64_t count);
void resizeNumBytes(uint64_t newNumBytes);
uint32_t numNodes() const;
uint32_t numLeaves() const;
uint64_t numBytes() const;
uint8_t depth() const;
// only used by test cases
uint32_t forceComputeNumLeaves() const;
void flush() const;
private:
// This mutex must protect the tree structure, i.e. which nodes exist and how they're connected.
// Also protects total number of bytes (i.e. number of leaves + size of last leaf).
// It also protects the data in leaf nodes, because writing bytes might grow the blob and change the structure.
mutable boost::shared_mutex _treeStructureMutex;
datanodestore::DataNodeStore *_nodeStore;
cpputils::unique_ref<datanodestore::DataNode> _rootNode;
blockstore::BlockId _blockId; // BlockId is stored in a member variable, since _rootNode is nullptr while traversing, but we still want to be able to return the blockId.
struct SizeCache final {
uint32_t numLeaves;
uint64_t numBytes;
};
mutable CachedValue<SizeCache> _sizeCache;
cpputils::unique_ref<datanodestore::DataNode> releaseRootNode();
friend class DataTreeStore;
void _traverseLeavesByLeafIndices(uint32_t beginIndex, uint32_t endIndex, bool readOnlyTraversal,
std::function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf,
std::function<cpputils::Data (uint32_t index)> onCreateLeaf,
std::function<void (datanodestore::DataInnerNode *node)> onBacktrackFromSubtree) const;
void _traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeBytes, bool readOnlyTraversal, std::function<void (uint64_t leafOffset, LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, std::function<cpputils::Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const;
uint32_t _leavesPerFullChild(const datanodestore::DataInnerNode &root) const;
SizeCache _getOrComputeSizeCache() const;
SizeCache _computeSizeCache(const datanodestore::DataNode &node) const;
uint64_t _tryReadBytes(void *target, uint64_t offset, uint64_t count) const;
void _doReadBytes(void *target, uint64_t offset, uint64_t count) const;
uint64_t _numBytes() const;
DISALLOW_COPY_AND_ASSIGN(DataTree);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/DataTreeStore.cpp 0000664 0000000 0000000 00000002310 14456142610 0026741 0 ustar 00root root 0000000 0000000 #include "DataTreeStore.h"
#include "../datanodestore/DataLeafNode.h"
#include "DataTree.h"
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using cpputils::Data;
using boost::optional;
using boost::none;
using blobstore::onblocks::datanodestore::DataNodeStore;
namespace blobstore {
namespace onblocks {
namespace datatreestore {
DataTreeStore::DataTreeStore(unique_ref<DataNodeStore> nodeStore)
: _nodeStore(std::move(nodeStore)) {
}
DataTreeStore::~DataTreeStore() {
}
optional<unique_ref<DataTree>> DataTreeStore::load(const blockstore::BlockId &blockId) {
auto node = _nodeStore->load(blockId);
if (node == none) {
return none;
}
return make_unique_ref<DataTree>(_nodeStore.get(), std::move(*node));
}
unique_ref<DataTree> DataTreeStore::createNewTree() {
auto newleaf = _nodeStore->createNewLeafNode(Data(0));
return make_unique_ref<DataTree>(_nodeStore.get(), std::move(newleaf));
}
void DataTreeStore::remove(unique_ref<DataTree> tree) {
_nodeStore->removeSubtree(tree->releaseRootNode());
}
void DataTreeStore::remove(const blockstore::BlockId &blockId) {
auto tree = load(blockId);
ASSERT(tree != none, "Tree to remove not found");
remove(std::move(*tree));
}
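// Usage sketch (illustrative only, not part of the original source; assumes a
// DataNodeStore `nodeStore` and a buffer `buffer` of size `bufferSize`):
//   DataTreeStore treeStore(std::move(nodeStore));
//   auto tree = treeStore.createNewTree();     // starts as a single empty leaf
//   tree->writeBytes(buffer, 0, bufferSize);   // grows the tree as needed
//   treeStore.remove(std::move(tree));         // removes the whole subtree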
}
}
}
src/blobstore/implementations/onblocks/datatreestore/DataTreeStore.h 0000664 0000000 0000000 00000002743 14456142610 0026420 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_DATATREESTORE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_DATATREESTORE_H_
#include <cstdint>
#include <boost/optional.hpp>
#include <cpp-utils/macros.h>
#include <cpp-utils/pointer/unique_ref.h>
#include <blockstore/utils/BlockId.h>
#include "../datanodestore/DataNodeStore.h"
namespace blobstore {
namespace onblocks {
namespace datatreestore {
class DataTree;
class DataTreeStore final {
public:
DataTreeStore(cpputils::unique_ref<datanodestore::DataNodeStore> nodeStore);
~DataTreeStore();
boost::optional<cpputils::unique_ref<DataTree>> load(const blockstore::BlockId &blockId);
cpputils::unique_ref<DataTree> createNewTree();
void remove(cpputils::unique_ref<DataTree> tree);
void remove(const blockstore::BlockId &blockId);
//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
uint64_t virtualBlocksizeBytes() const;
uint64_t numNodes() const;
uint64_t estimateSpaceForNumNodesLeft() const;
private:
cpputils::unique_ref<datanodestore::DataNodeStore> _nodeStore;
DISALLOW_COPY_AND_ASSIGN(DataTreeStore);
};
inline uint64_t DataTreeStore::numNodes() const {
return _nodeStore->numNodes();
}
inline uint64_t DataTreeStore::estimateSpaceForNumNodesLeft() const {
return _nodeStore->estimateSpaceForNumNodesLeft();
}
inline uint64_t DataTreeStore::virtualBlocksizeBytes() const {
return _nodeStore->virtualBlocksizeBytes();
}
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/LeafHandle.cpp 0000664 0000000 0000000 00000002606 14456142610 0026226 0 ustar 00root root 0000000 0000000 #include "LeafHandle.h"
#include "../datanodestore/DataLeafNode.h"
#include "../datanodestore/DataNodeStore.h"
using cpputils::WithOwnership;
using cpputils::WithoutOwnership;
using boost::none;
using cpputils::dynamic_pointer_move;
using blobstore::onblocks::datanodestore::DataLeafNode;
using blobstore::onblocks::datanodestore::DataNodeStore;
using blockstore::BlockId;
namespace blobstore {
namespace onblocks {
namespace datatreestore {
LeafHandle::LeafHandle(DataNodeStore *nodeStore, const BlockId &blockId)
: _nodeStore(nodeStore), _blockId(blockId), _leaf(cpputils::null<DataLeafNode>()) {
}
LeafHandle::LeafHandle(DataNodeStore *nodeStore, DataLeafNode *node)
: _nodeStore(nodeStore), _blockId(node->blockId()),
_leaf(WithoutOwnership(node)) {
}
DataLeafNode *LeafHandle::node() {
if (_leaf.get() == nullptr) {
auto loaded = _nodeStore->load(_blockId);
ASSERT(loaded != none, "Leaf not found");
auto leaf = dynamic_pointer_move<DataLeafNode>(*loaded);
ASSERT(leaf != none, "Loaded leaf is not leaf node");
_leaf = WithOwnership(std::move(*leaf));
}
return _leaf.get();
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/LeafHandle.h 0000664 0000000 0000000 00000002534 14456142610 0025673 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_LEAFHANDLE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_LEAFHANDLE_H_
#include <cpp-utils/macros.h>
#include <cpp-utils/pointer/optional_ownership_ptr.h>
#include <blockstore/utils/BlockId.h>
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataLeafNode;
}
namespace datatreestore {
class LeafHandle final {
public:
LeafHandle(datanodestore::DataNodeStore *nodeStore, const blockstore::BlockId &blockId);
LeafHandle(datanodestore::DataNodeStore *nodeStore, datanodestore::DataLeafNode *node);
LeafHandle(LeafHandle &&rhs) = default;
const blockstore::BlockId &blockId() {
return _blockId;
}
datanodestore::DataLeafNode *node();
datanodestore::DataNodeStore *nodeStore() {
return _nodeStore;
}
private:
datanodestore::DataNodeStore *_nodeStore;
blockstore::BlockId _blockId;
cpputils::optional_ownership_ptr<datanodestore::DataLeafNode> _leaf;
DISALLOW_COPY_AND_ASSIGN(LeafHandle);
};
}
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/impl/ 0000775 0000000 0000000 00000000000 14456142610 0024474 5 ustar 00root root 0000000 0000000 src/blobstore/implementations/onblocks/datatreestore/impl/CachedValue.cpp 0000664 0000000 0000000 00000000032 14456142610 0027337 0 ustar 00root root 0000000 0000000 #include "CachedValue.h"
src/blobstore/implementations/onblocks/datatreestore/impl/CachedValue.h 0000664 0000000 0000000 00000002002 14456142610 0027003 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_CACHEDVALUE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_CACHEDVALUE_H_
#include <functional>
#include <boost/optional.hpp>
#include <boost/thread/shared_mutex.hpp>
namespace blobstore {
namespace onblocks {
// TODO Test
template<class T>
class CachedValue final {
public:
CachedValue() :_cache(boost::none), _mutex() {}
T getOrCompute(std::function<T ()> compute) {
boost::upgrade_lock<boost::shared_mutex> readLock(_mutex);
if (_cache == boost::none) {
boost::upgrade_to_unique_lock<boost::shared_mutex> writeLock(readLock);
_cache = compute();
}
return *_cache;
}
void update(std::function<void (boost::optional<T>*)> func) {
boost::unique_lock<boost::shared_mutex> writeLock(_mutex);
func(&_cache);
}
void clear() {
update([] (boost::optional<T>* cache) {
*cache = boost::none;
});
}
private:
boost::optional<T> _cache;
boost::shared_mutex _mutex;
};
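// Usage sketch (illustrative only, not part of the original source):
//   CachedValue<uint32_t> cachedSize;
//   uint32_t size = cachedSize.getOrCompute([] { return 42; }); // computes once, then serves the cache
//   cachedSize.clear();                                         // invalidate; next getOrCompute() recomputes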
}
}
#endif
src/blobstore/implementations/onblocks/datatreestore/impl/LeafTraverser.cpp 0000664 0000000 0000000 00000044072 14456142610 0027754 0 ustar 00root root 0000000 0000000 #include "LeafTraverser.h"
#include <cpp-utils/assert/assert.h>
#include "../../datanodestore/DataLeafNode.h"
#include "../../datanodestore/DataInnerNode.h"
#include "../../datanodestore/DataNodeStore.h"
#include "../../utils/Math.h"
using std::function;
using std::vector;
using boost::none;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::dynamic_pointer_move;
using blobstore::onblocks::datanodestore::DataNodeStore;
using blobstore::onblocks::datanodestore::DataNode;
using blobstore::onblocks::datanodestore::DataInnerNode;
using blobstore::onblocks::datanodestore::DataLeafNode;
namespace blobstore {
namespace onblocks {
namespace datatreestore {
LeafTraverser::LeafTraverser(DataNodeStore *nodeStore, bool readOnlyTraversal)
: _nodeStore(nodeStore), _readOnlyTraversal(readOnlyTraversal) {
}
void LeafTraverser::traverseAndUpdateRoot(unique_ref<DataNode>* root, uint32_t beginIndex, uint32_t endIndex, function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
_traverseAndUpdateRoot(root, beginIndex, endIndex, true, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// NOLINTNEXTLINE(misc-no-recursion)
void LeafTraverser::_traverseAndUpdateRoot(unique_ref<DataNode>* root, uint32_t beginIndex, uint32_t endIndex, bool isLeftBorderOfTraversal, function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
ASSERT(beginIndex <= endIndex, "Invalid parameters");
//TODO Test cases with numLeaves < / >= beginIndex, ideally test all configurations
//     of how beginIndex and endIndex can relate to numLeaves.
uint32_t maxLeavesForDepth = _maxLeavesForTreeDepth((*root)->depth());
bool increaseTreeDepth = endIndex > maxLeavesForDepth;
ASSERT(!_readOnlyTraversal || !increaseTreeDepth, "Tried to grow a tree on a read only traversal");
if ((*root)->depth() == 0) {
DataLeafNode *leaf = dynamic_cast<DataLeafNode*>(root->get());
ASSERT(leaf != nullptr, "Depth 0 has to be leaf node");
if (increaseTreeDepth && leaf->numBytes() != _nodeStore->layout().maxBytesPerLeaf()) {
leaf->resize(_nodeStore->layout().maxBytesPerLeaf());
}
if (beginIndex == 0 && endIndex >= 1) {
bool isRightBorderLeaf = (endIndex == 1);
onExistingLeaf(0, isRightBorderLeaf, LeafHandle(_nodeStore, leaf));
}
} else {
DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root->get());
ASSERT(inner != nullptr, "Depth != 0 has to be inner node");
_traverseExistingSubtree(inner, std::min(beginIndex, maxLeavesForDepth),
std::min(endIndex, maxLeavesForDepth), 0, isLeftBorderOfTraversal, !increaseTreeDepth,
increaseTreeDepth, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// If the traversal goes too far right for a tree this depth, increase tree depth by one and continue traversal.
// This is recursive, i.e. will be repeated if the tree is still not deep enough.
// We don't increase to the full needed tree depth in one step, because we want the traversal to go as far as possible
// and only then increase the depth - this causes the tree to be in consistent shape (balanced) for longer.
if (increaseTreeDepth) {
ASSERT(!_readOnlyTraversal, "Can't increase tree depth in a read-only traversal");
// TODO Test cases that increase tree depth by 0, 1, 2, ... levels
*root = _increaseTreeDepth(std::move(*root));
_traverseAndUpdateRoot(root, std::max(beginIndex, maxLeavesForDepth), endIndex, false, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
} else {
// Once we're done growing the tree and done with the traversal, we might have to decrease tree depth,
// because the callbacks could have deleted nodes (this happens for example when shrinking the tree using a traversal).
_whileRootHasOnlyOneChildReplaceRootWithItsChild(root);
}
}
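// _increaseTreeDepth copies the old root into a fresh block and then turns the root
// block itself into an inner node pointing at that copy. This keeps the root's blockId
// stable, which matters because the tree is referenced from outside by its root blockId.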
unique_ref<DataNode> LeafTraverser::_increaseTreeDepth(unique_ref<DataNode> root) {
ASSERT(!_readOnlyTraversal, "Can't increase tree depth in a read-only traversal");
auto copyOfOldRoot = _nodeStore->createNewNodeAsCopyFrom(*root);
return DataNode::convertToNewInnerNode(std::move(root), _nodeStore->layout(), *copyOfOldRoot);
}
// NOLINTNEXTLINE(misc-no-recursion)
void LeafTraverser::_traverseExistingSubtree(const blockstore::BlockId &blockId, uint8_t depth, uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, bool isLeftBorderOfTraversal, bool isRightBorderNode, bool growLastLeaf, function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
if (depth == 0) {
ASSERT(beginIndex <= 1 && endIndex <= 1,
"If root node is a leaf, the (sub)tree has only one leaf - access indices must be 0 or 1.");
LeafHandle leafHandle(_nodeStore, blockId);
if (growLastLeaf) {
if (leafHandle.node()->numBytes() != _nodeStore->layout().maxBytesPerLeaf()) {
ASSERT(!_readOnlyTraversal, "Can't grow the last leaf in a read-only traversal");
leafHandle.node()->resize(_nodeStore->layout().maxBytesPerLeaf());
}
}
if (beginIndex == 0 && endIndex == 1) {
onExistingLeaf(leafOffset, isRightBorderNode, std::move(leafHandle));
}
} else {
auto node = _nodeStore->load(blockId);
if (node == none) {
throw std::runtime_error("Couldn't find child node " + blockId.ToString());
}
auto inner = dynamic_pointer_move<DataInnerNode>(*node);
ASSERT(inner != none, "Has to be either leaf or inner node");
ASSERT((*inner)->depth() == depth, "Wrong depth given");
_traverseExistingSubtree(inner->get(), beginIndex, endIndex, leafOffset, isLeftBorderOfTraversal,
isRightBorderNode, growLastLeaf, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
}
// NOLINTNEXTLINE(misc-no-recursion)
void LeafTraverser::_traverseExistingSubtree(DataInnerNode *root, uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, bool isLeftBorderOfTraversal, bool isRightBorderNode, bool growLastLeaf, function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
ASSERT(beginIndex <= endIndex, "Invalid parameters");
//TODO Call callbacks for different leaves in parallel.
uint32_t leavesPerChild = _maxLeavesForTreeDepth(root->depth()-1);
uint32_t beginChild = beginIndex/leavesPerChild;
uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
ASSERT(endChild <= _nodeStore->layout().maxChildrenPerInnerNode(), "Traversal region would need increasing the tree depth. This should have happened before calling this function.");
uint32_t numChildren = root->numChildren();
ASSERT(!growLastLeaf || endChild >= numChildren, "Can only grow last leaf if it exists");
ASSERT(!_readOnlyTraversal || endChild <= numChildren, "Can only traverse out of bounds in a read-only traversal");
bool shouldGrowLastExistingLeaf = growLastLeaf || endChild > numChildren;
// If we traverse outside of the valid region (i.e. usually would only traverse to new leaves and not to the last leaf),
// we still have to descend to the last old child to fill it with leaves and grow the last old leaf.
if (isLeftBorderOfTraversal && beginChild >= numChildren) {
ASSERT(numChildren > 0, "Node doesn't have children.");
auto childBlockId = root->readLastChild().blockId();
uint32_t childOffset = (numChildren-1) * leavesPerChild;
_traverseExistingSubtree(childBlockId, root->depth()-1, leavesPerChild, leavesPerChild, childOffset, true, false, true,
[] (uint32_t /*index*/, bool /*isRightBorderNode*/, LeafHandle /*leaf*/) {ASSERT(false, "We don't actually traverse any leaves.");},
[] (uint32_t /*index*/) -> Data {ASSERT(false, "We don't actually traverse any leaves.");},
[] (DataInnerNode* /*node*/) {ASSERT(false, "We don't actually traverse any leaves.");});
}
// Traverse existing children
for (uint32_t childIndex = beginChild; childIndex < std::min(endChild, numChildren); ++childIndex) {
auto childBlockId = root->readChild(childIndex).blockId();
uint32_t childOffset = childIndex * leavesPerChild;
uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
bool isFirstChild = (childIndex == beginChild);
bool isLastExistingChild = (childIndex == numChildren - 1);
bool isLastChild = isLastExistingChild && (numChildren == endChild);
ASSERT(localEndIndex <= leavesPerChild, "We don't want the child to add a tree level because it doesn't have enough space for the traversal.");
_traverseExistingSubtree(childBlockId, root->depth()-1, localBeginIndex, localEndIndex, leafOffset + childOffset, isLeftBorderOfTraversal && isFirstChild,
isRightBorderNode && isLastChild, shouldGrowLastExistingLeaf && isLastExistingChild, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
}
// Traverse new children (including gap children, i.e. children that are created but not traversed because they're to the right of the current size, but to the left of the traversal region)
for (uint32_t childIndex = numChildren; childIndex < endChild; ++childIndex) {
ASSERT(!_readOnlyTraversal, "Can't create new children in a read-only traversal");
uint32_t childOffset = childIndex * leavesPerChild;
uint32_t localBeginIndex = std::min(leavesPerChild, utils::maxZeroSubtraction(beginIndex, childOffset));
uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
auto leafCreator = (childIndex >= beginChild) ? onCreateLeaf : _createMaxSizeLeaf();
auto child = _createNewSubtree(localBeginIndex, localEndIndex, leafOffset + childOffset, root->depth() - 1, leafCreator, onBacktrackFromSubtree);
root->addChild(*child);
}
// This is only a backtrack, if we actually visited a leaf here.
if (endIndex > beginIndex) {
onBacktrackFromSubtree(root);
}
}
// NOLINTNEXTLINE(misc-no-recursion)
unique_ref<DataNode> LeafTraverser::_createNewSubtree(uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, uint8_t depth, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
ASSERT(!_readOnlyTraversal, "Can't create a new subtree in a read-only traversal");
ASSERT(beginIndex <= endIndex, "Invalid parameters");
if (0 == depth) {
ASSERT(beginIndex <= 1 && endIndex == 1, "With depth 0, we can only traverse one or zero leaves (i.e. traverse one leaf or traverse a gap leaf).");
auto leafCreator = (beginIndex == 0) ? onCreateLeaf : _createMaxSizeLeaf();
return _nodeStore->createNewLeafNode(leafCreator(leafOffset));
}
uint8_t minNeededDepth = utils::ceilLog(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(endIndex));
ASSERT(depth >= minNeededDepth, "Given tree depth doesn't fit given number of leaves to create.");
uint32_t leavesPerChild = _maxLeavesForTreeDepth(depth-1);
uint32_t beginChild = beginIndex/leavesPerChild;
uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
vector<BlockId> children;
children.reserve(endChild);
// TODO Remove redundancy of following two for loops by using min/max for calculating the parameters of the recursive call.
// Create gap children (i.e. children before the traversal but after the current size)
for (uint32_t childIndex = 0; childIndex < beginChild; ++childIndex) {
uint32_t childOffset = childIndex * leavesPerChild;
auto child = _createNewSubtree(leavesPerChild, leavesPerChild, leafOffset + childOffset, depth - 1,
[] (uint32_t /*index*/)->Data {ASSERT(false, "We're only creating gap leaves here, not traversing any.");},
[] (DataInnerNode* /*node*/) {});
ASSERT(child->depth() == depth-1, "Created child node has wrong depth");
children.push_back(child->blockId());
}
// Create new children that are traversed
for(uint32_t childIndex = beginChild; childIndex < endChild; ++childIndex) {
uint32_t childOffset = childIndex * leavesPerChild;
uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
auto child = _createNewSubtree(localBeginIndex, localEndIndex, leafOffset + childOffset, depth - 1, onCreateLeaf, onBacktrackFromSubtree);
ASSERT(child->depth() == depth-1, "Created child node has wrong depth");
children.push_back(child->blockId());
}
ASSERT(children.size() > 0, "No children created");
auto newNode = _nodeStore->createNewInnerNode(depth, children);
// This is only a backtrack, if we actually created a leaf here.
if (endIndex > beginIndex) {
onBacktrackFromSubtree(newNode.get());
}
return newNode;
}
uint32_t LeafTraverser::_maxLeavesForTreeDepth(uint8_t depth) const {
return utils::intPow(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(depth));
}
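// Example (illustrative, assuming 1023 children per inner node): depth 0 holds 1 leaf,
// depth 1 holds 1023 leaves, depth 2 holds 1023^2 == 1046529 leaves - with 16376-byte
// leaves that is roughly 16 GiB of payload.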
function<Data (uint32_t index)> LeafTraverser::_createMaxSizeLeaf() const {
ASSERT(!_readOnlyTraversal, "Can't create a new leaf in a read-only traversal");
uint64_t maxBytesPerLeaf = _nodeStore->layout().maxBytesPerLeaf();
return [maxBytesPerLeaf] (uint32_t /*index*/) -> Data {
return Data(maxBytesPerLeaf).FillWithZeroes();
};
}
void LeafTraverser::_whileRootHasOnlyOneChildReplaceRootWithItsChild(unique_ref<DataNode>* root) {
DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root->get());
if (inner != nullptr && inner->numChildren() == 1) {
ASSERT(!_readOnlyTraversal, "Can't decrease tree depth in a read-only traversal");
auto newRoot = _whileRootHasOnlyOneChildRemoveRootReturnChild(inner->readChild(0).blockId());
*root = _nodeStore->overwriteNodeWith(std::move(*root), *newRoot);
_nodeStore->remove(std::move(newRoot));
}
}
// NOLINTNEXTLINE(misc-no-recursion)
unique_ref<DataNode> LeafTraverser::_whileRootHasOnlyOneChildRemoveRootReturnChild(const blockstore::BlockId &blockId) {
ASSERT(!_readOnlyTraversal, "Can't decrease tree depth in a read-only traversal");
auto current = _nodeStore->load(blockId);
ASSERT(current != none, "Node not found");
auto inner = dynamic_pointer_move<DataInnerNode>(*current);
if (inner == none) {
return std::move(*current);
} else if ((*inner)->numChildren() == 1) {
auto result = _whileRootHasOnlyOneChildRemoveRootReturnChild((*inner)->readChild(0).blockId());
_nodeStore->remove(std::move(*inner));
return result;
} else {
return std::move(*inner);
}
}
}
}
}
src/blobstore/implementations/onblocks/datatreestore/impl/LeafTraverser.h 0000664 0000000 0000000 00000010465 14456142610 0027420 0 ustar 00root root 0000000 0000000 #pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_LEAFTRAVERSER_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_LEAFTRAVERSER_H_
#include <functional>
#include <cpp-utils/pointer/unique_ref.h>
#include <cpp-utils/data/Data.h>
#include <blockstore/utils/BlockId.h>
#include "blobstore/implementations/onblocks/datatreestore/LeafHandle.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataNode;
class DataLeafNode;
class DataInnerNode;
}
namespace datatreestore {
/**
 * LeafTraverser traverses, and if necessary creates, the leaves of a tree. It can grow the
 * tree (create missing leaves and increase the tree depth) and shrink it again after the
 * callbacks ran, updating the root node through the passed-in pointer. In a read-only
 * traversal, any operation that would modify the tree triggers an assertion instead.
 */
class LeafTraverser final {
public:
LeafTraverser(datanodestore::DataNodeStore *nodeStore, bool readOnlyTraversal);
void traverseAndUpdateRoot(
cpputils::unique_ref<datanodestore::DataNode>* root, uint32_t beginIndex, uint32_t endIndex,
std::function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf,
std::function<cpputils::Data (uint32_t index)> onCreateLeaf,
std::function