pax_global_header00006660000000000000000000000064145401064230014511gustar00rootroot0000000000000052 comment=34e069d90460f422c549b3a8994aa26820cf6544 soci-snapshotter-0.4.1/000077500000000000000000000000001454010642300150205ustar00rootroot00000000000000soci-snapshotter-0.4.1/.dockerignore000066400000000000000000000000061454010642300174700ustar00rootroot00000000000000/.git soci-snapshotter-0.4.1/.github/000077500000000000000000000000001454010642300163605ustar00rootroot00000000000000soci-snapshotter-0.4.1/.github/CODEOWNERS000066400000000000000000000001031454010642300177450ustar00rootroot00000000000000# Global (repository-wide) owners: * @awslabs/soci-maintainers soci-snapshotter-0.4.1/.github/ISSUE_TEMPLATE/000077500000000000000000000000001454010642300205435ustar00rootroot00000000000000soci-snapshotter-0.4.1/.github/ISSUE_TEMPLATE/bug_report.yaml000066400000000000000000000014321454010642300235770ustar00rootroot00000000000000name: Bug report description: Create a bug report to help improve soci-snapshotter title: "[Bug] " labels: ["bug"] body: - type: textarea attributes: label: Description description: | Briefly describe the problem/bug you are having. validations: required: true - type: textarea attributes: label: Steps to reproduce the bug - type: textarea attributes: label: Describe the results you expected validations: required: true - type: textarea attributes: label: Host information value: | 1. OS: 2. Snapshotter Version: 3. Containerd Version: validations: required: true - type: textarea attributes: label: Any additional context or information about the bugsoci-snapshotter-0.4.1/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000006631454010642300225400ustar00rootroot00000000000000blank_issues_enabled: true contact_links: - name: Ask a question (GitHub Discussions) url: https://github.com/awslabs/soci-snapshotter/discussions about: | In most cases, GitHub Discussions is the preferred place to ask a question, and visible to more users. 
If you encountered a bug or request a feature, please use the issue template. Otherwise, please consider asking in GitHub Discussions first.soci-snapshotter-0.4.1/.github/ISSUE_TEMPLATE/feature_request.yaml000066400000000000000000000012111454010642300246250ustar00rootroot00000000000000name: Feature request description: Suggest an idea or new feature for this project title: "[FEATURE] " labels: ["feature"] body: - type: textarea attributes: label: Description description: | Briefly describe the feature request and, if related to a problem, what the problem is. validations: required: true - type: textarea attributes: label: Describe the solution you'd like - type: textarea attributes: label: Describe any alternative solutions/features you've considered - type: textarea attributes: label: Any additional context or information about the feature requestsoci-snapshotter-0.4.1/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000003041454010642300221560ustar00rootroot00000000000000**Issue #, if available:** **Description of changes:** **Testing performed:** By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. soci-snapshotter-0.4.1/.github/dependabot.yml000066400000000000000000000020001454010642300212000ustar00rootroot00000000000000version: 2 updates: # Automatic upgrade for go modules. - package-ecosystem: "gomod" directory: "/" schedule: interval: "daily" ignore: # skip k8s deps since they use the latest go version/features that may not be in the go version soci uses # Also ignored in /scripts/bump-deps.sh - dependency-name: "k8s.io/*" # Automatic upgrade for go modules of cmd package. 
- package-ecosystem: "gomod" directory: "/cmd" schedule: interval: "daily" ignore: # skip k8s deps and soci-snapshotter itself # Also ignored in /scripts/bump-deps.sh - dependency-name: "github.com/awslabs/soci-snapshotter" - dependency-name: "k8s.io/*" # Automatic upgrade for base images used in the Dockerfile - package-ecosystem: "docker" directory: "/" schedule: interval: "daily" # Automatic upgrade for Github Actions - package-ecosystem: "github-actions" directory: "/" # means ".github/workflows" schedule: interval: "daily" soci-snapshotter-0.4.1/.github/workflows/000077500000000000000000000000001454010642300204155ustar00rootroot00000000000000soci-snapshotter-0.4.1/.github/workflows/benchmark_visualization.yml000066400000000000000000000072021454010642300260540ustar00rootroot00000000000000name: Visualize Benchmark and upload results on: push: branches: [ main ] paths: - '**' - '!docs/**' # ignore docs changes - '!**.md' # ignore markdown changes permissions: contents: write deployments: write env: GO_VERSION: '1.20.12' jobs: benchmark: runs-on: ubuntu-20.04 steps: - name: Checkout main branch uses: actions/checkout@v3 with: fetch-depth: 0 submodules: true - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: make - name: Run benchmark run: make benchmarks-perf-test - name: Upload latest benchmark result uses: actions/upload-artifact@v3 with: name: benchmark-result-artifact path: ${{github.workspace}}/benchmark/performanceTest/output/results.json download-and-convert-benchmark-result-to-visualization-data: runs-on: ubuntu-20.04 needs: benchmark steps: - name: Checkout main branch uses: actions/checkout@v3 - name: Download latest benchmark result uses: actions/download-artifact@v3 with: name: benchmark-result-artifact path: ${{github.workspace}} - name: Make temporary folder run: mkdir ${{github.workspace}}/current - name: Change permission of visualization_data_converter.sh run: chmod +x 
${{github.workspace}}/scripts/visualization_data_converter.sh - name: Convert benchmark result to visualization data run: ${{github.workspace}}/scripts/visualization_data_converter.sh ${{github.workspace}}/results.json ${{github.workspace}}/current - name: Build matrix of filepaths id: set-matrix # Create a JSON array with the file paths and store it in the 'files' output run: echo "files=$(find ${{github.workspace}}/current -type f -name '*.json' -printf '%p\n' | jq -R . | jq -cs .)" >> $GITHUB_OUTPUT - name: Upload visualization files as github artifacts uses: actions/upload-artifact@v3 with: name: benchmark-gh-pages-artifact path: ${{github.workspace}}/current outputs: matrix: ${{ steps.set-matrix.outputs.files }} push-benchmark-result-gh-pages: name: Push benchmark result to Github-pages runs-on: ubuntu-20.04 needs: download-and-convert-benchmark-result-to-visualization-data strategy: matrix: file: ${{ fromJson(needs.download-and-convert-benchmark-result-to-visualization-data.outputs.matrix) }} steps: - name: Checkout main branch uses: actions/checkout@v3 - name: Create current folder run: mkdir ${{github.workspace}}/current - name: Download latest benchmark visualization files uses: actions/download-artifact@v3 with: name: benchmark-gh-pages-artifact path: ${{github.workspace}}/current - name: Extract file names id: get_filename run: | # Get the filename without the path filename=$(basename "${{ matrix.file }}") # Remove the .json extension filename_without_extension="${filename%.json}" echo "filename=$filename_without_extension" >> $GITHUB_OUTPUT - name: Run benchmark action uses: benchmark-action/github-action-benchmark@v1 with: name: Soci Benchmark tool: 'customSmallerIsBetter' benchmark-data-dir-path: "dev/benchmarks/performanceTest/${{ steps.get_filename.outputs.filename }}" output-file-path: ${{ matrix.file }} github-token: ${{ secrets.GITHUB_TOKEN }} auto-push: true 
soci-snapshotter-0.4.1/.github/workflows/build.yml000066400000000000000000000017461454010642300222470ustar00rootroot00000000000000name: Build on: push: branches: ['main', 'release/**'] paths: - '**' - '!docs/**' # ignore docs changes - '!**.md' # ignore markdown changes pull_request: branches: ['main', 'release/**'] paths: - '**.go' - 'go.*' - 'cmd/go.*' - 'Makefile' - 'Dockerfile' - 'integration/**' - 'scripts/**' env: GO_VERSION: '1.20.12' jobs: test: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: make - run: make test integration: runs-on: ubuntu-20.04 strategy: fail-fast: false matrix: containerd: ["1.6.19", "1.7.0"] env: DOCKER_BUILD_ARGS: "CONTAINERD_VERSION=${{ matrix.containerd }}" steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: make integration soci-snapshotter-0.4.1/.github/workflows/bump-deps.yml000066400000000000000000000021351454010642300230350ustar00rootroot00000000000000# adapted based on # https://github.com/google/go-containerregistry/blob/main/.github/workflows/bump-deps.yaml name: Bump Deps on: schedule: - cron: '0 10 * * 2' # weekly at 10AM Tuesday workflow_dispatch: permissions: contents: write pull-requests: write jobs: bump-deps: name: Bump Deps # Don't bother bumping deps on forks. if: ${{ github.repository == 'awslabs/soci-snapshotter' }} runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: '1.20.6' - run: ./scripts/bump-deps.sh - name: Create Pull Request uses: peter-evans/create-pull-request@v5 with: title: "Bump dependencies using scripts/bump-deps.sh" commit-message: "Bump dependencies using scripts/bump-deps.sh" body: "This PR created by [create-pull-request](https://github.com/peter-evans/create-pull-request) must be closed and reopened manually to trigger automated checks." 
labels: dependencies delete-branch: true author: "GitHub " signoff: true soci-snapshotter-0.4.1/.github/workflows/comparision-test.yml000066400000000000000000000006261454010642300244440ustar00rootroot00000000000000name: Comparision Tests on: schedule: - cron: "0 0 */2 * *" # every 2 days env: GO_VERSION: '1.20.12' jobs: check: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: make - run: make benchmarks - run: cd benchmark/comparisonTest && cat output/results.json soci-snapshotter-0.4.1/.github/workflows/prebuild.yml000066400000000000000000000017441454010642300227540ustar00rootroot00000000000000name: Pre-build on: push: branches: [ main ] pull_request: branches: [ main ] env: GO_VERSION: '1.20.12' jobs: check: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 with: # check-dco will check the last 20 commits, but commit ranges # exclude the start commit in the result, but need that commit # in order to calculate the range. i.e. HEAD~20..HEAD includes # 20 commits, but including HEAD it needs 21 commits. fetch-depth: 21 - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: wget https://github.com/google/flatbuffers/releases/download/v22.9.29/Linux.flatc.binary.g++-10.zip - run: unzip Linux.flatc.binary.g++-10.zip - run: ./scripts/install-check-tools.sh - run: ./scripts/check-ltag.sh - run: ./scripts/check-dco.sh - run: ./scripts/check-lint.sh - run: PATH=$PATH:$(pwd) ./scripts/check-flatc.sh soci-snapshotter-0.4.1/.github/workflows/releases.yml000066400000000000000000000045721454010642300227530ustar00rootroot00000000000000name: Release on: push: tags: - 'v[0-9]+.[0-9]+.[0-9]+' env: GO_VERSION: '1.20.12' permissions: contents: write deployments: write jobs: # Any way we can just call build.yml? 
test: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: make - run: make test integration: runs-on: ubuntu-20.04 strategy: fail-fast: false matrix: containerd: ["1.6.19", "1.7.0"] env: DOCKER_BUILD_ARGS: "CONTAINERD_VERSION=${{ matrix.containerd }}" steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - run: make integration generate-artifacts: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 - name: Setup and export variables run: | export release_tag=${GITHUB_REF#refs/*/} # Strip down to raw tag name export release_version=${release_tag/v/} # Remove v from tag name echo "release_tag=${release_tag}">> $GITHUB_ENV echo "dynamic_binary_name=soci-snapshotter-${release_version}-linux-amd64.tar.gz" >> $GITHUB_ENV echo "static_binary_name=soci-snapshotter-${release_version}-linux-amd64-static.tar.gz" >> $GITHUB_ENV mkdir release - name: Create release binaries run: make RELEASE_TAG=${{ env.release_tag }} release - uses: actions/upload-artifact@v3 with: name: artifacts path: release/ if-no-files-found: error outputs: dynamic_binary_name: ${{ env.dynamic_binary_name }} static_binary_name: ${{ env.static_binary_name }} create-release: needs: generate-artifacts runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 - uses: actions/download-artifact@v3 with: name: artifacts - uses: softprops/action-gh-release@v1 with: draft: true prerelease: false generate_release_notes: true files: | ${{ needs.generate-artifacts.outputs.dynamic_binary_name }} ${{ needs.generate-artifacts.outputs.dynamic_binary_name }}.sha256sum ${{ needs.generate-artifacts.outputs.static_binary_name }} ${{ needs.generate-artifacts.outputs.static_binary_name }}.sha256sum soci-snapshotter-0.4.1/.gitignore000066400000000000000000000001141454010642300170040ustar00rootroot00000000000000/out *.db .vscode/ go.work go.work.sum /benchmark/*/output/ /benchmark/bin/ 
soci-snapshotter-0.4.1/.golangci.yml000066400000000000000000000010731454010642300174050ustar00rootroot00000000000000# https://golangci-lint.run/usage/configuration#config-file linters: enable: - staticcheck - unconvert - gofmt - goimports - ineffassign - revive - vet - unused - misspell disable: - errcheck issues: exclude-rules: - linters: - revive # Ignore unused parameter rule. It's not enforced by go and it can make it hard # to understand what the unused parameter was supposed to be used for. text: "unused-parameter:" run: deadline: 4m skip-dirs: - docs - images - out - script soci-snapshotter-0.4.1/.headers/000077500000000000000000000000001454010642300165115ustar00rootroot00000000000000soci-snapshotter-0.4.1/.headers/go.txt000066400000000000000000000011321454010642300176540ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ soci-snapshotter-0.4.1/CODE_OF_CONDUCT.md000066400000000000000000000004651454010642300176240ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. 
soci-snapshotter-0.4.1/CONTRIBUTING.md000066400000000000000000000104261454010642300172540ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages and a [DCO Sign-Off](https://wiki.linuxfoundation.org/dco) on each commit. 1. Every commit should compile successfully. 2. Every commit should pass tests. 3. 
No commit should exist solely to fix a bug introduced by another commit in the same PR. 4. Squash and rebase commits as necessary so that PRs satisfy these requirements. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Coding conventions and Style guides In addition to pursuing code correctness, please also keep the following standards, guides, norms, and conventions in mind when drafting contributions. ### Enforced via CI * [Staticcheck](https://staticcheck.io/) (configured for "SA*" checks) * [gofmt](https://pkg.go.dev/cmd/gofmt) See [.golangci.yml](.golangci.yml) for an exhaustive list. Configuring your editor to automatically apply these will reduce time spent updating PRs after CI failures. `make check` will also run these and others in linting mode. ### Considered during Code Review * [Effective Go](https://go.dev/doc/effective_go) * [Google's Go Style Guide](https://google.github.io/styleguide/go/) ### Project-specific Durations should be [de]serialized as `int64` and kept as [`time.Duration`](https://pkg.go.dev/time#Duration) internally where feasible. ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. soci-snapshotter-0.4.1/Dockerfile000066400000000000000000000044551454010642300170220ustar00rootroot00000000000000# Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ARG CONTAINERD_VERSION=1.6.17 ARG RUNC_VERSION=1.1.5 ARG NERDCTL_VERSION=1.3.0 FROM golang:1.20.12-bookworm AS golang-base FROM golang-base AS containerd-snapshotter-base ARG CONTAINERD_VERSION ARG RUNC_VERSION ARG NERDCTL_VERSION ARG TARGETARCH COPY . 
$GOPATH/src/github.com/awslabs/soci-snapshotter ENV GOPROXY direct RUN apt-get update -y && apt-get install -y libbtrfs-dev libseccomp-dev libz-dev gcc fuse pigz RUN cp $GOPATH/src/github.com/awslabs/soci-snapshotter/out/soci /usr/local/bin/ && \ cp $GOPATH/src/github.com/awslabs/soci-snapshotter/out/soci-snapshotter-grpc /usr/local/bin/ && \ mkdir /etc/soci-snapshotter-grpc && \ mkdir /etc/containerd/ && \ cp $GOPATH/src/github.com/awslabs/soci-snapshotter/integration/config/etc/soci-snapshotter-grpc/config.toml /etc/soci-snapshotter-grpc/ && \ cp $GOPATH/src/github.com/awslabs/soci-snapshotter/integration/config/etc/containerd/config.toml /etc/containerd/ RUN curl -sSL --output /tmp/containerd.tgz https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-${TARGETARCH:-amd64}.tar.gz && \ tar zxvf /tmp/containerd.tgz -C /usr/local/ && \ rm -f /tmp/containerd.tgz RUN curl -sSL --output /tmp/runc https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.${TARGETARCH:-amd64} && \ cp /tmp/runc /usr/local/bin/ && \ chmod +x /usr/local/bin/runc && \ rm -f /tmp/runc RUN curl -sSL --output /tmp/nerdctl.tgz https://github.com/containerd/nerdctl/releases/download/v${NERDCTL_VERSION}/nerdctl-${NERDCTL_VERSION}-linux-${TARGETARCH:-amd64}.tar.gz && \ tar zxvf /tmp/nerdctl.tgz -C /usr/local/bin/ && \ rm -f /tmp/nerdctl.tgz FROM registry:2 AS registry2 soci-snapshotter-0.4.1/LICENSE000066400000000000000000000236361454010642300160370ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
soci-snapshotter-0.4.1/Makefile000066400000000000000000000100331454010642300164550ustar00rootroot00000000000000# Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Base path used to install. CMD_DESTDIR ?= /usr/local GO111MODULE_VALUE=auto OUTDIR ?= $(CURDIR)/out PKG=github.com/awslabs/soci-snapshotter VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) GO_BUILDTAGS ?= ifneq ($(STATIC),) GO_BUILDTAGS += osusergo netgo static_build endif GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) GO_LD_FLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) $(GO_EXTRA_LDFLAGS) ifneq ($(STATIC),) GO_LD_FLAGS += -extldflags "-static" endif GO_LD_FLAGS+=' SOCI_SNAPSHOTTER_PROJECT_ROOT ?= $(shell pwd) LTAG_TEMPLATE_FLAG=-t ./.headers FBS_FILE_PATH=$(CURDIR)/ztoc/fbs/ztoc.fbs FBS_FILE_PATH_COMPRESSION=$(CURDIR)/ztoc/compression/fbs/zinfo.fbs COMMIT=$(shell git rev-parse HEAD) STARGZ_BINARY?=/usr/local/bin/containerd-stargz-grpc CMD=soci-snapshotter-grpc soci CMD_BINARIES=$(addprefix $(OUTDIR)/,$(CMD)) .PHONY: all build check add-ltag install uninstall clean test integration release all: build build: $(CMD) FORCE: soci-snapshotter-grpc: FORCE cd cmd/ ; GO111MODULE=$(GO111MODULE_VALUE) go build -o $(OUTDIR)/$@ $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) $(GO_TAGS) ./soci-snapshotter-grpc soci: 
FORCE cd cmd/ ; GO111MODULE=$(GO111MODULE_VALUE) go build -o $(OUTDIR)/$@ $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) $(GO_TAGS) ./soci check: cd scripts/ ; ./check-all.sh flatc: rm -rf $(CURDIR)/ztoc/fbs/ztoc flatc -o $(CURDIR)/ztoc/fbs -g $(FBS_FILE_PATH) rm -rf $(CURDIR)/ztoc/compression/fbs/zinfo flatc -o $(CURDIR)/ztoc/compression/fbs -g $(FBS_FILE_PATH_COMPRESSION) install: @echo "$@" @mkdir -p $(CMD_DESTDIR)/bin @install $(CMD_BINARIES) $(CMD_DESTDIR)/bin uninstall: @echo "$@" @rm -f $(addprefix $(CMD_DESTDIR)/bin/,$(notdir $(CMD_BINARIES))) clean: rm -rf $(OUTDIR) vendor: @GO111MODULE=$(GO111MODULE_VALUE) go mod tidy @cd ./cmd ; GO111MODULE=$(GO111MODULE_VALUE) go mod tidy test: @echo "$@" @GO111MODULE=$(GO111MODULE_VALUE) go test $(GO_TEST_FLAGS) $(GO_LD_FLAGS) -race ./... integration: build @echo "$@" @echo "SOCI_SNAPSHOTTER_PROJECT_ROOT=$(SOCI_SNAPSHOTTER_PROJECT_ROOT)" @GO111MODULE=$(GO111MODULE_VALUE) SOCI_SNAPSHOTTER_PROJECT_ROOT=$(SOCI_SNAPSHOTTER_PROJECT_ROOT) ENABLE_INTEGRATION_TEST=true go test $(GO_TEST_FLAGS) -v -timeout=0 ./integration release: @echo "$@" @$(SOCI_SNAPSHOTTER_PROJECT_ROOT)/scripts/create-releases.sh $(RELEASE_TAG) benchmarks: @echo "$@" @cd benchmark/performanceTest ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/PerfTests . && sudo ../bin/PerfTests @cd benchmark/comparisonTest ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/CompTests . && sudo ../bin/CompTests build-benchmarks: @echo "$@" @cd benchmark/performanceTest ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/PerfTests . @cd benchmark/comparisonTest ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/CompTests . benchmarks-perf-test: @echo "$@" @cd benchmark/performanceTest ; sudo rm -rf output ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/PerfTests . && sudo ../bin/PerfTests -show-commit benchmarks-stargz: @echo "$@" @cd benchmark/stargzTest ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/StargzTests . 
&& sudo ../bin/StargzTests $(COMMIT) ../singleImage.csv 10 $(STARGZ_BINARY) benchmarks-parser: @echo "$@" @cd benchmark/parser ; GO111MODULE=$(GO111MODULE_VALUE) go build -o ../bin/Parser . soci-snapshotter-0.4.1/NOTICE.md000066400000000000000000000121241454010642300163230ustar00rootroot00000000000000The source code developed under the SOCI Snapshotter Project is licensed under Apache License 2.0. The SOCI Snapshotter project contains modified subcomponents from the Stargz Snapshotter Project, which is also licensed under Apache License 2.0. However, the SOCI Snapshotter project contains modified subcomponents from Container Registry Filesystem Project with separate copyright notices and license terms. Your use of the source code for the subcomponent is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain following file header. ``` Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. ``` This source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions and disclaimer are the following. ``` Copyright (c) 2019 Google LLC. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` The SOCI Snapshotter project also contains modified benchmarking code from HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. This source code is governed by a MIT license. The copyright notice, condition and disclaimer are the following. The file in the benchmarking code contains it as the file header. ``` The MIT License (MIT) Copyright (c) 2015 Tintri Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ``` The SOCI Snapshotter project also contains modified code from the Zlib Project with separate copyright notices and license terms. Your use of that C source code is subject to the terms and conditions as defined by the source project. The copyright notice, condition and disclaimer are the following. ``` Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Jean-loup Gailly Mark Adler jloup@gzip.org madler@alumni.caltech.edu ```soci-snapshotter-0.4.1/README.md000066400000000000000000000064361454010642300163100ustar00rootroot00000000000000# SOCI Snapshotter SOCI Snapshotter is a [containerd](https://github.com/containerd/containerd) snapshotter plugin. 
It enables standard OCI images to be lazily loaded without requiring a build-time conversion step. "SOCI" is short for "Seekable OCI", and is pronounced "so-CHEE". The standard method for launching containers starts with a setup phase during which the container image data is completely downloaded from a remote registry and a filesystem is assembled. The application is not launched until this process is complete. Using a representative suite of images, Harter et al [FAST '16](https://www.usenix.org/node/194431) found that image download accounts for 76% of container startup time, but on average only 6.4% of the fetched data is actually needed for the container to start doing useful work. One approach for addressing this is to eliminate the need to download the entire image before launching the container, and to instead lazily load data on demand, and also prefetch data in the background. ## Design considerations ### No image conversion Existing lazy loading snapshotters rely on a build-time conversion step, to produce a new image artifact. This is problematic for container developers who won't or can't modify their CI/CD pipeline, or don't want to manage the cost and complexity of keeping copies of images in two formats. It also creates problems for image signing, since the conversion step invalidates any signatures that were created against the original OCI image. SOCI addresses these issues by loading from the original, unmodified OCI image. Instead of converting the image, it builds a separate index artifact (the "SOCI index"), which lives in the remote registry, right next to the image itself. At container launch time, SOCI Snapshotter queries the registry for the presence of the SOCI index using the mechanism developed by the [OCI Reference Types working group](https://github.com/opencontainers/wg-reference-types). 
### Workload-specific load order optimization Another big consideration that we haven't implmented/integrated into SOCI is to image load order based on your specific workload. See [design README](./docs/design-docs/README.md#workload-specific-load-order-optimization) for more details. ## Documentation - [Getting Started](docs/getting-started.md): walk through SOCI setups and features. - [Build](docs/build.md): how to build SOCI from source, test SOCI (and contribute). - [Install](docs/install.md): how to install SOCI as a systemd unit. - [Debug](docs/debug.md): accessing logs/metrics and debugging common errors. - [Glossary](docs/glossary.md): glossary we use in the project. ## Project Origin There a few different lazy loading projects in the containerd snapshotter community. This project began as a fork of the popular [Stargz-snapshotter project](https://github.com/containerd/stargz-snapshotter) from commit 743e5e70a7fdec9cd4ab218e1d4782fbbd253803 with the intention of an upstream patch. During development the changes were fundamental enough that the decision was made to create soci-snapshotter as a standalone project. Soci-snapshotter builds on stargz's success and innovative ideas. Long term, this project intends and hopes to join [containerd](https://github.com/containerd/containerd) as a non-core project and intends to follow CNCF best practices. soci-snapshotter-0.4.1/RELEASES.md000066400000000000000000000123151454010642300165470ustar00rootroot00000000000000# Releases ## Versioning Versions follow the `Major.Minor.Patch-PreRelease` scheme of [Semantic Versioning](https://semver.org). ### SOCI CLI / snapshotter / library The SOCI CLI, snapshotter, and library all share the same version to make it easy to understand compatibility. Collectively, this will be referred to as the "SOCI version". ### zTOC data format zTOC has its own version which will evolve independently from the other components of SOCI. 
A bump in zTOC version will translate to a bump in the SOCI version, but the reverse is not true. Since zTOCs are stored in content-addressable registries, changes to the zTOC format will be strongly discouraged to avoid churn for SOCI users. *__Note:__ The zTOC data format is separately versioned, but the library that's used to build, interact with, and manipulate zTOCs is part of the SOCI library and follows the SOCI version.* ## Supported Versions The SOCI projct is still under rapid development. As such, official SOCI project support will be in the form of new Major and Minor versions. We may release Patch versions in an *ad hoc* manner if there is sufficient demand from the community. This policy will be changed once SOCI reaches v1.0.0. ## Releases Releases are made through [GitHub Releases](https://github.com/awslabs/soci-snapshotter/releases). If there is demand, we will consider creating official linux packages for various distributions. ### Major Version Releases (branch: `main`) Major versions are developed on the main branch. When the time comes to release a new major version, a new `release/Major.0` branch will be created following the same process used for minor releases. ### Minor Version Releases (branch: `main`) Minor versions are developed on the main branch. When the time comes to release a new minor version, a new `release/Major.Minor` branch will be created from the tip of main. Once the new branch has been created, a new `vMajor.Minor.0` tag will be created following the process used for patch releases. ### Patch Version Releases (branch: `release/Major.Minor`) Patch versions are developed after the initial minor version tag in the `release/Major.Minor` branch to which they belong. Patch releases are used for security fixes and major or widespread bug fixes. Patch releases will not include new features. 
When the time comes to release a new patch version, a new commit will be added to the `release/Major.Minor` branch to add any necessary release artifacts (e.g. a finalized copy of third party licenses, finalized change log, etc), and a new `vMajor.Minor.Patch` tag will be created from that commit. Once the tag is created, a release will automatically be added to github with the same `vMajor.Minor.Patch` version scheme containing the release artifacts. For a full list of artifacts contained in the release, see [Release Artifacts](#release-artifacts). *__NOTE:__ release automation is currently aspirational. See https://github.com/awslabs/soci-snapshotter/issues/447* ## API Stability Semantic Versioning expects minor versions to contain only backwards-compatible, new features. In order to avoid partial features from preventing new minor version releases, we do not guarantee that new features are stable when they are introduced. Instead, we maintain the following table indicating the stability of each new feature along with which SOCI version stabilized the feature. | Feature/API | Stability | When it became stable | | ------------------------------------- | --------- | ----------------------------- | | [zTOC data format](#ztoc-data-format) | Beta | targetting before SOCI v1.0.0 | | [zTOC API](#ztoc-api) | Unstable | targetting v1.0.0 | | [SOCI API](#soci-api) | Unstable | targetting v1.0.0 | | [SOCI CLI](#soci-cli) | Unstable | | ### zTOC data format The zTOC data format is the serialized form of a zTOC. Since zTOCs will be stored in registries and registries are content addressable, we don't anticipate many changes to the zTOC format. ### zTOC API The zTOC API is the portion of the SOCI library used for creating and working with zTOCs. ### SOCI API The SOCI API is the portion of the SOCI library used for creating and interacting with SOCI indices. ### SOCI CLI The SOCI CLI (`bin/soci`) is the binary that can be used to create and inspect SOCI indicies and zTOCs. 
## Release Artifacts ``` Changelog soci-snapshotter-$VERSION-linux-amd64.tar.gz soci-snapshotter-$VERSION-linux-amd64.tar.gz.sha256sum soci-snapshotter-$VERSION-linux-amd64-static.tar.gz soci-snapshotter-$VERSION-linux-amd64-static.tar.gz.sha256sum soci-snapshotter-$VERSION-linux-arm64.tar.gz soci-snapshotter-$VERSION-linux-arm64.tar.gz.sha256sum soci-snapshotter-$VERSION-linux-arm64-static.tar.gz soci-snapshotter-$VERSION-linux-arm64-static.tar.gz.sha256sum Source code (zip) Source code (tar.gz) ``` Each release tarball contains the following: ``` soci-snapshotter-grpc soci THIRD_PARTY_LICENSES NOTICE.md ``` ## Next Release / Release Cadence The next release is tracked via [GitHub milestones](https://github.com/awslabs/soci-snapshotter/milestones). The SOCI project doesn’t follow any fixed release cadence. soci-snapshotter-0.4.1/THIRD_PARTY_LICENSES000066400000000000000000004232521454010642300200710ustar00rootroot00000000000000** github.com/containerd/containerd; version v1.7.2 - https://github.com/containerd/containerd ** github.com/containerd/continuity; version v0.4.1 - https://github.com/containerd/continuity ** github.com/containerd/fifo; version v1.1.0 - https://github.com/containerd/fifo ** github.com/containerd/ttrpc; version v1.2.2 - https://github.com/containerd/ttrpc ** github.com/containerd/typeurl/v2; version v2.1.1 - https://github.com/containerd/typeurl/v2 ** github.com/docker/cli/cli/config; version v24.0.4 - https://github.com/docker/cli/cli/config ** github.com/docker/docker/pkg/homedir; version v23.0.5 - https://github.com/docker/docker/pkg/homedir ** github.com/docker/go-events; version v0.0.0-20190806004212-e31b211e4f1c - https://github.com/docker/go-events ** github.com/docker/go-metrics; version v0.0.1 - https://github.com/docker/go-metrics ** github.com/go-logr/logr; version v1.2.4 - https://github.com/go-logr/logr ** github.com/go-logr/stdr; version v1.2.2 - https://github.com/go-logr/stdr ** github.com/go-openapi/jsonpointer; version 
v0.19.5 - https://github.com/go-openapi/jsonpointer ** github.com/go-openapi/jsonreference; version v0.20.0 - https://github.com/go-openapi/jsonreference ** github.com/go-openapi/swag; version v0.19.14 - https://github.com/go-openapi/swag ** github.com/golang/groupcache/lru; version v0.0.0-20210331224755-41bb18bfe9da - https://github.com/golang/groupcache/lru ** github.com/google/flatbuffers/go; version v23.5.26 - https://github.com/google/flatbuffers/go ** github.com/google/gnostic; version v0.5.7-v3refs - https://github.com/google/gnostic ** github.com/google/gofuzz; version v1.2.0 - https://github.com/google/gofuzz ** github.com/klauspost/compress; version v1.16.7 - https://github.com/klauspost/compress ** github.com/matttproud/golang_protobuf_extensions/pbutil; version v1.0.4 - https://github.com/matttproud/golang_protobuf_extensions/pbutil ** github.com/moby/locker; version v1.0.1 - https://github.com/moby/locker ** github.com/moby/sys/mountinfo; version v0.6.2 - https://github.com/moby/sys/mountinfo ** github.com/moby/sys/signal; version v0.7.0 - https://github.com/moby/sys/signal ** github.com/modern-go/concurrent; version v0.0.0-20180306012644-bacd9c7ef1dd - https://github.com/modern-go/concurrent ** github.com/modern-go/reflect2; version v1.0.2 - https://github.com/modern-go/reflect2 ** github.com/opencontainers/go-digest; version v1.0.0 - https://github.com/opencontainers/go-digest ** github.com/opencontainers/image-spec; version v1.1.0-rc4 - https://github.com/opencontainers/image-spec ** github.com/opencontainers/runc/libcontainer/user; version v1.1.7 - https://github.com/opencontainers/runc/libcontainer/user ** github.com/opencontainers/runtime-spec/specs-go; version v1.1.0-rc.3 - https://github.com/opencontainers/runtime-spec/specs-go ** github.com/opencontainers/selinux; version v1.11.0 - https://github.com/opencontainers/selinux ** github.com/pelletier/go-toml; version v1.9.5 - https://github.com/pelletier/go-toml ** 
github.com/prometheus/client_golang/prometheus; version v1.16.0 - https://github.com/prometheus/client_golang/prometheus ** github.com/prometheus/client_model/go; version v0.4.0 - https://github.com/prometheus/client_model/go ** github.com/prometheus/common; version v0.44.0 - https://github.com/prometheus/common ** github.com/prometheus/procfs; version v0.11.0 - https://github.com/prometheus/procfs ** go.opentelemetry.io/otel; version v1.16.0 - https://go.opentelemetry.io/otel ** go.opentelemetry.io/otel/metric; version v1.16.0 - https://go.opentelemetry.io/otel/metric ** go.opentelemetry.io/otel/trace; version v1.16.0 - https://go.opentelemetry.io/otel/trace ** google.golang.org/genproto/googleapis/rpc; version v0.0.0-20230717213848-3f92550aa753 - https://google.golang.org/genproto/googleapis/rpc ** google.golang.org/genproto/protobuf/field_mask; version v0.0.0-20230717213848-3f92550aa753 - https://google.golang.org/genproto/protobuf/field_mask ** google.golang.org/grpc; version v1.56.2 - https://google.golang.org/grpc ** gopkg.in/yaml.v2; version v2.4.0 - https://gopkg.in/yaml.v2 ** k8s.io/api; version v0.26.3 - https://k8s.io/api ** k8s.io/apimachinery/pkg; version v0.26.3 - https://k8s.io/apimachinery/pkg ** k8s.io/client-go; version v0.26.3 - https://k8s.io/client-go ** k8s.io/klog/v2; version v2.90.1 - https://k8s.io/klog/v2 ** k8s.io/kube-openapi/pkg; version v0.0.0-20221012153701-172d655c2280 - https://k8s.io/kube-openapi/pkg ** k8s.io/kube-openapi/pkg/validation/spec; version v0.0.0-20221012153701-172d655c2280 - https://k8s.io/kube-openapi/pkg/validation/spec ** k8s.io/utils; version v0.0.0-20230220204549-a5ecb0141aa5 - https://k8s.io/utils ** oras.land/oras-go/v2; version v2.2.1 - https://oras.land/oras-go/v2 ** sigs.k8s.io/json; version v0.0.0-20220713155537-f223a00ba0e2 - https://sigs.k8s.io/json ** sigs.k8s.io/structured-merge-diff/v4; version v4.2.3 - https://sigs.k8s.io/structured-merge-diff/v4 Apache License Version 2.0, January 2004 
http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -------------------------------------------------------------------------------- ** github.com/beorn7/perks/quantile; version v1.0.1 - https://github.com/beorn7/perks/quantile Copyright (C) 2013 Blake Mizerany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/cespare/xxhash/v2; version v2.2.0 - https://github.com/cespare/xxhash/v2 Copyright (c) 2016 Caleb Spare MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- ** github.com/davecgh/go-spew/spew; version v1.1.1 - https://github.com/davecgh/go-spew/spew ISC License Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -------------------------------------------------------------------------------- ** github.com/docker/docker-credential-helpers; version v0.7.0 - https://github.com/docker/docker-credential-helpers Copyright (c) 2016 David Calavera Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/emicklei/go-restful/v3; version v3.10.1 - https://github.com/emicklei/go-restful/v3 Copyright (c) 2012,2013 Ernest Micklei MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/gogo/protobuf; version v1.3.2 - https://github.com/gogo/protobuf Copyright (c) 2013, The GoGo Authors. All rights reserved. Protocol Buffers for Go with Gadgets Go support for Protocol Buffers - Google's data interchange format Copyright 2010 The Go Authors. All rights reserved. 
https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/golang/protobuf; version v1.5.3 - https://github.com/golang/protobuf Copyright 2010 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/google/go-cmp/cmp; version v0.5.9 - https://github.com/google/go-cmp/cmp Copyright (c) 2017 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/google/uuid; version v1.3.0 - https://github.com/google/uuid Copyright (c) 2009,2014 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/hanwen/go-fuse/v2; version v2.4.1 - https://github.com/hanwen/go-fuse/v2 New BSD License Copyright (c) 2010 the Go-FUSE Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Ivan Krasin nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/hashicorp/errwrap; version v1.1.0 - https://github.com/hashicorp/errwrap Mozilla Public License, version 2.0 1. Definitions 1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution. 1.3. “Contribution” means Covered Software of a particular Contributor. 1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. “Executable Form” means any form of the work other than Source Code Form. 1.7. “Larger Work” means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. “License” means this document. 1.9. 
“Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. “Modifications” means any of the following: a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. 1.11. “Patent Claims” of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. “Source Code Form” means the form of the work preferred for making modifications. 1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: a. 
under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or c. under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. 
Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. 
Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. 
Disclaimer of Warranty Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. 
Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - “Incompatible With Secondary Licenses” Notice This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- ** github.com/hashicorp/go-cleanhttp; version v0.5.2 - https://github.com/hashicorp/go-cleanhttp Mozilla Public License, version 2.0 1. Definitions 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. 
"Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: a. 
under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or c. under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. 
Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. 
Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. 
Disclaimer of Warranty Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. 
Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- ** github.com/hashicorp/go-multierror; version v1.1.1 - https://github.com/hashicorp/go-multierror Mozilla Public License, version 2.0 1. Definitions 1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution. 1.3. “Contribution” means Covered Software of a particular Contributor. 1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. “Executable Form” means any form of the work other than Source Code Form. 1.7. “Larger Work” means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. “License” means this document. 1.9. 
“Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. “Modifications” means any of the following: a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. 1.11. “Patent Claims” of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. “Source Code Form” means the form of the work preferred for making modifications. 1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: a. 
under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or c. under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. 
Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. 
Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. 
Disclaimer of Warranty Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. 
Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - “Incompatible With Secondary Licenses” Notice This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- ** github.com/hashicorp/go-retryablehttp; version v0.7.4 - https://github.com/hashicorp/go-retryablehttp Copyright (c) 2015 HashiCorp, Inc. Mozilla Public License, version 2.0 1. Definitions 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. 
"Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: a. 
under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or c. under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. 
Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. 
Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. 
Disclaimer of Warranty Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. 
Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- ** github.com/imdario/mergo; version v0.3.13 - https://github.com/imdario/mergo Copyright (c) 2013 Dario Castañé. All rights reserved. Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/josharian/intern; version v1.0.0 - https://github.com/josharian/intern MIT License Copyright (c) 2019 Josh Bleecher Snyder Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- ** github.com/json-iterator/go; version v1.1.12 - https://github.com/json-iterator/go MIT License Copyright (c) 2016 json-iterator Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/klauspost/compress/internal/snapref; version v1.16.7 - https://github.com/klauspost/compress/internal/snapref Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/klauspost/compress/zstd/internal/xxhash; version v1.16.7 - https://github.com/klauspost/compress/zstd/internal/xxhash Copyright (c) 2016 Caleb Spare MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/mailru/easyjson; version v0.7.6 - https://github.com/mailru/easyjson Copyright (c) 2016 Mail.Ru Group Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- ** github.com/montanaflynn/stats; version v0.7.1 - https://github.com/montanaflynn/stats The MIT License (MIT) Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/munnerz/goautoneg; version v0.0.0-20191010083416-a7dc8b61c822 - https://github.com/munnerz/goautoneg Copyright (c) 2011, Open Knowledge Foundation Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Open Knowledge Foundation Ltd. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/pkg/errors; version v0.9.1 - https://github.com/pkg/errors Copyright (c) 2015, Dave Cheney All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg; version v0.44.0 - https://github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg PACKAGE package goautoneg import "bitbucket.org/ww/goautoneg" HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html Copyright (c) 2011, Open Knowledge Foundation Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Open Knowledge Foundation Ltd. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FUNCTIONS func Negotiate(header string, alternatives []string) (content_type string) Negotiate the most appropriate content_type given the accept header and a list of alternatives. func ParseAccept(header string) (accept []Accept) Parse an Accept Header string returning a sorted list of clauses TYPES type Accept struct { Type, SubType string Q float32 Params map[string]string } Structure to represent a clause in an HTTP Accept Header SUBDIRECTORIES .hg -------------------------------------------------------------------------------- ** github.com/rs/xid; version v1.5.0 - https://github.com/rs/xid Copyright (c) 2015 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/sirupsen/logrus; version v1.9.3 - https://github.com/sirupsen/logrus The MIT License (MIT) Copyright (c) 2014 Simon Eskildsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** github.com/spf13/pflag; version v1.0.5 - https://github.com/spf13/pflag Copyright (c) 2012 Alex Ogier. All rights reserved. Copyright (c) 2012 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- ** go.etcd.io/bbolt; version v1.3.7 - https://go.etcd.io/bbolt The MIT License (MIT) Copyright (c) 2013 Ben Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- ** golang.org/x/net; version v0.12.0 - https://golang.org/x/net Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** golang.org/x/oauth2; version v0.8.0 - https://golang.org/x/oauth2 Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** golang.org/x/sync; version v0.3.0 - https://golang.org/x/sync Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** golang.org/x/sys; version v0.10.0 - https://golang.org/x/sys Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** golang.org/x/term; version v0.10.0 - https://golang.org/x/term Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** golang.org/x/text; version v0.11.0 - https://golang.org/x/text Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** golang.org/x/time/rate; version v0.3.0 - https://golang.org/x/time/rate Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** google.golang.org/protobuf; version v1.31.0 - https://google.golang.org/protobuf Copyright (c) 2018 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** gopkg.in/inf.v0; version v0.9.1 - https://gopkg.in/inf.v0 Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** gopkg.in/yaml.v3; version v3.0.1 - https://gopkg.in/yaml.v3 This project is covered by two different licenses: MIT and Apache. #### MIT License #### The following files were ported to Go from C files of libyaml, and thus are still covered by their original MIT license, with the additional copyright staring in 2011 when the project was ported over: apic.go emitterc.go parserc.go readerc.go scannerc.go writerc.go yamlh.go yamlprivateh.go Copyright (c) 2006-2010 Kirill Simonov Copyright (c) 2006-2011 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### Apache License ### All the remaining project files are covered by the Apache license: Copyright (c) 2011-2019 Canonical Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -------------------------------------------------------------------------------- ** k8s.io/apimachinery/third_party/forked/golang/reflect; version v0.26.3 - https://k8s.io/apimachinery/third_party/forked/golang/reflect Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json; version v0.0.0-20221012153701-172d655c2280 - https://k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json Copyright (c) 2020 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** k8s.io/utils/internal/third_party/forked/golang/net; version v0.0.0-20230220204549-a5ecb0141aa5 - https://k8s.io/utils/internal/third_party/forked/golang/net Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- ** sigs.k8s.io/yaml; version v1.3.0 - https://sigs.k8s.io/yaml The MIT License (MIT) Copyright (c) 2014 Sam Ghods Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Copyright (c) 2012 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. soci-snapshotter-0.4.1/benchmark/000077500000000000000000000000001454010642300167525ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/benchmarkTests.go000066400000000000000000000266541454010642300222730ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package benchmark import ( "context" "testing" "time" "github.com/awslabs/soci-snapshotter/benchmark/framework" "github.com/containerd/containerd" "github.com/containerd/containerd/log" "github.com/google/uuid" ) var ( outputDir = "./output" containerdAddress = "/tmp/containerd-grpc/containerd.sock" containerdRoot = "/tmp/lib/containerd" containerdState = "/tmp/containerd" containerdSociConfig = "../containerd_soci_config.toml" containerdStargzConfig = "../containerd_stargz_config.toml" platform = "linux/amd64" sociBinary = "../../out/soci-snapshotter-grpc" sociAddress = "/tmp/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" sociRoot = "/tmp/lib/soci-snapshotter-grpc" sociConfig = "../soci_config.toml" stargzAddress = "/tmp/containerd-stargz-grpc/containerd-stargz-grpc.sock" stargzConfig = "../stargz_config.toml" stargzRoot = "/tmp/lib/containerd-stargz-grpc" ) func PullImageFromRegistry(ctx context.Context, b *testing.B, imageRef string) { containerdProcess, err := getContainerdProcess(ctx, containerdSociConfig) if err != nil { b.Fatalf("Error Starting Containerd: %v\n", err) } defer containerdProcess.StopProcess() b.ResetTimer() _, err = containerdProcess.PullImageFromRegistry(ctx, imageRef, platform) if err != nil { b.Fatalf("Error Pulling Image: %v\n", err) } b.StopTimer() err = containerdProcess.DeleteImage(ctx, imageRef) if err != nil { b.Fatalf("Error Deleting Image: %v\n", err) } } func SociRPullPullImage( ctx context.Context, b *testing.B, imageRef string, indexDigest string) { containerdProcess, err := getContainerdProcess(ctx, containerdSociConfig) if err != nil { 
b.Fatalf("Failed to create containerd proc: %v\n", err) } defer containerdProcess.StopProcess() sociProcess, err := getSociProcess() if err != nil { b.Fatalf("Failed to create soci proc: %v\n", err) } defer sociProcess.StopProcess() sociContainerdProc := SociContainerdProcess{containerdProcess} b.ResetTimer() _, err = sociContainerdProc.SociRPullImageFromRegistry(ctx, imageRef, indexDigest) if err != nil { b.Fatalf("%s", err) } b.StopTimer() } func SociFullRun( ctx context.Context, b *testing.B, imageRef string, indexDigest string, readyLine string, testName string) { testUUID := uuid.New().String() ctx = log.WithLogger(ctx, log.G(ctx).WithField("test_name", testName)) ctx = log.WithLogger(ctx, log.G(ctx).WithField("uuid", testUUID)) containerdProcess, err := getContainerdProcess(ctx, containerdSociConfig) if err != nil { b.Fatalf("Failed to create containerd proc: %v\n", err) } defer containerdProcess.StopProcess() sociProcess, err := getSociProcess() if err != nil { b.Fatalf("Failed to create soci proc: %v\n", err) } defer sociProcess.StopProcess() sociContainerdProc := SociContainerdProcess{containerdProcess} b.ResetTimer() pullStart := time.Now() log.G(ctx).WithField("benchmark", "Test").WithField("event", "Start").Infof("Start Test") log.G(ctx).WithField("benchmark", "Pull").WithField("event", "Start").Infof("Start Pull Image") image, err := sociContainerdProc.SociRPullImageFromRegistry(ctx, imageRef, indexDigest) log.G(ctx).WithField("benchmark", "Pull").WithField("event", "Stop").Infof("Stop Pull Image") pullDuration := time.Since(pullStart) b.ReportMetric(float64(pullDuration.Milliseconds()), "pullDuration") if err != nil { b.Fatalf("%s", err) } log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Start").Infof("Start Create Container") container, cleanupContainer, err := sociContainerdProc.CreateSociContainer(ctx, image) log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Stop").Infof("Stop Create Container") if err 
!= nil { b.Fatalf("%s", err) } defer cleanupContainer() log.G(ctx).WithField("benchmark", "CreateTask").WithField("event", "Start").Infof("Start Create Task") taskDetails, cleanupTask, err := sociContainerdProc.CreateTask(ctx, container) log.G(ctx).WithField("benchmark", "CreateTask").WithField("event", "Stop").Infof("Stop Create Task") if err != nil { b.Fatalf("%s", err) } defer cleanupTask() log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Start").Infof("Start Run Task") runLazyTaskStart := time.Now() cleanupRun, err := sociContainerdProc.RunContainerTaskForReadyLine(ctx, taskDetails, readyLine) lazyTaskDuration := time.Since(runLazyTaskStart) log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Stop").Infof("Stop Run Task") b.ReportMetric(float64(lazyTaskDuration.Milliseconds()), "lazyTaskDuration") if err != nil { b.Fatalf("%s", err) } defer cleanupRun() containerSecondRun, cleanupContainerSecondRun, err := sociContainerdProc.CreateSociContainer(ctx, image) if err != nil { b.Fatalf("%s", err) } defer cleanupContainerSecondRun() taskDetailsSecondRun, cleanupTaskSecondRun, err := sociContainerdProc.CreateTask(ctx, containerSecondRun) if err != nil { b.Fatalf("%s", err) } defer cleanupTaskSecondRun() log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Start").Infof("Start Run Task Twice") runLocalStart := time.Now() cleanupRunSecond, err := sociContainerdProc.RunContainerTaskForReadyLine(ctx, taskDetailsSecondRun, readyLine) localTaskStats := time.Since(runLocalStart) log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Stop").Infof("Stop Run Task Twice") b.ReportMetric(float64(localTaskStats.Milliseconds()), "localTaskStats") if err != nil { b.Fatalf("%s", err) } defer cleanupRunSecond() log.G(ctx).WithField("benchmark", "Test").WithField("event", "Stop").Infof("Stop Test") b.StopTimer() } func OverlayFSFullRun( ctx context.Context, b *testing.B, imageRef string, readyLine string, testName string) { 
testUUID := uuid.New().String() ctx = log.WithLogger(ctx, log.G(ctx).WithField("test_name", testName)) ctx = log.WithLogger(ctx, log.G(ctx).WithField("uuid", testUUID)) containerdProcess, err := getContainerdProcess(ctx, containerdSociConfig) if err != nil { b.Fatalf("Failed to create containerd proc: %v\n", err) } defer containerdProcess.StopProcess() b.ResetTimer() log.G(ctx).WithField("benchmark", "Test").WithField("event", "Start").Infof("Start Test") log.G(ctx).WithField("benchmark", "Pull").WithField("event", "Start").Infof("Start Pull Image") pullStart := time.Now() image, err := containerdProcess.PullImageFromRegistry(ctx, imageRef, platform) pullDuration := time.Since(pullStart) log.G(ctx).WithField("benchmark", "Pull").WithField("event", "Stop").Infof("Stop Pull Image") b.ReportMetric(float64(pullDuration.Milliseconds()), "pullDuration") if err != nil { b.Fatalf("%s", err) } log.G(ctx).WithField("benchmark", "Unpack").WithField("event", "Start").Infof("Start Unpack Image") err = image.Unpack(ctx, "overlayfs") log.G(ctx).WithField("benchmark", "Unpack").WithField("event", "Stop").Infof("Stop Unpack Image") if err != nil { b.Fatalf("%s", err) } log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Start").Infof("Start Create Container") container, cleanupContainer, err := containerdProcess.CreateContainer(ctx, image) log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Stop").Infof("Stop Create Container") if err != nil { b.Fatalf("%s", err) } defer cleanupContainer() log.G(ctx).WithField("benchmark", "CreateTask").WithField("event", "Start").Infof("Start Create Task") taskDetails, cleanupTask, err := containerdProcess.CreateTask(ctx, container) log.G(ctx).WithField("benchmark", "CreateTask").WithField("event", "Stop").Infof("Stop Create Task") if err != nil { b.Fatalf("%s", err) } defer cleanupTask() log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Start").Infof("Start Run Task") runLazyTaskStart := 
time.Now() cleanupRun, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetails, readyLine) lazyTaskDuration := time.Since(runLazyTaskStart) log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Stop").Infof("Stop Run Task") b.ReportMetric(float64(lazyTaskDuration.Milliseconds()), "lazyTaskDuration") if err != nil { b.Fatalf("%s", err) } defer cleanupRun() containerSecondRun, cleanupContainerSecondRun, err := containerdProcess.CreateContainer(ctx, image) if err != nil { b.Fatalf("%s", err) } defer cleanupContainerSecondRun() taskDetailsSecondRun, cleanupTaskSecondRun, err := containerdProcess.CreateTask(ctx, containerSecondRun) if err != nil { b.Fatalf("%s", err) } defer cleanupTaskSecondRun() log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Start").Infof("Start Run Task Twice") runLocalStart := time.Now() cleanupRunSecond, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetailsSecondRun, readyLine) localTaskStats := time.Since(runLocalStart) log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Stop").Infof("Stop Run Task Twice") b.ReportMetric(float64(localTaskStats.Milliseconds()), "localTaskStats") if err != nil { b.Fatalf("%s", err) } defer cleanupRunSecond() log.G(ctx).WithField("benchmark", "Test").WithField("event", "Stop").Infof("Stop Test") b.StopTimer() } func StargzFullRun( ctx context.Context, b *testing.B, imageRef string, readyLine string, stargzBinary string) { containerdProcess, err := getContainerdProcess(ctx, containerdStargzConfig) if err != nil { b.Fatalf("Failed to create containerd proc: %v\n", err) } defer containerdProcess.StopProcess() stargzProcess, err := getStargzProcess(stargzBinary) if err != nil { b.Fatalf("Failed to create stargz proc: %v\n", err) } defer stargzProcess.StopProcess() stargzContainerdProc := StargzContainerdProcess{containerdProcess} b.ResetTimer() image, err := stargzContainerdProc.StargzRpullImageFromRegistry(ctx, imageRef) if err != nil { 
b.Fatalf("%s", err) } container, cleanupContainer, err := stargzContainerdProc.CreateContainer(ctx, image, containerd.WithSnapshotter("stargz")) if err != nil { b.Fatalf("%s", err) } defer cleanupContainer() taskDetails, cleanupTask, err := containerdProcess.CreateTask(ctx, container) if err != nil { b.Fatalf("%s", err) } defer cleanupTask() cleanupRun, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetails, readyLine) if err != nil { b.Fatalf("%s", err) } defer cleanupRun() b.StopTimer() } func getContainerdProcess(ctx context.Context, containerdConfig string) (*framework.ContainerdProcess, error) { return framework.StartContainerd( containerdAddress, containerdRoot, containerdState, containerdConfig, outputDir) } func getSociProcess() (*SociProcess, error) { return StartSoci( sociBinary, sociAddress, sociRoot, containerdAddress, sociConfig, outputDir) } func getStargzProcess(stargzBinary string) (*StargzProcess, error) { return StartStargz( stargzBinary, stargzAddress, stargzConfig, stargzRoot, outputDir) } soci-snapshotter-0.4.1/benchmark/comparisonTest/000077500000000000000000000000001454010642300217645ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/comparisonTest/main.go000066400000000000000000000056171454010642300232500ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package main import ( "flag" "fmt" "os" "testing" "github.com/awslabs/soci-snapshotter/benchmark" "github.com/awslabs/soci-snapshotter/benchmark/framework" ) var ( outputDir = "../comparisonTest/output" ) func main() { var ( numberOfTests int configCsv string showCom bool imageList []benchmark.ImageDescriptor err error commit string ) flag.BoolVar(&showCom, "show-commit", false, "tag the commit hash to the benchmark results") flag.IntVar(&numberOfTests, "count", 5, "Describes the number of runs a benchmarker should run. Default: 5") flag.StringVar(&configCsv, "f", "default", "Path to a csv file describing image details in this order ['Name','Image ref', 'Ready line', 'manifest ref'].") flag.Parse() if showCom { commit, _ = benchmark.GetCommitHash() } else { commit = "N/A" } if configCsv == "default" { imageList = benchmark.GetDefaultWorkloads() } else { imageList, err = benchmark.GetImageListFromCsv(configCsv) if err != nil { errMsg := fmt.Sprintf("Failed to read csv file %s with error:%v\n", configCsv, err) panic(errMsg) } } err = os.Mkdir(outputDir, 0755) if err != nil && !os.IsExist(err) { panic(err) } logFile, err := os.OpenFile(outputDir+"/benchmark_log", os.O_RDWR|os.O_CREATE, 0664) if err != nil { panic(err) } defer logFile.Close() ctx, cancelCtx := framework.GetTestContext(logFile) defer cancelCtx() var drivers []framework.BenchmarkTestDriver for _, image := range imageList { shortName := image.ShortName imageRef := image.ImageRef sociIndexManifestRef := image.SociIndexManifestRef readyLine := image.ReadyLine drivers = append(drivers, framework.BenchmarkTestDriver{ TestName: "OverlayFSFull" + shortName, NumberOfTests: numberOfTests, TestFunction: func(b *testing.B) { benchmark.OverlayFSFullRun(ctx, b, imageRef, readyLine, "OverlayFSFull"+shortName) }, }) drivers = append(drivers, framework.BenchmarkTestDriver{ TestName: "SociFull" + shortName, NumberOfTests: numberOfTests, TestFunction: func(b *testing.B) { benchmark.SociFullRun(ctx, b, imageRef, 
sociIndexManifestRef, readyLine, "SociFull"+shortName) }, }) } benchmarks := framework.BenchmarkFramework{ OutputDir: outputDir, CommitID: commit, Drivers: drivers, } benchmarks.Run(ctx) } soci-snapshotter-0.4.1/benchmark/containerd_soci_config.toml000066400000000000000000000002511454010642300243350ustar00rootroot00000000000000version = 2 [debug] level = "DEBUG" [proxy_plugins] [proxy_plugins.soci] type = "snapshot" address = "/tmp/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" soci-snapshotter-0.4.1/benchmark/containerd_stargz_config.toml000066400000000000000000000002551454010642300247160ustar00rootroot00000000000000version = 2 [debug] level = "DEBUG" [proxy_plugins] [proxy_plugins.stargz] type = "snapshot" address = "/tmp/containerd-stargz-grpc/containerd-stargz-grpc.sock" soci-snapshotter-0.4.1/benchmark/framework/000077500000000000000000000000001454010642300207475ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/framework/containerd_utils.go000066400000000000000000000167011454010642300246510ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package framework import ( "bufio" "context" "fmt" "io" "io/fs" "os" "os/exec" "strings" "syscall" "time" "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" "github.com/sirupsen/logrus" ) var ( testNamespace = "BENCHMARK_TESTING" testContainerID = "TEST_RUN_CONTAINER" testEnvironment = "TEST_RUNTIME" outputFilePerm fs.FileMode = 0777 ) type ContainerdProcess struct { command *exec.Cmd address string root string state string stdout *os.File stderr *os.File Client *containerd.Client } func StartContainerd( containerdAddress string, containerdRoot string, containerdState string, containerdConfig string, containerdOutput string) (*ContainerdProcess, error) { containerdCmd := exec.Command("containerd", "-a", containerdAddress, "--root", containerdRoot, "--state", containerdState, "-c", containerdConfig) err := os.MkdirAll(containerdOutput, outputFilePerm) if err != nil { return nil, err } stdoutFile, err := os.Create(containerdOutput + "/containerd-stdout") if err != nil { return nil, err } containerdCmd.Stdout = stdoutFile stderrFile, err := os.Create(containerdOutput + "/containerd-stderr") if err != nil { return nil, err } containerdCmd.Stderr = stderrFile err = containerdCmd.Start() if err != nil { return nil, err } client, err := newClient(containerdAddress) if err != nil { return nil, err } return &ContainerdProcess{ command: containerdCmd, address: containerdAddress, root: containerdRoot, stdout: stdoutFile, stderr: stderrFile, state: containerdState, Client: client}, nil } func (proc *ContainerdProcess) StopProcess() { if proc.Client != nil { proc.Client.Close() } if proc.stdout != nil { proc.stdout.Close() } if proc.stderr != nil { proc.stderr.Close() } if proc.command != nil { proc.command.Process.Kill() } os.RemoveAll(proc.root) os.RemoveAll(proc.state) 
os.RemoveAll(proc.address) } func (proc *ContainerdProcess) PullImage( ctx context.Context, imageRef string, platform string) (containerd.Image, error) { image, pullErr := proc.Client.Pull(ctx, imageRef, GetRemoteOpts(ctx, platform)...) if pullErr != nil { return nil, pullErr } return image, nil } func (proc *ContainerdProcess) DeleteImage(ctx context.Context, imageRef string) error { imageService := proc.Client.ImageService() err := imageService.Delete(ctx, imageRef, images.SynchronousDelete()) if err != nil { return err } return nil } func (proc *ContainerdProcess) CreateContainer( ctx context.Context, image containerd.Image, opts ...containerd.NewContainerOpts) (containerd.Container, func(), error) { id := fmt.Sprintf("%s-%d", testContainerID, time.Now().UnixNano()) opts = append(opts, containerd.WithNewSnapshot(id, image)) opts = append(opts, containerd.WithNewSpec(oci.WithImageConfig(image))) container, err := proc.Client.NewContainer( ctx, id, opts...) if err != nil { return nil, nil, err } cleanupFunc := func() { err = container.Delete(ctx, containerd.WithSnapshotCleanup) if err != nil { fmt.Printf("Error deleting container: %v\n", err) } } return container, cleanupFunc, nil } type TaskDetails struct { task containerd.Task stdoutReader io.Reader stderrReader io.Reader } func (proc *ContainerdProcess) CreateTask( ctx context.Context, container containerd.Container) (*TaskDetails, func(), error) { stdoutPipeReader, stdoutPipeWriter := io.Pipe() stderrPipeReader, stderrPipeWriter := io.Pipe() cioCreator := cio.NewCreator(cio.WithStreams(os.Stdin, stdoutPipeWriter, stderrPipeWriter)) task, err := container.NewTask(ctx, cioCreator) if err != nil { return nil, nil, err } cleanupFunc := func() { stdoutPipeReader.Close() stdoutPipeWriter.Close() stderrPipeReader.Close() stderrPipeWriter.Close() processStatus, _ := task.Status(ctx) if processStatus.Status != "stopped" { fmt.Printf("Tried to kill task") err = task.Kill(ctx, syscall.SIGKILL) if err != nil { 
fmt.Printf("Error killing task: %v\n", err) } } _, err = task.Delete(ctx) if err != nil { fmt.Printf("Error deleting task: %v\n", err) } } return &TaskDetails{ task, stdoutPipeReader, stderrPipeReader}, cleanupFunc, err } func (proc *ContainerdProcess) RunContainerTaskForReadyLine( ctx context.Context, taskDetails *TaskDetails, readyLine string) (func(), error) { stdoutScanner := bufio.NewScanner(taskDetails.stdoutReader) stderrScanner := bufio.NewScanner(taskDetails.stderrReader) exitStatusC, err := taskDetails.task.Wait(ctx) if err != nil { return nil, err } resultChannel := make(chan string, 1) timeoutCtx, cancel := context.WithTimeout(context.Background(), 180*time.Second) defer cancel() go func() { select { case <-exitStatusC: resultChannel <- "PROC_EXIT" case <-timeoutCtx.Done(): return } }() go func() { for stderrScanner.Scan() { nextLine := stderrScanner.Text() if strings.Contains(nextLine, readyLine) { resultChannel <- "READYLINE_STDERR" return } select { case <-timeoutCtx.Done(): return default: } } }() go func() { for stdoutScanner.Scan() { nextLine := stdoutScanner.Text() if strings.Contains(nextLine, readyLine) { resultChannel <- "READYLINE_STDOUT" return } select { case <-timeoutCtx.Done(): return default: } } }() if err := taskDetails.task.Start(ctx); err != nil { return nil, err } select { case <-resultChannel: break case <-timeoutCtx.Done(): break } cleanupFunc := func() { processStatus, _ := taskDetails.task.Status(ctx) if processStatus.Status == "running" { err = taskDetails.task.Kill(ctx, syscall.SIGKILL) if err != nil { fmt.Printf("Error killing task: %v\n", err) } exitChannel, _ := taskDetails.task.Wait(ctx) <-exitChannel } } return cleanupFunc, nil } func GetRemoteOpts(ctx context.Context, platform string) []containerd.RemoteOpt { var opts []containerd.RemoteOpt opts = append(opts, containerd.WithPlatform(platform)) return opts } func GetTestContext(logFile io.Writer) (context.Context, context.CancelFunc) { logrus.SetLevel(logrus.InfoLevel) 
logrus.SetFormatter(&logrus.JSONFormatter{ TimestampFormat: log.RFC3339NanoFixed, }) if logFile != nil { logrus.SetOutput(logFile) } else { logrus.SetOutput(os.Stderr) } ctx := log.WithLogger(context.Background(), log.L) ctx, cancel := context.WithCancel(ctx) ctx = namespaces.WithNamespace(ctx, testNamespace) return ctx, cancel } func newClient(address string) (*containerd.Client, error) { opts := []containerd.ClientOpt{} if rt := os.Getenv(testEnvironment); rt != "" { opts = append(opts, containerd.WithDefaultRuntime(rt)) } return containerd.New(address, opts...) } soci-snapshotter-0.4.1/benchmark/framework/framework.go000066400000000000000000000123631454010642300233000ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package framework import ( "context" "encoding/json" "flag" "fmt" "io/fs" "os" "strconv" "testing" "github.com/containerd/containerd/log" "github.com/montanaflynn/stats" ) var ( resultFilename = "results.json" resultFilePerm fs.FileMode = 0644 ) type BenchmarkFramework struct { OutputDir string `json:"-"` CommitID string `json:"commit"` Drivers []BenchmarkTestDriver `json:"benchmarkTests"` } type BenchmarkTestStats struct { BenchmarkTimes []float64 `json:"BenchmarkTimes"` StdDev float64 `json:"stdDev"` Mean float64 `json:"mean"` Min float64 `json:"min"` Pct25 float64 `json:"pct25"` Pct50 float64 `json:"pct50"` Pct75 float64 `json:"pct75"` Pct90 float64 `json:"pct90"` Max float64 `json:"max"` } type BenchmarkTestDriver struct { TestName string `json:"testName"` NumberOfTests int `json:"numberOfTests"` BeforeFunction func() `json:"-"` TestFunction func(*testing.B) `json:"-"` AfterFunction func() error `json:"-"` TestsRun int `json:"-"` FullRunStats BenchmarkTestStats `json:"fullRunStats"` PullStats BenchmarkTestStats `json:"pullStats"` LazyTaskStats BenchmarkTestStats `json:"lazyTaskStats"` LocalTaskStats BenchmarkTestStats `json:"localTaskStats"` } func (frame *BenchmarkFramework) Run(ctx context.Context) { testing.Init() flag.Set("test.benchtime", "1x") flag.Parse() for i := 0; i < len(frame.Drivers); i++ { testDriver := &frame.Drivers[i] fmt.Printf("Running tests for %s\n", testDriver.TestName) if testDriver.BeforeFunction != nil { testDriver.BeforeFunction() } for j := 0; j < testDriver.NumberOfTests; j++ { log.G(ctx).WithField("test_name", testDriver.TestName).Infof("TestStart for " + testDriver.TestName + "_" + strconv.Itoa(j+1)) fmt.Printf("Running test %d of %d\n", j+1, testDriver.NumberOfTests) res := testing.Benchmark(testDriver.TestFunction) testDriver.FullRunStats.BenchmarkTimes = append(testDriver.FullRunStats.BenchmarkTimes, res.T.Seconds()) testDriver.PullStats.BenchmarkTimes = append(testDriver.PullStats.BenchmarkTimes, 
res.Extra["pullDuration"]/1000) testDriver.LazyTaskStats.BenchmarkTimes = append(testDriver.LazyTaskStats.BenchmarkTimes, res.Extra["lazyTaskDuration"]/1000) testDriver.LocalTaskStats.BenchmarkTimes = append(testDriver.LocalTaskStats.BenchmarkTimes, res.Extra["localTaskStats"]/1000) } testDriver.calculateStats() if testDriver.AfterFunction != nil { err := testDriver.AfterFunction() if err != nil { fmt.Printf("After function error: %v\n", err) } } } json, err := json.MarshalIndent(frame, "", " ") if err != nil { fmt.Printf("JSON Marshalling Error: %v\n", err) } err = os.MkdirAll(frame.OutputDir, resultFilePerm) if err != nil { fmt.Printf("Failed to Create Output Dir: %v\n", err) } resultFileLoc := frame.OutputDir + "/" + resultFilename err = os.WriteFile(resultFileLoc, json, resultFilePerm) if err != nil { fmt.Printf("WriteFile Error: %v\n", err) } } func (driver *BenchmarkTestDriver) calculateStats() { driver.FullRunStats.calculateTestStat() driver.PullStats.calculateTestStat() driver.LazyTaskStats.calculateTestStat() driver.LocalTaskStats.calculateTestStat() } func (testStats *BenchmarkTestStats) calculateTestStat() { var err error testStats.StdDev, err = stats.StandardDeviation(testStats.BenchmarkTimes) if err != nil { fmt.Printf("Error Calculating Std Dev: %v\n", err) testStats.StdDev = -1 } testStats.Mean, err = stats.Mean(testStats.BenchmarkTimes) if err != nil { fmt.Printf("Error Calculating Mean: %v\n", err) testStats.Mean = -1 } testStats.Min, err = stats.Min(testStats.BenchmarkTimes) if err != nil { fmt.Printf("Error Calculating Min: %v\n", err) testStats.Min = -1 } testStats.Pct25, err = stats.Percentile(testStats.BenchmarkTimes, 25) if err != nil { fmt.Printf("Error Calculating 25th Pct: %v\n", err) testStats.Pct25 = -1 } testStats.Pct50, err = stats.Percentile(testStats.BenchmarkTimes, 50) if err != nil { fmt.Printf("Error Calculating 50th Pct: %v\n", err) testStats.Pct50 = -1 } testStats.Pct75, err = stats.Percentile(testStats.BenchmarkTimes, 75) if 
err != nil { fmt.Printf("Error Calculating 75th Pct: %v\n", err) testStats.Pct75 = -1 } testStats.Pct90, err = stats.Percentile(testStats.BenchmarkTimes, 90) if err != nil { fmt.Printf("Error Calculating 90th Pct: %v\n", err) testStats.Pct90 = -1 } testStats.Max, err = stats.Max(testStats.BenchmarkTimes) if err != nil { fmt.Printf("Error Calculating Max: %v\n", err) testStats.Max = -1 } } soci-snapshotter-0.4.1/benchmark/framework/parser/000077500000000000000000000000001454010642300222435ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/framework/parser/file_access.go000066400000000000000000000100111454010642300250230ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package bparser import ( "bufio" "encoding/json" "os" "sort" "strings" "time" ) var ( outputDir = "./output/" FileAccessDir = outputDir + "file_access_logs/" sociLogs = outputDir + "soci-snapshotter-stderr" containerdLogs = outputDir + "containerd-stderr" sociLogsFuseMessage = "FUSE operation" ) type FileAccessPatterns struct { ImageName string `json:"ImageName"` ContainerStartTime time.Time `json:"containerStartTime"` TotalOperationCount map[string]int `json:"TotalOperationCounts"` Operations []Operation `json:"operations"` } type SociLog struct { Msg string `json:"msg"` } type BaseOperation struct { Level string `json:"level"` Msg string `json:"msg"` Operation string `json:"operation"` Path string `json:"path"` Time time.Time `json:"time"` } type Operation struct { Operation string `json:"operation"` Path string `json:"path"` FirstAccessTimeAfterStart string `json:"firstAccessTimeAfterStart"` Count int `json:"count"` } func ParseFileAccesses(imageName string) error { startTime, err := getTaskStartTime() if err != nil { return err } totalCounts := make(map[string]int) fa := FileAccessPatterns{ ImageName: imageName, ContainerStartTime: *startTime, TotalOperationCount: totalCounts, } file, err := os.Open(sociLogs) if err != nil { return err } defer file.Close() sociLog := SociLog{} m := make(map[string]Operation) scanner := bufio.NewScanner(file) for scanner.Scan() { log := scanner.Bytes() if err := json.Unmarshal(log, &sociLog); err != nil { return err } if sociLog.Msg == sociLogsFuseMessage { var tempOperation BaseOperation if err := json.Unmarshal(log, &tempOperation); err != nil { return err } op := tempOperation.Operation + tempOperation.Path if val, ok := m[op]; ok { val.Count++ m[op] = val } else { m[op] = Operation{ Operation: tempOperation.Operation, Path: tempOperation.Path, FirstAccessTimeAfterStart: tempOperation.Time.Sub(fa.ContainerStartTime).String(), Count: 1, } } fa.TotalOperationCount[tempOperation.Operation]++ } } keys := make([]string, 0, 
len(m)) for key := range m { keys = append(keys, key) } sort.SliceStable(keys, func(i, j int) bool { t1, _ := time.ParseDuration(m[keys[i]].FirstAccessTimeAfterStart) t2, _ := time.ParseDuration(m[keys[j]].FirstAccessTimeAfterStart) return t1 < t2 }) for _, key := range keys { fa.Operations = append(fa.Operations, m[key]) } json, err := json.MarshalIndent(fa, "", " ") if err != nil { return err } imageFileAccessLogPath := FileAccessDir + imageName + "_access_patterns" err = os.WriteFile(imageFileAccessLogPath, json, 0644) if err != nil { return err } return nil } func getTaskStartTime() (*time.Time, error) { var taskStartTime time.Time file, err := os.Open(containerdLogs) if err != nil { return nil, err } defer file.Close() scanner := bufio.NewScanner(file) for scanner.Scan() { log := scanner.Text() if strings.Contains(log, "/tasks/start") { l := strings.Split(log, " ") temp := strings.ReplaceAll(l[0], "time=", "") taskStartTime, err = time.Parse(time.RFC3339, temp[1:len(temp)-1]) if err != nil { return nil, err } break } } return &taskStartTime, nil } soci-snapshotter-0.4.1/benchmark/framework/utils.go000066400000000000000000000040471454010642300224430ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package framework import ( "context" "io" "github.com/containerd/containerd" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker/config" dockercliconfig "github.com/docker/cli/cli/config" ) func (proc *ContainerdProcess) PullImageFromRegistry( ctx context.Context, imageRef string, platform string) (containerd.Image, error) { opts := GetRemoteOpts(ctx, platform) opts = append(opts, containerd.WithResolver(GetResolver(ctx, imageRef))) image, pullErr := proc.Client.Pull(ctx, imageRef, opts...) if pullErr != nil { return nil, pullErr } return image, nil } func GetResolver(ctx context.Context, imageRef string) remotes.Resolver { var username string var secret string refspec, err := reference.Parse(imageRef) if err != nil { panic("Failed to parse image ref") } cf := dockercliconfig.LoadDefaultConfigFile(io.Discard) if cf.ContainsAuth() { if ac, err := cf.GetAuthConfig(refspec.Hostname()); err == nil { username = ac.Username secret = ac.Password } } hostOptions := config.HostOptions{} hostOptions.Credentials = func(host string) (string, string, error) { return username, secret, nil } var PushTracker = docker.NewInMemoryTracker() options := docker.ResolverOptions{ Tracker: PushTracker, } options.Hosts = config.ConfigureHosts(ctx, hostOptions) return docker.NewResolver(options) } soci-snapshotter-0.4.1/benchmark/parser/000077500000000000000000000000001454010642300202465ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/parser/main.go000066400000000000000000000054171454010642300215300ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "bufio" "encoding/json" "fmt" "os" "strings" "time" ) type BenchmarkEvent struct { Benchmark string `json:"benchmark"` Event string `json:"event"` TestName string `json:"test_name"` UUID string `json:"uuid"` Timestamp string `json:"time"` } type ProfileEvent struct { EventName string StartTime time.Time StopTime time.Time } type BenchmarkProfile struct { UUID string TestName string Events map[string]ProfileEvent } func main() { logMap := make(map[string]BenchmarkProfile) logFileName := os.Args[1] logFile, err := os.Open(logFileName) if err != nil { panic("Cannot open " + logFileName) } defer logFile.Close() fileScanner := bufio.NewScanner(logFile) fileScanner.Split(bufio.ScanLines) for fileScanner.Scan() { line := fileScanner.Text() if strings.Contains(line, "benchmark") { parseLogLineToMap(fileScanner.Text(), logMap) } } if err := fileScanner.Err(); err != nil { panic("scan error: " + err.Error()) } printLogMap(logMap) } func parseLogLineToMap(logline string, logmap map[string]BenchmarkProfile) { var event BenchmarkEvent json.Unmarshal([]byte(logline), &event) testData, ok := logmap[event.UUID] if !ok { testData = BenchmarkProfile{ UUID: event.UUID, TestName: event.TestName, Events: make(map[string]ProfileEvent), } logmap[event.UUID] = testData } profileEvent, ok := testData.Events[event.Benchmark] if !ok { profileEvent = ProfileEvent{ EventName: event.Benchmark, } testData.Events[event.Benchmark] = profileEvent } if event.Event == "Start" { startTime, _ := time.Parse(time.RFC3339, event.Timestamp) profileEvent.StartTime = startTime } if event.Event == 
"Stop" { stopTime, _ := time.Parse(time.RFC3339, event.Timestamp) profileEvent.StopTime = stopTime } testData.Events[event.Benchmark] = profileEvent fmt.Printf("profile Event: %v\n", profileEvent) } func printLogMap(logMap map[string]BenchmarkProfile) { for uuid, profile := range logMap { fmt.Printf("Test: %s ID: %s Key: %s\n", profile.TestName, profile.UUID, uuid) for benchmarkName, benchmark := range profile.Events { fmt.Printf(" Event: %v Key: %s\n", benchmark, benchmarkName) } } } soci-snapshotter-0.4.1/benchmark/performanceTest/000077500000000000000000000000001454010642300221135ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/performanceTest/main.go000066400000000000000000000064431454010642300233750ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package main import ( "flag" "fmt" "os" "testing" "github.com/awslabs/soci-snapshotter/benchmark" "github.com/awslabs/soci-snapshotter/benchmark/framework" bparser "github.com/awslabs/soci-snapshotter/benchmark/framework/parser" ) var ( outputDir = "../performanceTest/output" ) func main() { var ( numberOfTests int configCsv string showCom bool parseFileAccessPatterns bool commit string imageList []benchmark.ImageDescriptor err error ) flag.BoolVar(&parseFileAccessPatterns, "parse-file-access", false, "Parse fuse file access patterns.") flag.BoolVar(&showCom, "show-commit", false, "tag the commit hash to the benchmark results") flag.IntVar(&numberOfTests, "count", 5, "Describes the number of runs a benchmarker should run. Default: 5") flag.StringVar(&configCsv, "f", "default", "Path to a csv file describing image details in this order ['Name','Image ref', 'Ready line', 'manifest ref'].") flag.Parse() if showCom { commit, _ = benchmark.GetCommitHash() } else { commit = "N/A" } if parseFileAccessPatterns { fileAccessDir := outputDir + "/file_access_logs" err := os.RemoveAll(fileAccessDir) if err != nil { panic(err) } err = os.MkdirAll(fileAccessDir, 0755) if err != nil { panic(err) } } if configCsv == "default" { imageList = benchmark.GetDefaultWorkloads() } else { imageList, err = benchmark.GetImageListFromCsv(configCsv) if err != nil { errMsg := fmt.Sprintf("Failed to read csv file %s with error:%v\n", configCsv, err) panic(errMsg) } } err = os.Mkdir(outputDir, 0755) if err != nil && !os.IsExist(err) { panic(err) } logFile, err := os.OpenFile(outputDir+"/benchmark_log", os.O_RDWR|os.O_CREATE, 0664) if err != nil { panic(err) } defer logFile.Close() ctx, cancelCtx := framework.GetTestContext(logFile) defer cancelCtx() var drivers []framework.BenchmarkTestDriver for _, image := range imageList { shortName := image.ShortName imageRef := image.ImageRef sociIndexManifestRef := image.SociIndexManifestRef readyLine := image.ReadyLine testName := "SociFull" + shortName 
driver := framework.BenchmarkTestDriver{ TestName: testName, NumberOfTests: numberOfTests, TestFunction: func(b *testing.B) { benchmark.SociFullRun(ctx, b, imageRef, sociIndexManifestRef, readyLine, testName) }, } if parseFileAccessPatterns { driver.AfterFunction = func() error { err := bparser.ParseFileAccesses(shortName) return err } } drivers = append(drivers, driver) } benchmarks := framework.BenchmarkFramework{ OutputDir: outputDir, CommitID: commit, Drivers: drivers, } benchmarks.Run(ctx) } soci-snapshotter-0.4.1/benchmark/soci_config.toml000066400000000000000000000001471454010642300221330ustar00rootroot00000000000000[cri_keychain] enable_keychain = true image_service_path = "/tmp/containerd-grpc/containerd.sock" soci-snapshotter-0.4.1/benchmark/soci_utils.go000066400000000000000000000077311454010642300214660ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package benchmark import ( "context" "errors" "fmt" "io/fs" "os" "os/exec" "syscall" "time" "github.com/awslabs/soci-snapshotter/benchmark/framework" "github.com/awslabs/soci-snapshotter/fs/source" "github.com/containerd/containerd" ctdsnapshotters "github.com/containerd/containerd/pkg/snapshotters" ) var ( outputFilePerm fs.FileMode = 0777 ) type SociContainerdProcess struct { *framework.ContainerdProcess } type SociProcess struct { command *exec.Cmd address string root string stdout *os.File stderr *os.File } func StartSoci( sociBinary string, sociAddress string, sociRoot string, containerdAddress string, configFile string, outputDir string) (*SociProcess, error) { sociCmd := exec.Command(sociBinary, "-address", sociAddress, "-config", configFile, "-log-level", "debug", "-root", sociRoot) err := os.MkdirAll(outputDir, outputFilePerm) if err != nil { return nil, err } stdoutFile, err := os.Create(outputDir + "/soci-snapshotter-stdout") if err != nil { return nil, err } sociCmd.Stdout = stdoutFile stderrFile, err := os.Create(outputDir + "/soci-snapshotter-stderr") if err != nil { return nil, err } sociCmd.Stderr = stderrFile err = sociCmd.Start() if err != nil { fmt.Printf("Soci Failed to Start %v\n", err) return nil, err } // The soci-snapshotter-grpc is not ready to be used until the // unix socket file is created sleepCount := 0 loopExit := false for !loopExit { time.Sleep(1 * time.Second) sleepCount++ if _, err := os.Stat(sociAddress); err == nil { loopExit = true } if sleepCount > 15 { return nil, errors.New("could not create .sock in time") } } return &SociProcess{ command: sociCmd, address: sociAddress, root: sociRoot, stdout: stdoutFile, stderr: stderrFile}, nil } func (proc *SociProcess) StopProcess() { if proc.stdout != nil { proc.stdout.Close() } if proc.stderr != nil { proc.stderr.Close() } if proc.command != nil { proc.command.Process.Kill() } err := os.RemoveAll(proc.address) if err != nil { fmt.Printf("Error removing Address: %v\n", err) } 
snapshotDir := proc.root + "/snapshotter/snapshots/" snapshots, err := os.ReadDir(snapshotDir) if err != nil { fmt.Printf("Could not read dir: %s\n", snapshotDir) } for _, s := range snapshots { mountpoint := snapshotDir + s.Name() + "/fs" _ = syscall.Unmount(mountpoint, syscall.MNT_FORCE) } err = os.RemoveAll(proc.root) if err != nil { fmt.Printf("Error removing root: %v\n", err) } } func (proc *SociContainerdProcess) SociRPullImageFromRegistry( ctx context.Context, imageRef string, sociIndexDigest string) (containerd.Image, error) { image, err := proc.Client.Pull(ctx, imageRef, []containerd.RemoteOpt{ containerd.WithResolver(framework.GetResolver(ctx, imageRef)), //nolint:staticcheck containerd.WithSchema1Conversion, //lint:ignore SA1019 containerd.WithPullUnpack, containerd.WithPullSnapshotter("soci"), containerd.WithImageHandlerWrapper(source.AppendDefaultLabelsHandlerWrapper( sociIndexDigest, ctdsnapshotters.AppendInfoHandlerWrapper(imageRef))), }...) if err != nil { fmt.Printf("Soci Pull Failed %v\n", err) return nil, err } return image, nil } func (proc *SociContainerdProcess) CreateSociContainer( ctx context.Context, image containerd.Image) (containerd.Container, func(), error) { return proc.CreateContainer(ctx, image, containerd.WithSnapshotter("soci")) } soci-snapshotter-0.4.1/benchmark/stargzTest/000077500000000000000000000000001454010642300211245ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/stargzTest/main.go000066400000000000000000000041211454010642300223750ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "os" "strconv" "testing" "github.com/awslabs/soci-snapshotter/benchmark" "github.com/awslabs/soci-snapshotter/benchmark/framework" ) var ( outputDir = "./output" ) func main() { commit := os.Args[1] configCsv := os.Args[2] numberOfTests, err := strconv.Atoi(os.Args[3]) stargzBinary := os.Args[4] if err != nil { errMsg := fmt.Sprintf("Failed to parse number of test %s with error:%v\n", os.Args[3], err) panic(errMsg) } imageList, err := benchmark.GetImageListFromCsv(configCsv) if err != nil { errMsg := fmt.Sprintf("Failed to read csv file %s with error:%v\n", configCsv, err) panic(errMsg) } err = os.Mkdir(outputDir, 0755) if err != nil && !os.IsExist(err) { panic(err) } logFile, err := os.OpenFile(outputDir+"/benchmark_log", os.O_RDWR|os.O_CREATE, 0664) if err != nil { panic(err) } defer logFile.Close() ctx, cancelCtx := framework.GetTestContext(logFile) defer cancelCtx() var drivers []framework.BenchmarkTestDriver for _, image := range imageList { shortName := image.ShortName imageRef := image.ImageRef readyLine := image.ReadyLine drivers = append(drivers, framework.BenchmarkTestDriver{ TestName: "StargzFullRun" + shortName, NumberOfTests: numberOfTests, TestFunction: func(b *testing.B) { benchmark.StargzFullRun(ctx, b, imageRef, readyLine, stargzBinary) }, }) } benchmarks := framework.BenchmarkFramework{ OutputDir: outputDir, CommitID: commit, Drivers: drivers, } benchmarks.Run(ctx) } 
soci-snapshotter-0.4.1/benchmark/stargz_config.toml000066400000000000000000000000001454010642300224740ustar00rootroot00000000000000soci-snapshotter-0.4.1/benchmark/stargz_utils.go000066400000000000000000000071721454010642300220420ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package benchmark import ( "context" "errors" "fmt" "os" "os/exec" "syscall" "time" "github.com/awslabs/soci-snapshotter/benchmark/framework" "github.com/containerd/containerd" ctdsnapshotters "github.com/containerd/containerd/pkg/snapshotters" ) type StargzProcess struct { command *exec.Cmd address string root string stdout *os.File stderr *os.File } type StargzContainerdProcess struct { *framework.ContainerdProcess } func StartStargz( stargzBinary string, stargzAddress string, stargzConfig string, stargzRoot string, outputDir string) (*StargzProcess, error) { stargzCmd := exec.Command(stargzBinary, "-address", stargzAddress, "-config", stargzConfig, "-log-level", "debug", "-root", stargzRoot) err := os.MkdirAll(outputDir, 0777) if err != nil { return nil, err } stdoutFile, err := os.Create(outputDir + "/stargz-snapshotter-stdout") if err != nil { return nil, err } stargzCmd.Stdout = stdoutFile stderrFile, err := os.Create(outputDir + "/stargz-snapshotter-stderr") if err != nil { return nil, err } stargzCmd.Stderr = stderrFile err = stargzCmd.Start() if err != nil { fmt.Printf("Stargz process failed to start %v\n", err) return nil, err } // The 
stargz-snapshotter-grpc is not ready to be used until the // unix socket file is created sleepCount := 0 loopExit := false for !loopExit { time.Sleep(1 * time.Second) sleepCount++ if _, err := os.Stat(stargzAddress); err == nil { loopExit = true } if sleepCount > 15 { return nil, errors.New("could not create .sock in time") } } return &StargzProcess{ command: stargzCmd, address: stargzAddress, root: stargzRoot, stdout: stdoutFile, stderr: stderrFile}, nil } func (proc *StargzProcess) StopProcess() { if proc.stdout != nil { proc.stdout.Close() } if proc.stderr != nil { proc.stderr.Close() } if proc.command != nil { proc.command.Process.Kill() } err := os.RemoveAll(proc.address) if err != nil { fmt.Printf("Error removing stargz process address: %v\n", err) } snapshotDir := proc.root + "/snapshotter/snapshots/" snapshots, err := os.ReadDir(snapshotDir) if err != nil { fmt.Printf("Could not read dir: %s\n", snapshotDir) } for _, s := range snapshots { mountpoint := snapshotDir + s.Name() + "/fs" _ = syscall.Unmount(mountpoint, syscall.MNT_FORCE) } err = os.RemoveAll(proc.root) if err != nil { fmt.Printf("Error removing stargz process root: %v\n", err) } } func (proc *StargzContainerdProcess) StargzRpullImageFromRegistry( ctx context.Context, imageRef string) (containerd.Image, error) { image, err := proc.Client.Pull(ctx, imageRef, []containerd.RemoteOpt{ containerd.WithResolver(framework.GetResolver(ctx, imageRef)), //nolint:staticcheck containerd.WithSchema1Conversion, //lint:ignore SA1019 containerd.WithPullUnpack, containerd.WithPullSnapshotter("stargz"), containerd.WithImageHandlerWrapper(ctdsnapshotters.AppendInfoHandlerWrapper(imageRef)), }...) if err != nil { fmt.Printf("Stargz rpull failed: %v\n", err) return nil, err } return image, nil } soci-snapshotter-0.4.1/benchmark/utils.go000066400000000000000000000076611454010642300204530ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package benchmark import ( "encoding/csv" "errors" "os" "os/exec" ) type ImageDescriptor struct { ShortName string ImageRef string SociIndexManifestRef string ReadyLine string } func GetImageListFromCsv(csvLoc string) ([]ImageDescriptor, error) { csvFile, err := os.Open(csvLoc) if err != nil { return nil, err } csv, err := csv.NewReader(csvFile).ReadAll() if err != nil { return nil, err } var images []ImageDescriptor for _, image := range csv { if len(image) < 3 { return nil, errors.New("image input is not sufficient") } var sociIndexManifestRef string if len(image) == 4 { sociIndexManifestRef = image[2] } images = append(images, ImageDescriptor{ ShortName: image[0], ImageRef: image[1], ReadyLine: image[3], SociIndexManifestRef: sociIndexManifestRef}) } return images, nil } func GetCommitHash() (string, error) { cmd := exec.Command("git", "rev-parse", "HEAD") output, err := cmd.Output() if err != nil { return "", err } return string(output), nil } func GetDefaultWorkloads() []ImageDescriptor { return []ImageDescriptor{ { ShortName: "ECR-public-ffmpeg", ImageRef: "public.ecr.aws/soci-workshop-examples/ffmpeg:latest", SociIndexManifestRef: "ef63578971ebd8fc700c74c96f81dafab4f3875e9117ef3c5eb7446e169d91cb", ReadyLine: "Hello World", }, { ShortName: "ECR-public-tensorflow", ImageRef: "public.ecr.aws/soci-workshop-examples/tensorflow:latest", SociIndexManifestRef: "27546e0267465279e40a8c8ebc8d34836dd5513c6e7019257855c9e0f04a9f34", ReadyLine: "Hello World with 
TensorFlow!", }, { ShortName: "ECR-public-tensorflow_gpu", ImageRef: "public.ecr.aws/soci-workshop-examples/tensorflow_gpu:latest", SociIndexManifestRef: "a40b70bc941216cbb29623e98970dfc84e9640666a8b9043564ca79f6d5cc137", ReadyLine: "Hello World with TensorFlow!", }, { ShortName: "ECR-public-node", ImageRef: "public.ecr.aws/soci-workshop-examples/node:latest", SociIndexManifestRef: "544d42d3447fe7833c2e798b8a342f5102022188e814de0aa6ce980e76c62894", ReadyLine: "Server ready", }, { ShortName: "ECR-public-busybox", ImageRef: "public.ecr.aws/soci-workshop-examples/busybox:latest", SociIndexManifestRef: "deaaf67bb4baa293dadcfbeb1f511c181f89a05a042ee92dd2e43e7b7295b1c0", ReadyLine: "Hello World", }, { ShortName: "ECR-public-mongo", ImageRef: "public.ecr.aws/soci-workshop-examples/mongo:latest", SociIndexManifestRef: "ecdd6dcc917d09ec7673288e8ba83270542b71959db2ac731fbeb42aa0b038e0", ReadyLine: "Waiting for connections", }, { ShortName: "ECR-public-rabbitmq", ImageRef: "public.ecr.aws/soci-workshop-examples/rabbitmq:latest", SociIndexManifestRef: "3882f9609c0c2da044173710f3905f4bc6c09228f2a5b5a0a5fdce2537677c17", ReadyLine: "Server startup complete", }, { ShortName: "ECR-public-redis", ImageRef: "public.ecr.aws/soci-workshop-examples/redis:latest", SociIndexManifestRef: "da171fda5f4ccf79f453fc0c5e1414642521c2e189f377809ca592af9458287a", ReadyLine: "Ready to accept connections", }, } } soci-snapshotter-0.4.1/cache/000077500000000000000000000000001454010642300160635ustar00rootroot00000000000000soci-snapshotter-0.4.1/cache/cache.go000066400000000000000000000256471454010642300174730ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "bytes" "errors" "fmt" "io" "os" "path/filepath" "sync" "github.com/awslabs/soci-snapshotter/util/lrucache" "github.com/awslabs/soci-snapshotter/util/namedmutex" ) const ( defaultMaxLRUCacheEntry = 10 defaultMaxCacheFds = 10 ) type DirectoryCacheConfig struct { // Number of entries of LRU cache (default: 10). // This won't be used when DataCache is specified. MaxLRUCacheEntry int // Number of file descriptors to cache (default: 10). // This won't be used when FdCache is specified. MaxCacheFds int // On Add, wait until the data is fully written to the cache directory. SyncAdd bool // DataCache is an on-memory cache of the data. // OnEvicted will be overridden and replaced for internal use. DataCache *lrucache.Cache // FdCache is a cache for opened file descriptors. // OnEvicted will be overridden and replaced for internal use. FdCache *lrucache.Cache // BufPool will be used for pooling bytes.Buffer. BufPool *sync.Pool // Direct forcefully enables direct mode for all operation in cache. 
// Thus operation won't use on-memory caches. Direct bool } // TODO: contents validation. // BlobCache represents a cache for bytes data type BlobCache interface { // Add returns a writer to add contents to cache Add(key string, opts ...Option) (Writer, error) // Get returns a reader to read the specified contents // from cache Get(key string, opts ...Option) (Reader, error) // Close closes the cache Close() error } // Reader provides the data cached. type Reader interface { io.ReaderAt Close() error } // Writer enables the client to cache byte data. Commit() must be // called after data is fully written to Write(). To abort the written // data, Abort() must be called. type Writer interface { io.WriteCloser Commit() error Abort() error } type cacheOpt struct { direct bool } type Option func(o *cacheOpt) *cacheOpt // Direct option lets FetchAt and Add methods not to use on-memory caches. When // you know that the targeting value won't be used immediately, you can prevent // the limited space of on-memory caches from being polluted by these unimportant // values. 
func Direct() Option { return func(o *cacheOpt) *cacheOpt { o.direct = true return o } } func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache, error) { if !filepath.IsAbs(directory) { return nil, fmt.Errorf("dir cache path must be an absolute path; got %q", directory) } bufPool := config.BufPool if bufPool == nil { bufPool = &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } } dataCache := config.DataCache if dataCache == nil { maxEntry := config.MaxLRUCacheEntry if maxEntry == 0 { maxEntry = defaultMaxLRUCacheEntry } dataCache = lrucache.New(maxEntry) dataCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } } fdCache := config.FdCache if fdCache == nil { maxEntry := config.MaxCacheFds if maxEntry == 0 { maxEntry = defaultMaxCacheFds } fdCache = lrucache.New(maxEntry) fdCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } } if err := os.MkdirAll(directory, 0700); err != nil { return nil, err } wipdir := filepath.Join(directory, "wip") if err := os.MkdirAll(wipdir, 0700); err != nil { return nil, err } dc := &directoryCache{ cache: dataCache, fileCache: fdCache, wipLock: new(namedmutex.NamedMutex), directory: directory, wipDirectory: wipdir, bufPool: bufPool, direct: config.Direct, } dc.syncAdd = config.SyncAdd return dc, nil } // directoryCache is a cache implementation which backend is a directory. 
type directoryCache struct { cache *lrucache.Cache fileCache *lrucache.Cache wipDirectory string directory string wipLock *namedmutex.NamedMutex bufPool *sync.Pool syncAdd bool direct bool closed bool closedMu sync.Mutex } func (dc *directoryCache) Get(key string, opts ...Option) (Reader, error) { if dc.isClosed() { return nil, fmt.Errorf("cache is already closed") } opt := &cacheOpt{} for _, o := range opts { opt = o(opt) } if !dc.direct && !opt.direct { // Get data from memory if b, done, ok := dc.cache.Get(key); ok { return &reader{ ReaderAt: bytes.NewReader(b.(*bytes.Buffer).Bytes()), closeFunc: func() error { done() return nil }, }, nil } // Get data from disk. If the file is already opened, use it. if f, done, ok := dc.fileCache.Get(key); ok { return &reader{ ReaderAt: f.(*os.File), closeFunc: func() error { done() // file will be closed when it's evicted from the cache return nil }, }, nil } } // Open the cache file and read the target region // TODO: If the target cache is write-in-progress, should we wait for the completion // or simply report the cache miss? file, err := os.Open(dc.cachePath(key)) if err != nil { return nil, fmt.Errorf("failed to open blob file for %q: %w", key, err) } // If "direct" option is specified, do not cache the file on memory. // This option is useful for preventing memory cache from being polluted by data // that won't be accessed immediately. if dc.direct || opt.direct { return &reader{ ReaderAt: file, closeFunc: func() error { return file.Close() }, }, nil } // TODO: should we cache the entire file data on memory? // but making I/O (possibly huge) on every fetching // might be costly. return &reader{ ReaderAt: file, closeFunc: func() error { _, done, added := dc.fileCache.Add(key, file) defer done() // Release it immediately. Cleaned up on eviction. if !added { return file.Close() // file already exists in the cache. close it. 
} return nil }, }, nil } func (dc *directoryCache) Add(key string, opts ...Option) (Writer, error) { if dc.isClosed() { return nil, fmt.Errorf("cache is already closed") } opt := &cacheOpt{} for _, o := range opts { opt = o(opt) } wip, err := dc.wipFile(key) if err != nil { return nil, err } w := &writer{ WriteCloser: wip, commitFunc: func() error { if dc.isClosed() { return fmt.Errorf("cache is already closed") } // Commit the cache contents c := dc.cachePath(key) if err := os.MkdirAll(filepath.Dir(c), os.ModePerm); err != nil { var allErr error if err := os.Remove(wip.Name()); err != nil { allErr = errors.Join(allErr, err) } return errors.Join(allErr, fmt.Errorf("failed to create cache directory %q: %w", c, err)) } return os.Rename(wip.Name(), c) }, abortFunc: func() error { return os.Remove(wip.Name()) }, } // If "direct" option is specified, do not cache the passed data on memory. // This option is useful for preventing memory cache from being polluted by data // that won't be accessed immediately. if dc.direct || opt.direct { return w, nil } b := dc.bufPool.Get().(*bytes.Buffer) memW := &writer{ WriteCloser: nopWriteCloser(io.Writer(b)), commitFunc: func() error { if dc.isClosed() { w.Close() return fmt.Errorf("cache is already closed") } cached, done, added := dc.cache.Add(key, b) if !added { dc.putBuffer(b) // already exists in the cache. abort it. } commit := func() error { defer done() defer w.Close() n, err := w.Write(cached.(*bytes.Buffer).Bytes()) if err != nil || n != cached.(*bytes.Buffer).Len() { w.Abort() return err } return w.Commit() } if dc.syncAdd { return commit() } go func() { if err := commit(); err != nil { fmt.Println("failed to commit to file:", err) } }() return nil }, abortFunc: func() error { defer w.Close() defer w.Abort() dc.putBuffer(b) // abort it. 
return nil }, } return memW, nil } func (dc *directoryCache) putBuffer(b *bytes.Buffer) { b.Reset() dc.bufPool.Put(b) } func (dc *directoryCache) Close() error { dc.closedMu.Lock() defer dc.closedMu.Unlock() if dc.closed { return nil } dc.closed = true return os.RemoveAll(dc.directory) } func (dc *directoryCache) isClosed() bool { dc.closedMu.Lock() closed := dc.closed dc.closedMu.Unlock() return closed } func (dc *directoryCache) cachePath(key string) string { return filepath.Join(dc.directory, key) } func (dc *directoryCache) wipFile(key string) (*os.File, error) { return os.CreateTemp(dc.wipDirectory, key+"-*") } func NewMemoryCache() BlobCache { return &MemoryCache{ Membuf: map[string]*bytes.Buffer{}, } } // MemoryCache is a cache implementation which backend is a memory. type MemoryCache struct { Membuf map[string]*bytes.Buffer mu sync.Mutex } func (mc *MemoryCache) Get(key string, opts ...Option) (Reader, error) { mc.mu.Lock() defer mc.mu.Unlock() b, ok := mc.Membuf[key] if !ok { return nil, fmt.Errorf("Missed cache: %q", key) } return &reader{bytes.NewReader(b.Bytes()), func() error { return nil }}, nil } func (mc *MemoryCache) Add(key string, opts ...Option) (Writer, error) { b := new(bytes.Buffer) return &writer{ WriteCloser: nopWriteCloser(io.Writer(b)), commitFunc: func() error { mc.mu.Lock() defer mc.mu.Unlock() mc.Membuf[key] = b return nil }, abortFunc: func() error { return nil }, }, nil } func (mc *MemoryCache) Close() error { return nil } type reader struct { io.ReaderAt closeFunc func() error } func (r *reader) Close() error { return r.closeFunc() } type writer struct { io.WriteCloser commitFunc func() error abortFunc func() error } func (w *writer) Commit() error { return w.commitFunc() } func (w *writer) Abort() error { return w.abortFunc() } type writeCloser struct { io.Writer closeFunc func() error } func (w *writeCloser) Close() error { return w.closeFunc() } func nopWriteCloser(w io.Writer) io.WriteCloser { return &writeCloser{w, func() 
error { return nil }} } soci-snapshotter-0.4.1/cache/cache_test.go000066400000000000000000000113331454010642300205150ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package cache import ( "crypto/sha256" "fmt" "io" "testing" ) const ( sampleData = "0123456789" ) func TestDirectoryCache(t *testing.T) { // with enough memory cache newCache := func(t *testing.T) BlobCache { tmp := t.TempDir() c, err := NewDirectoryCache(tmp, DirectoryCacheConfig{ MaxLRUCacheEntry: 10, SyncAdd: true, }) if err != nil { t.Fatalf("failed to make cache: %v", err) } return c } testCache(t, "dir-with-enough-mem", newCache) // with smaller memory cache newCache = func(t *testing.T) BlobCache { tmp := t.TempDir() c, err := NewDirectoryCache(tmp, DirectoryCacheConfig{ MaxLRUCacheEntry: 1, SyncAdd: true, }) if err != nil { t.Fatalf("failed to make cache: %v", err) } return c } testCache(t, "dir-with-small-mem", newCache) } func TestMemoryCache(t *testing.T) { testCache(t, "memory", func(*testing.T) BlobCache { return NewMemoryCache() }) } func testCache(t *testing.T, name string, newCache func(t *testing.T) BlobCache) { tests := []struct { name string blobs []string checks []check }{ { name: "empty_data", blobs: []string{ "", }, checks: []check{ hit(""), miss(sampleData), }, }, { name: "data", blobs: []string{ sampleData, }, checks: []check{ hit(sampleData), miss("dummy"), }, }, { name: "manydata", blobs: []string{ sampleData, "test", }, checks: []check{ hit(sampleData), miss("dummy"), }, }, { name: "dup_data", blobs: []string{ sampleData, sampleData, }, checks: []check{ hit(sampleData), }, }, } for _, tt := range tests { t.Run(fmt.Sprintf("%s-%s", name, tt.name), func(t *testing.T) { c := newCache(t) for _, blob := range tt.blobs { d := digestFor(blob) w, err := c.Add(d) if err != nil { t.Fatalf("failed to add %v: %v", d, err) } if n, err := w.Write([]byte(blob)); err != nil || n != len(blob) { w.Close() t.Fatalf("failed to write %v (len:%d): %v", d, len(blob), err) } if err := w.Commit(); err != nil { w.Close() t.Fatalf("failed to commit %v (len:%d): %v", d, len(blob), err) } w.Close() } for _, check := range tt.checks { check(t, c) } }) } } type 
check func(*testing.T, BlobCache) func digestFor(content string) string { sum := sha256.Sum256([]byte(content)) return fmt.Sprintf("%x", sum) } func hit(sample string) check { return func(t *testing.T, c BlobCache) { // test whole blob key := digestFor(sample) testBlob(t, c, key, 0, sample) // test a chunk chunk := len(sample) / 3 testBlob(t, c, key, int64(chunk), sample[chunk:2*chunk]) } } func testBlob(t *testing.T, c BlobCache, key string, offset int64, sample string) { p := make([]byte, len(sample)) r, err := c.Get(key) if err != nil { t.Errorf("missed %v", key) return } if n, err := r.ReadAt(p, offset); err != nil && err != io.EOF { t.Errorf("failed to fetch blob %q: %v", key, err) return } else if n != len(sample) { t.Errorf("fetched size %d; want %d", len(p), len(sample)) return } if digestFor(sample) != digestFor(string(p)) { t.Errorf("fetched %q; want %q", string(p), sample) } } func miss(sample string) check { return func(t *testing.T, c BlobCache) { d := digestFor(sample) if _, err := c.Get(d); err == nil { t.Errorf("hit blob %q but must be missed: %v", d, err) return } } } soci-snapshotter-0.4.1/cmd/000077500000000000000000000000001454010642300155635ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/go.mod000066400000000000000000000135251454010642300166770ustar00rootroot00000000000000module github.com/awslabs/soci-snapshotter/cmd go 1.20 require ( github.com/awslabs/soci-snapshotter v0.0.0-local github.com/containerd/containerd v1.7.2 github.com/coreos/go-systemd/v22 v22.5.0 github.com/docker/cli v24.0.4+incompatible github.com/docker/go-metrics v0.0.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc4 github.com/sirupsen/logrus v1.9.3 github.com/urfave/cli v1.22.14 go.etcd.io/bbolt v1.3.7 golang.org/x/sys v0.10.0 google.golang.org/grpc v1.56.2 oras.land/oras-go/v2 v2.2.1 ) require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect github.com/AdamKorcz/go-118-fuzz-build 
v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.10.0-rc.9 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/cgroups/v3 v3.0.1 // indirect github.com/containerd/console v1.0.3 // indirect github.com/containerd/continuity v0.4.1 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/go-cni v1.1.9 // indirect github.com/containerd/ttrpc v1.2.2 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect github.com/containernetworking/cni v1.1.2 // indirect github.com/containernetworking/plugins v1.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/docker v23.0.5+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/hanwen/go-fuse/v2 v2.4.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect 
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/intel/goresctrl v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.7 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect github.com/moby/sys/symlink v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.12.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/term v0.10.0 // indirect golang.org/x/text v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect 
golang.org/x/tools v0.11.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.26.3 // indirect k8s.io/apimachinery v0.26.3 // indirect k8s.io/client-go v0.26.3 // indirect k8s.io/klog/v2 v2.90.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) replace ( // Import local packages. github.com/awslabs/soci-snapshotter v0.0.0 => ../ github.com/awslabs/soci-snapshotter v0.0.0-local => ../ // Temporary fork for avoiding importing patent-protected code: https://github.com/hashicorp/golang-lru/issues/73 github.com/hashicorp/golang-lru => github.com/ktock/golang-lru v0.5.5-0.20211029085301-ec551be6f75c ) soci-snapshotter-0.4.1/cmd/go.sum000066400000000000000000001314021454010642300167170ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml 
v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.10.0-rc.9 h1:B4mguSolFL2yOHl0JjQxo0Si2Vwipj/Cbib4pyJ4pKA= github.com/Microsoft/hcsshim v0.10.0-rc.9/go.mod h1:1g6+xpige+npSTrEkdm8JOZxOjJ9McQiT0JkEpzyZqA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod 
h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE= github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo= github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI= github.com/containerd/continuity v0.4.1 h1:wQnVrjIyQ8vhU2sgOiL5T07jo+ouqc2bnKsv5/EqGhU= github.com/containerd/continuity v0.4.1/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU= github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/cli v24.0.4+incompatible h1:Y3bYF9ekNTm2VFz5U/0BlMdJy73D+Y1iAAZ8l63Ydzw= github.com/docker/cli v24.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go 
v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hanwen/go-fuse/v2 v2.4.1 h1:/iB2ENOCPamqovm4AaXhEkXaX5fGAG0At1F6b7Ve0sE= github.com/hanwen/go-fuse/v2 v2.4.1/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= 
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c= github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod 
h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod 
h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= 
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= 
k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.2.1 h1:3VJTYqy5KfelEF9c2jo1MLSpr+TM3mX8K42wzZcd6qE= oras.land/oras-go/v2 v2.2.1/go.mod h1:GeAwLuC4G/JpNwkd+bSZ6SkDMGaaYglt6YK2WvZP7uQ= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= soci-snapshotter-0.4.1/cmd/soci-snapshotter-grpc/000077500000000000000000000000001454010642300220215ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci-snapshotter-grpc/main.go000066400000000000000000000240651454010642300233030ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "context" "flag" "fmt" "io" golog "log" "net" "net/http" "os" "os/signal" "path/filepath" "time" _ "net/http/pprof" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/fs" "github.com/awslabs/soci-snapshotter/metadata" "github.com/awslabs/soci-snapshotter/service" "github.com/awslabs/soci-snapshotter/service/keychain/cri" "github.com/awslabs/soci-snapshotter/service/keychain/dockerconfig" "github.com/awslabs/soci-snapshotter/service/keychain/kubeconfig" "github.com/awslabs/soci-snapshotter/service/resolver" "github.com/awslabs/soci-snapshotter/version" "github.com/awslabs/soci-snapshotter/ztoc" snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/contrib/snapshotservice" "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/snapshots" runtime_alpha 
"github.com/containerd/containerd/third_party/k8s.io/cri-api/pkg/apis/runtime/v1alpha2" sddaemon "github.com/coreos/go-systemd/v22/daemon" metrics "github.com/docker/go-metrics" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" "golang.org/x/sys/unix" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials/insecure" ) const ( defaultAddress = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" defaultConfigPath = "/etc/soci-snapshotter-grpc/config.toml" defaultLogLevel = logrus.InfoLevel defaultRootDir = "/var/lib/soci-snapshotter-grpc" ) // logLevel of Debug or Trace may emit sensitive information // e.g. file contents, file names and paths, network addresses and ports var ( address = flag.String("address", defaultAddress, "address for the snapshotter's GRPC server") configPath = flag.String("config", defaultConfigPath, "path to the configuration file") logLevel = flag.String("log-level", defaultLogLevel.String(), "set the logging level [trace, debug, info, warn, error, fatal, panic]") rootDir = flag.String("root", defaultRootDir, "path to the root directory for this snapshotter") printVersion = flag.Bool("version", false, "print the version") ) func main() { flag.Parse() lvl, err := logrus.ParseLevel(*logLevel) if err != nil { log.L.WithError(err).Fatal("failed to prepare logger") } if *printVersion { fmt.Println("soci-snapshotter-grpc version", version.Version, version.Revision) return } logrus.SetLevel(lvl) logrus.SetFormatter(&logrus.JSONFormatter{ TimestampFormat: log.RFC3339NanoFixed, }) ctx, cancel := context.WithCancel(log.WithLogger(context.Background(), log.L)) defer cancel() // Streams log of standard lib (go-fuse uses this) into debug log // Snapshotter should use "github.com/containerd/containerd/log" otherwise // logs are always printed as "debug" mode. 
golog.SetOutput(log.G(ctx).WriterLevel(logrus.DebugLevel)) log.G(ctx).WithFields(logrus.Fields{ "version": version.Version, "revision": version.Revision, }).Info("starting soci-snapshotter-grpc") cfg, err := config.NewConfigFromToml(*configPath) if err != nil { log.G(ctx).WithError(err).Fatal(err) } if err := service.Supported(*rootDir); err != nil { log.G(ctx).WithError(err).Fatalf("snapshotter is not supported") } // Create a gRPC server rpc := grpc.NewServer() // Configure keychain credsFuncs := []resolver.Credential{dockerconfig.NewDockerConfigKeychain(ctx)} if cfg.KubeconfigKeychainConfig.EnableKeychain { var opts []kubeconfig.Option if kcp := cfg.KubeconfigKeychainConfig.KubeconfigPath; kcp != "" { opts = append(opts, kubeconfig.WithKubeconfigPath(kcp)) } credsFuncs = append(credsFuncs, kubeconfig.NewKubeconfigKeychain(ctx, opts...)) } if cfg.CRIKeychainConfig.EnableKeychain { // connects to the backend CRI service (defaults to containerd socket) connectCRI := func() (runtime_alpha.ImageServiceClient, error) { // TODO: make gRPC options configurable from config.toml backoffConfig := backoff.DefaultConfig backoffConfig.MaxDelay = 3 * time.Second connParams := grpc.ConnectParams{ Backoff: backoffConfig, } gopts := []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithConnectParams(connParams), grpc.WithContextDialer(dialer.ContextDialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), } conn, err := grpc.Dial(dialer.DialAddress(cfg.CRIKeychainConfig.ImageServicePath), gopts...) 
if err != nil { return nil, err } return runtime_alpha.NewImageServiceClient(conn), nil } f, criServer := cri.NewCRIKeychain(ctx, connectCRI) runtime_alpha.RegisterImageServiceServer(rpc, criServer) credsFuncs = append(credsFuncs, f) } var fsOpts []fs.Option mt, err := getMetadataStore(*rootDir, *cfg) if err != nil { log.G(ctx).WithError(err).Fatalf("failed to configure metadata store") } fsOpts = append(fsOpts, fs.WithMetadataStore(mt)) rs, err := service.NewSociSnapshotterService(ctx, *rootDir, &cfg.ServiceConfig, service.WithCredsFuncs(credsFuncs...), service.WithFilesystemOptions(fsOpts...)) if err != nil { log.G(ctx).WithError(err).Fatalf("failed to configure snapshotter") } cleanup, err := serve(ctx, rpc, *address, rs, *cfg) if err != nil { log.G(ctx).WithError(err).Fatalf("failed to serve snapshotter") } if cleanup { log.G(ctx).Debug("Closing the snapshotter") rs.Close() } log.G(ctx).Info("Exiting") } func serve(ctx context.Context, rpc *grpc.Server, addr string, rs snapshots.Snapshotter, cfg config.Config) (bool, error) { // Convert the snapshotter to a gRPC service, snsvc := snapshotservice.FromSnapshotter(rs) // Register the service with the gRPC server snapshotsapi.RegisterSnapshotsServer(rpc, snsvc) // Prepare the directory for the socket if err := os.MkdirAll(filepath.Dir(addr), 0700); err != nil { return false, fmt.Errorf("failed to create directory %q: %w", filepath.Dir(addr), err) } // Try to remove the socket file to avoid EADDRINUSE if err := os.RemoveAll(addr); err != nil { return false, fmt.Errorf("failed to remove %q: %w", addr, err) } errCh := make(chan error, 1) var cleanupFns []func() error defer func() { for _, cleanupFn := range cleanupFns { cleanupFn() } }() // We need to consider both the existence of MetricsAddress as well as NoPrometheus flag not set if cfg.MetricsAddress != "" && !cfg.NoPrometheus { l, err := net.Listen(cfg.MetricsNetwork, cfg.MetricsAddress) if err != nil { return false, fmt.Errorf("failed to get listener for metrics 
endpoint: %w", err) } cleanupFns = append(cleanupFns, l.Close) m := http.NewServeMux() m.Handle("/metrics", metrics.Handler()) go func() { if err := http.Serve(l, m); err != nil { errCh <- fmt.Errorf("error on serving metrics via socket %q: %w", addr, err) } }() } if cfg.DebugAddress != "" { log.G(ctx).Infof("listen %q for debugging", cfg.DebugAddress) go func() { if err := http.ListenAndServe(cfg.DebugAddress, nil); err != nil { errCh <- fmt.Errorf("error on serving a debug endpoint via socket %q: %w", addr, err) } }() } // Listen and serve l, err := net.Listen("unix", addr) if err != nil { return false, fmt.Errorf("error on listen socket %q: %w", addr, err) } cleanupFns = append(cleanupFns, l.Close) go func() { if err := rpc.Serve(l); err != nil { errCh <- fmt.Errorf("error on serving via socket %q: %w", addr, err) } }() if os.Getenv("NOTIFY_SOCKET") != "" { notified, notifyErr := sddaemon.SdNotify(false, sddaemon.SdNotifyReady) log.G(ctx).Debugf("SdNotifyReady notified=%v, err=%v", notified, notifyErr) } defer func() { if os.Getenv("NOTIFY_SOCKET") != "" { notified, notifyErr := sddaemon.SdNotify(false, sddaemon.SdNotifyStopping) log.G(ctx).Debugf("SdNotifyStopping notified=%v, err=%v", notified, notifyErr) } }() var s os.Signal sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, unix.SIGINT, unix.SIGTERM) select { case s = <-sigCh: log.G(ctx).Infof("Got %v", s) case err := <-errCh: return false, err } if s == unix.SIGINT { return true, nil // do cleanup on SIGINT } return false, nil } const ( dbMetadataType = "db" ) func getMetadataStore(rootDir string, config config.Config) (metadata.Store, error) { switch config.MetadataStore { case "", dbMetadataType: bOpts := bolt.Options{ NoFreelistSync: true, InitialMmapSize: 64 * 1024 * 1024, FreelistType: bolt.FreelistMapType, } db, err := bolt.Open(filepath.Join(rootDir, "metadata.db"), 0600, &bOpts) if err != nil { return nil, err } return func(sr *io.SectionReader, toc ztoc.TOC, opts ...metadata.Option) 
(metadata.Reader, error) { return metadata.NewReader(db, sr, toc, opts...) }, nil default: return nil, fmt.Errorf("unknown metadata store type: %v; must be %v", config.MetadataStore, dbMetadataType) } } soci-snapshotter-0.4.1/cmd/soci/000077500000000000000000000000001454010642300165205ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci/commands/000077500000000000000000000000001454010642300203215ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci/commands/create.go000066400000000000000000000073471454010642300221260ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package commands import ( "errors" "os" "github.com/awslabs/soci-snapshotter/cmd/soci/commands/internal" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/urfave/cli" ) const ( buildToolIdentifier = "AWS SOCI CLI v0.1" spanSizeFlag = "span-size" minLayerSizeFlag = "min-layer-size" ) // CreateCommand creates SOCI index for an image // Output of this command is SOCI layers and SOCI index stored in a local directory // SOCI layer is named as .soci.layer // SOCI index is named as .soci.index var CreateCommand = cli.Command{ Name: "create", Usage: "create SOCI index", ArgsUsage: "[flags] ", Flags: append( internal.PlatformFlags, cli.Int64Flag{ Name: spanSizeFlag, Usage: "Span size that soci index uses to segment layer data. Default is 4 MiB", Value: 1 << 22, }, cli.Int64Flag{ Name: minLayerSizeFlag, Usage: "Minimum layer size to build zTOC for. Smaller layers won't have zTOC and not lazy pulled. Default is 10 MiB.", Value: 10 << 20, }, ), Action: func(cliContext *cli.Context) error { srcRef := cliContext.Args().Get(0) if srcRef == "" { return errors.New("source image needs to be specified") } client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() cs := client.ContentStore() is := client.ImageService() srcImg, err := is.Get(ctx, srcRef) if err != nil { return err } spanSize := cliContext.Int64(spanSizeFlag) minLayerSize := cliContext.Int64(minLayerSizeFlag) // Creating the snapshotter's root path first if it does not exist, since this ensures, that // it has the limited permission set as drwx--x--x. // The subsequent oci.New creates a root path dir with too broad permission set. 
if _, err := os.Stat(config.SociSnapshotterRootPath); os.IsNotExist(err) { if err = os.Mkdir(config.SociSnapshotterRootPath, 0711); err != nil { return err } } else if err != nil { return err } ctx, blobStore, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return err } ps, err := internal.GetPlatforms(ctx, cliContext, srcImg, cs) if err != nil { return err } artifactsDb, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } builderOpts := []soci.BuildOption{ soci.WithMinLayerSize(minLayerSize), soci.WithSpanSize(spanSize), soci.WithBuildToolIdentifier(buildToolIdentifier), } for _, plat := range ps { builder, err := soci.NewIndexBuilder(cs, blobStore, artifactsDb, append(builderOpts, soci.WithPlatform(plat))...) if err != nil { return err } sociIndexWithMetadata, err := builder.Build(ctx, srcImg) if err != nil { return err } err = soci.WriteSociIndex(ctx, sociIndexWithMetadata, blobStore, builder.ArtifactsDb) if err != nil { return err } } return nil }, } soci-snapshotter-0.4.1/cmd/soci/commands/image/000077500000000000000000000000001454010642300214035ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci/commands/image/image.go000066400000000000000000000014011454010642300230100ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package image import "github.com/urfave/cli" var Command = cli.Command{ Name: "image", Usage: "manage images", Subcommands: []cli.Command{ rpullCommand, }, } soci-snapshotter-0.4.1/cmd/soci/commands/image/rpull.go000066400000000000000000000114331454010642300230720ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package image import ( "context" "fmt" "github.com/awslabs/soci-snapshotter/cmd/soci/commands/internal" "github.com/awslabs/soci-snapshotter/fs/source" "github.com/containerd/containerd" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/cmd/ctr/commands/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" ctdsnapshotters "github.com/containerd/containerd/pkg/snapshotters" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) const ( remoteSnapshotterName = "soci" skipContentVerifyOpt = "skip-content-verify" ) // rpullCommand is a subcommand to pull an image from a registry levaraging soci snapshotter var rpullCommand = cli.Command{ Name: "rpull", Usage: "pull an image from a registry levaraging soci snapshotter", ArgsUsage: "[flags] ", Description: `Fetch and prepare an image for use in containerd levaraging soci snapshotter. After pulling an image, it should be ready to use the same reference in a run command. `, Flags: append(append(append( commands.RegistryFlags, commands.LabelFlag), commands.SnapshotterFlags...), cli.BoolFlag{ Name: skipContentVerifyOpt, Usage: "Skip content verification for layers contained in this image.", }, // This is a standin for the snapshotter receiving the index digest from container runtimes. 
cli.StringFlag{ Name: "soci-index-digest", Usage: "The SOCI index digest.", }, cli.StringFlag{ Name: internal.PlatformFlagKey, Usage: "The platform to pull.", }, ), Action: func(context *cli.Context) error { var ( ref = context.Args().First() config = &rPullConfig{} ) if ref == "" { return fmt.Errorf("please provide an image reference") } config.indexDigest = context.String("soci-index-digest") client, ctx, cancel, err := commands.NewClient(context) if err != nil { return err } defer cancel() ctx, done, err := client.WithLease(ctx) if err != nil { return err } defer done(ctx) fc, err := content.NewFetchConfig(ctx, context) if err != nil { return err } config.FetchConfig = fc if context.Bool(skipContentVerifyOpt) { config.skipVerify = true } config.snapshotter = remoteSnapshotterName if sn := context.String("snapshotter"); sn != "" { config.snapshotter = sn } config.platform = context.String(internal.PlatformFlagKey) return pull(ctx, client, ref, config) }, } type rPullConfig struct { *content.FetchConfig skipVerify bool snapshotter string indexDigest string platform string } func pull(ctx context.Context, client *containerd.Client, ref string, config *rPullConfig) error { pCtx := ctx h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { if desc.MediaType != images.MediaTypeDockerSchema1Manifest { fmt.Printf("fetching %v... 
%v\n", desc.Digest.String()[:15], desc.MediaType) } return nil, nil }) log.G(pCtx).WithField("image", ref).Debug("fetching") labels := commands.LabelArgs(config.Labels) if _, err := client.Pull(pCtx, ref, []containerd.RemoteOpt{ containerd.WithPullLabels(labels), containerd.WithResolver(config.Resolver), containerd.WithImageHandler(h), //nolint:staticcheck containerd.WithSchema1Conversion, //lint:ignore SA1019 containerd.WithPullUnpack, containerd.WithPlatform(config.platform), containerd.WithPullSnapshotter(config.snapshotter), containerd.WithImageHandlerWrapper(source.AppendDefaultLabelsHandlerWrapper( config.indexDigest, ctdsnapshotters.AppendInfoHandlerWrapper(ref))), }...); err != nil { return err } return nil } soci-snapshotter-0.4.1/cmd/soci/commands/index/000077500000000000000000000000001454010642300214305ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci/commands/index/index.go000066400000000000000000000014421454010642300230670ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package index import ( "github.com/urfave/cli" ) var Command = cli.Command{ Name: "index", Usage: "manage indices", Subcommands: []cli.Command{ listCommand, infoCommand, rmCommand, }, } soci-snapshotter-0.4.1/cmd/soci/commands/index/info.go000066400000000000000000000040171454010642300227140ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package index import ( "context" "fmt" "io" "os" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) var infoCommand = cli.Command{ Name: "info", Usage: "display an index", Description: "get detailed info about an index", ArgsUsage: "", Action: func(cliContext *cli.Context) error { digest, err := digest.Parse(cliContext.Args().First()) if err != nil { return err } db, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } artifactType, err := db.GetArtifactType(digest.String()) if err != nil { return err } if artifactType == soci.ArtifactEntryTypeLayer { return fmt.Errorf("the provided digest is of ztoc not SOCI index. 
Use \"soci ztoc info\" command to get detailed info of ztoc") } ctx, cancel := context.WithTimeout(context.Background(), cliContext.GlobalDuration("timeout")) defer cancel() ctx, store, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return err } reader, err := store.Fetch(ctx, v1.Descriptor{Digest: digest}) if err != nil { return err } defer reader.Close() _, err = io.Copy(os.Stdout, reader) return err }, } soci-snapshotter-0.4.1/cmd/soci/commands/index/list.go000066400000000000000000000107561454010642300227430ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package index import ( "fmt" "io" "os" "sort" "text/tabwriter" "time" "github.com/awslabs/soci-snapshotter/soci" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) type filter func(ae *soci.ArtifactEntry) bool func indexFilter(ae *soci.ArtifactEntry) bool { return ae.Type == soci.ArtifactEntryTypeIndex } func platformFilter(platform specs.Platform) filter { return func(ae *soci.ArtifactEntry) bool { return indexFilter(ae) && ae.Platform == platforms.Format(platform) } } func originalDigestFilter(digest string) filter { return func(ae *soci.ArtifactEntry) bool { return indexFilter(ae) && ae.OriginalDigest == digest } } func anyMatch(fns []filter) filter { return func(ae *soci.ArtifactEntry) bool { for _, f := range fns { if f(ae) { return true } } return false } } var listCommand = cli.Command{ Name: "list", Usage: "list indices", Aliases: []string{"ls"}, Flags: []cli.Flag{ cli.StringFlag{ Name: "ref", Usage: "filter indices to those that are associated with a specific image ref", }, cli.BoolFlag{ Name: "quiet, q", Usage: "only display the index digests", }, cli.StringSliceFlag{ Name: "platform, p", Usage: "filter indices to a specific platform", }, }, Action: func(cliContext *cli.Context) error { var artifacts []*soci.ArtifactEntry ref := cliContext.String("ref") quiet := cliContext.Bool("quiet") var plats []specs.Platform for _, p := range cliContext.StringSlice("platform") { pp, err := platforms.Parse(p) if err != nil { return err } plats = append(plats, pp) } client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() f := indexFilter is := client.ImageService() if ref != "" { img, err := is.Get(ctx, ref) if err != nil { return err } if len(plats) == 0 { plats, err = images.Platforms(ctx, client.ContentStore(), img.Target) if err != nil { return err 
} } cs := client.ContentStore() var filters []filter for _, plat := range plats { desc, err := soci.GetImageManifestDescriptor(ctx, cs, img.Target, platforms.OnlyStrict(plat)) if err != nil { return err } filters = append(filters, originalDigestFilter(desc.Digest.String())) } f = anyMatch(filters) } else if len(plats) != 0 { var filters []filter for _, plat := range plats { filters = append(filters, platformFilter(plat)) } f = anyMatch(filters) } db, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } db.Walk(func(ae *soci.ArtifactEntry) error { if f(ae) { artifacts = append(artifacts, ae) } return nil }) sort.Slice(artifacts, func(i, j int) bool { return artifacts[i].CreatedAt.After(artifacts[j].CreatedAt) }) if quiet { for _, ae := range artifacts { os.Stdout.Write([]byte(fmt.Sprintf("%s\n", ae.Digest))) } return nil } writer := tabwriter.NewWriter(os.Stdout, 8, 8, 4, ' ', 0) writer.Write([]byte("DIGEST\tSIZE\tIMAGE REF\tPLATFORM\tMEDIA TYPE\tCREATED\t\n")) for _, ae := range artifacts { imgs, _ := is.List(ctx, fmt.Sprintf("target.digest==%s", ae.ImageDigest)) if len(imgs) > 0 { for _, img := range imgs { writeArtifactEntry(writer, ae, img.Name) } } else { writeArtifactEntry(writer, ae, "") } } writer.Flush() return nil }, } func writeArtifactEntry(w io.Writer, ae *soci.ArtifactEntry, imageRef string) { w.Write([]byte(fmt.Sprintf( "%s\t%d\t%s\t%s\t%s\t%s\t\n", ae.Digest, ae.Size, imageRef, ae.Platform, ae.MediaType, getDuration(ae.CreatedAt), ))) } func getDuration(t time.Time) string { if t.IsZero() { return "n/a" } return fmt.Sprintf("%s ago", time.Since(t).Round(time.Second).String()) } soci-snapshotter-0.4.1/cmd/soci/commands/index/rm.go000066400000000000000000000060141454010642300223760ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package index import ( "context" "fmt" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/opencontainers/go-digest" "github.com/urfave/cli" ) var rmCommand = cli.Command{ Name: "remove", Aliases: []string{"rm"}, Usage: "remove indices", Description: "remove an index from local db, and from content store if supported", Flags: []cli.Flag{ cli.StringFlag{ Name: "ref", Usage: "only remove indices that are associated with a specific image ref", }, }, Action: func(cliContext *cli.Context) error { args := cliContext.Args() ref := cliContext.String("ref") if len(args) != 0 && ref != "" { return fmt.Errorf("please provide either index digests or image ref, but not both") } ctx := context.Background() ctx, contentStore, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return fmt.Errorf("cannot create local content store: %w", err) } db, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } if ref == "" { byteArgs := make([][]byte, len(args)) for i, arg := range args { byteArgs[i] = []byte(arg) } err = removeArtifactsAndContent(ctx, db, contentStore, byteArgs) if err != nil { return err } } else { client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() is := client.ImageService() img, err := is.Get(ctx, ref) if err != nil { return err } entries, err := 
db.GetArtifactEntriesByImageDigest(img.Target.Digest.String()) if err != nil { return err } err = removeArtifactsAndContent(ctx, db, contentStore, entries) if err != nil { return err } } return nil }, } // removeArtifactsAndContent takes a list of content digests and removes them from the artifact db and content store func removeArtifactsAndContent(ctx context.Context, db *soci.ArtifactsDb, contentStore store.Store, digests [][]byte) error { for _, dgst := range digests { err := db.RemoveArtifactEntryByIndexDigest(dgst) if err != nil { return err } dgst, err := digest.Parse(string(dgst)) if err != nil { return err } err = contentStore.Delete(ctx, dgst) if err != nil { return err } } return nil } soci-snapshotter-0.4.1/cmd/soci/commands/internal/000077500000000000000000000000001454010642300221355ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci/commands/internal/index.go000066400000000000000000000025451454010642300236010ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package internal import "github.com/urfave/cli" const ( ExistingIndexFlagName = "existing-index" Warn = "warn" Skip = "skip" Allow = "allow" ) var SupportedExistingIndexOptions = []string{Warn, Skip, Allow} var ExistingIndexFlag = cli.StringFlag{ Name: ExistingIndexFlagName, Value: Warn, Usage: `Configure how to handle existing SOCI artifacts in remote when pushing indices warn - print warning message to stdout but push index anyway skip - skip pushing the index allow - push the index regardless `, } // SupportedArg checks if a value is present within a given slice func SupportedArg[K comparable](v K, list []K) bool { for _, o := range list { if v == o { return true } } return false } soci-snapshotter-0.4.1/cmd/soci/commands/internal/platform.go000066400000000000000000000041031454010642300243060ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package internal import ( "context" "fmt" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) const ( PlatformFlagKey = "platform" AllPlatformsFlagKey = "all-platforms" ) var PlatformFlags = []cli.Flag{ cli.BoolFlag{ Name: AllPlatformsFlagKey, Usage: "", }, cli.StringSliceFlag{ Name: PlatformFlagKey + ", p", Usage: "", }, } // GetPlatforms returns the set of platforms from a cli.Context // The order of preference is: // 1) all platforms supported by the image if the `all-plaforms` flag is set // 2) the set of platforms specified by the `platform` flag // 3) the default platform // // This method is not suitable for situations where the default should be all supported platforms (e.g. the `soci index list` command) func GetPlatforms(ctx context.Context, cliContext *cli.Context, img images.Image, cs content.Store) ([]ocispec.Platform, error) { if cliContext.Bool(AllPlatformsFlagKey) { return images.Platforms(ctx, cs, img.Target) } ps := cliContext.StringSlice(PlatformFlagKey) if len(ps) == 0 { return []ocispec.Platform{platforms.DefaultSpec()}, nil } var result []ocispec.Platform for _, p := range ps { platform, err := platforms.Parse(p) if err != nil { return nil, fmt.Errorf("could not parse platform %s: %w", p, err) } result = append(result, platform) } return result, nil } soci-snapshotter-0.4.1/cmd/soci/commands/push.go000066400000000000000000000164271454010642300216410ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package commands import ( "context" "errors" "fmt" "io" "net/http" "sort" "strings" "github.com/awslabs/soci-snapshotter/cmd/soci/commands/internal" "github.com/awslabs/soci-snapshotter/fs" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/reference" dockercliconfig "github.com/docker/cli/cli/config" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" oraslib "oras.land/oras-go/v2" "oras.land/oras-go/v2/registry/remote" "oras.land/oras-go/v2/registry/remote/auth" ) // PushCommand is a command to push an image artifacts from local content store to the remote repository var PushCommand = cli.Command{ Name: "push", Usage: "push SOCI artifacts to a registry", ArgsUsage: "[flags] ", Description: `Push SOCI artifacts to a registry by image reference. If multiple soci indices exist for the given image, the most recent one will be pushed. After pushing the soci artifacts, they should be available in the registry. Soci artifacts will be pushed only if they are available in the snapshotter's local content store. `, Flags: append(append(append(append( commands.RegistryFlags, commands.LabelFlag), commands.SnapshotterFlags...), internal.PlatformFlags...), internal.ExistingIndexFlag, cli.Uint64Flag{ Name: "max-concurrent-uploads", Usage: "Max concurrent uploads. 
Default is 10", Value: 10, }, cli.BoolFlag{ Name: "quiet, q", Usage: "quiet mode", }, ), Action: func(cliContext *cli.Context) error { ref := cliContext.Args().First() quiet := cliContext.Bool("quiet") if ref == "" { return fmt.Errorf("please provide an image reference to push") } client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() cs := client.ContentStore() is := client.ImageService() img, err := is.Get(ctx, ref) if err != nil { return err } ps, err := internal.GetPlatforms(ctx, cliContext, img, cs) if err != nil { return err } artifactsDb, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } refspec, err := reference.Parse(ref) if err != nil { return err } dst, err := remote.NewRepository(refspec.Locator) if err != nil { return err } authClient := auth.DefaultClient var username string var secret string if cliContext.IsSet("user") { username = cliContext.String("user") if i := strings.IndexByte(username, ':'); i > 0 { secret = username[i+1:] username = username[0:i] } } else { cf := dockercliconfig.LoadDefaultConfigFile(io.Discard) if cf.ContainsAuth() { if ac, err := cf.GetAuthConfig(refspec.Hostname()); err == nil { username = ac.Username secret = ac.Password } } } authClient.Credential = func(_ context.Context, host string) (auth.Credential, error) { return auth.Credential{ Username: username, Password: secret, }, nil } ctx, src, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return fmt.Errorf("cannot create local content store: %w", err) } dst.Client = authClient dst.PlainHTTP = cliContext.Bool("plain-http") debug := cliContext.GlobalBool("debug") if debug { dst.Client = &debugClient{client: authClient} } else { dst.Client = authClient } existingIndexOption := cliContext.String(internal.ExistingIndexFlagName) if 
!internal.SupportedArg(existingIndexOption, internal.SupportedExistingIndexOptions) { return fmt.Errorf("unexpected value for flag %s: %s, expected types %v", internal.ExistingIndexFlagName, existingIndexOption, internal.SupportedExistingIndexOptions) } options := oraslib.DefaultCopyGraphOptions options.PreCopy = func(_ context.Context, desc ocispec.Descriptor) error { if !quiet { fmt.Printf("pushing artifact with digest: %v\n", desc.Digest) } return nil } options.PostCopy = func(_ context.Context, desc ocispec.Descriptor) error { if !quiet { fmt.Printf("successfully pushed artifact with digest: %v\n", desc.Digest) } return nil } options.OnCopySkipped = func(ctx context.Context, desc ocispec.Descriptor) error { if !quiet { fmt.Printf("skipped artifact with digest: %v\n", desc.Digest) } return nil } for _, platform := range ps { indexDescriptors, imgManifestDesc, err := soci.GetIndexDescriptorCollection(ctx, cs, artifactsDb, img, []ocispec.Platform{platform}) if err != nil { return err } if len(indexDescriptors) == 0 { return fmt.Errorf("could not find any soci indices to push") } sort.Slice(indexDescriptors, func(i, j int) bool { return indexDescriptors[i].CreatedAt.Before(indexDescriptors[j].CreatedAt) }) indexDesc := indexDescriptors[len(indexDescriptors)-1] if existingIndexOption != internal.Allow { if !quiet { fmt.Println("checking if a soci index already exists in remote repository...") } client := fs.NewOCIArtifactClient(dst) referrers, err := client.AllReferrers(ctx, ocispec.Descriptor{Digest: imgManifestDesc.Digest}) if err != nil && !errors.Is(err, fs.ErrNoReferrers) { return fmt.Errorf("failed to fetch list of referrers: %w", err) } if len(referrers) > 0 { var foundMessage string if len(referrers) > 1 { foundMessage = "multiple soci indices found in remote repository" } else { foundMessage = fmt.Sprintf("soci index found in remote repository with digest: %s", referrers[0].Digest.String()) } switch existingIndexOption { case internal.Skip: if !quiet { 
fmt.Printf("%s: skipping pushing artifacts for image manifest: %s\n", foundMessage, imgManifestDesc.Digest.String()) } continue case internal.Warn: fmt.Printf("[WARN] %s: pushing index anyway\n", foundMessage) // Fall through and attempt to push the index anyway } } } if quiet { fmt.Println(indexDesc.Digest.String()) } else { fmt.Printf("pushing soci index with digest: %v\n", indexDesc.Digest) } err = oraslib.CopyGraph(context.Background(), src, dst, indexDesc.Descriptor, options) if err != nil { return fmt.Errorf("error pushing graph to remote: %w", err) } } return nil }, } type debugClient struct { client remote.Client } func (c *debugClient) Do(req *http.Request) (*http.Response, error) { fmt.Printf("http req %s %s\n", req.Method, req.URL) res, err := c.client.Do(req) if err != nil { fmt.Printf("http err %v\n", err) } else { fmt.Printf("http res %s\n", res.Status) } return res, err } soci-snapshotter-0.4.1/cmd/soci/commands/rebuild_db.go000066400000000000000000000037221454010642300227470ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package commands import ( "path/filepath" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/urfave/cli" ) var RebuildDBCommand = cli.Command{ Name: "rebuild-db", Usage: `rebuild the artifacts database. 
You should use this command after "rpull" so that indices/ztocs can be discovered by commands like "soci index list", and after "index rm" when using the containerd content store so that deleted orphaned zTOCs will be forgotten`, Action: func(cliContext *cli.Context) error { client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() containerdContentStore := client.ContentStore() artifactsDb, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } ctx, blobStore, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return err } contentStorePath, err := store.GetContentStorePath(store.ContentStoreType(cliContext.GlobalString("content-store"))) if err != nil { return err } blobStorePath := filepath.Join(contentStorePath, "blobs") return artifactsDb.SyncWithLocalStore(ctx, blobStore, blobStorePath, containerdContentStore) }, } soci-snapshotter-0.4.1/cmd/soci/commands/ztoc/000077500000000000000000000000001454010642300213005ustar00rootroot00000000000000soci-snapshotter-0.4.1/cmd/soci/commands/ztoc/get-file.go000066400000000000000000000062301454010642300233240ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package ztoc import ( "context" "errors" "fmt" "io" "os" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/content" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) var getFileCommand = cli.Command{ Name: "get-file", Usage: "retrieve a file from a local image layer using a specified ztoc", ArgsUsage: " ", Flags: []cli.Flag{ cli.StringFlag{ Name: "output, o", Usage: "the file to write the extracted content. Defaults to stdout", }, }, Action: func(cliContext *cli.Context) error { if len(cliContext.Args()) != 2 { return errors.New("please provide both a ztoc digest and a filename to extract") } ztocDigest, err := digest.Parse(cliContext.Args()[0]) if err != nil { return err } file := cliContext.Args()[1] client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() toc, err := getZtoc(ctx, cliContext, ztocDigest) if err != nil { return err } layerReader, err := getLayer(ctx, ztocDigest, client.ContentStore()) if err != nil { return err } defer layerReader.Close() data, err := toc.ExtractFile(io.NewSectionReader(layerReader, 0, int64(toc.CompressedArchiveSize)), file) if err != nil { return err } outfile := cliContext.String("output") if outfile != "" { os.WriteFile(outfile, data, 0) return nil } fmt.Println(string(data)) return nil }, } func getZtoc(ctx context.Context, cliContext *cli.Context, d digest.Digest) (*ztoc.Ztoc, error) { ctx, blobStore, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return nil, err } reader, err := blobStore.Fetch(ctx, v1.Descriptor{Digest: d}) if err != nil { return nil, err } defer reader.Close() return 
ztoc.Unmarshal(reader) } func getLayer(ctx context.Context, ztocDigest digest.Digest, cs content.Store) (content.ReaderAt, error) { metadata, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return nil, err } artifact, err := metadata.GetArtifactEntry(ztocDigest.String()) if err != nil { return nil, err } layerDigest, err := digest.Parse(artifact.OriginalDigest) if err != nil { return nil, err } return cs.ReaderAt(ctx, v1.Descriptor{Digest: layerDigest}) } soci-snapshotter-0.4.1/cmd/soci/commands/ztoc/info.go000066400000000000000000000075031454010642300225670ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package ztoc import ( "context" "encoding/json" "fmt" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) type Info struct { Version string `json:"version"` BuildTool string `json:"build_tool"` Size int64 `json:"size"` SpanSize compression.Offset `json:"span_size"` NumSpans compression.SpanID `json:"num_spans"` NumFiles int `json:"num_files"` NumMultiSpanFiles int `json:"num_multi_span_files"` Files []FileInfo `json:"files"` } type FileInfo struct { Filename string `json:"filename"` Offset int64 `json:"offset"` Size int64 `json:"size"` Type string `json:"type"` StartSpan compression.SpanID `json:"start_span"` EndSpan compression.SpanID `json:"end_span"` } var infoCommand = cli.Command{ Name: "info", Usage: "get detailed info about a ztoc", ArgsUsage: "", Action: func(cliContext *cli.Context) error { digest, err := digest.Parse(cliContext.Args().First()) if err != nil { return err } db, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } entry, err := db.GetArtifactEntry(digest.String()) if err != nil { return err } if entry.MediaType == soci.SociIndexArtifactType { return fmt.Errorf("the provided digest belongs to a SOCI index. 
Use `soci index info` to get the detailed information about it") } ctx, cancel := context.WithTimeout(context.Background(), cliContext.GlobalDuration("timeout")) defer cancel() ctx, store, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cliContext.GlobalString("content-store"))), store.WithNamespace(cliContext.GlobalString("namespace"))) if err != nil { return err } reader, err := store.Fetch(ctx, v1.Descriptor{Digest: digest}) if err != nil { return err } defer reader.Close() ztoc, err := ztoc.Unmarshal(reader) if err != nil { return err } gzInfo, err := ztoc.Zinfo() if err != nil { return err } multiSpanFiles := 0 zinfo := Info{ Version: string(ztoc.Version), BuildTool: ztoc.BuildToolIdentifier, Size: entry.Size, SpanSize: gzInfo.SpanSize(), NumSpans: ztoc.MaxSpanID + 1, NumFiles: len(ztoc.FileMetadata), } for _, v := range ztoc.FileMetadata { startSpan := gzInfo.UncompressedOffsetToSpanID(v.UncompressedOffset) endSpan := gzInfo.UncompressedOffsetToSpanID(v.UncompressedOffset + v.UncompressedSize) if startSpan != endSpan { multiSpanFiles++ } zinfo.Files = append(zinfo.Files, FileInfo{ Filename: v.Name, Offset: int64(v.UncompressedOffset), Size: int64(v.UncompressedSize), Type: v.Type, StartSpan: startSpan, EndSpan: endSpan, }) } zinfo.NumMultiSpanFiles = multiSpanFiles j, err := json.MarshalIndent(zinfo, "", " ") if err != nil { return err } fmt.Println(string(j)) return nil }, } soci-snapshotter-0.4.1/cmd/soci/commands/ztoc/list.go000066400000000000000000000100541454010642300226020ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "fmt" "os" "text/tabwriter" "github.com/awslabs/soci-snapshotter/soci" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) var listCommand = cli.Command{ Name: "list", Description: "list ztocs", Aliases: []string{"ls"}, Flags: []cli.Flag{ cli.StringFlag{ Name: "ztoc-digest", Usage: "filter ztocs by digest", }, cli.StringFlag{ Name: "image-ref", Usage: "filter ztocs to those that are associated with a specific image", }, cli.BoolFlag{ Name: "verbose, v", Usage: "display extra debugging messages", }, cli.BoolFlag{ Name: "quiet, q", Usage: "only display the index digests", }, }, Action: func(cliContext *cli.Context) error { db, err := soci.NewDB(soci.ArtifactsDbPath()) if err != nil { return err } ztocDgst := cliContext.String("ztoc-digest") imgRef := cliContext.String("image-ref") verbose := cliContext.Bool("verbose") quiet := cliContext.Bool("quiet") var artifacts []*soci.ArtifactEntry if imgRef == "" { db.Walk(func(ae *soci.ArtifactEntry) error { if ae.Type == soci.ArtifactEntryTypeLayer && (ztocDgst == "" || ae.Digest == ztocDgst) { artifacts = append(artifacts, ae) } return nil }) } else { client, ctx, cancel, err := commands.NewClient(cliContext) if err != nil { return err } defer cancel() is := client.ImageService() img, err := is.Get(ctx, imgRef) if err != nil { return err } platform, err := images.Platforms(ctx, client.ContentStore(), img.Target) if err != nil { return err } var 
layers []ocispec.Descriptor for _, p := range platform { manifest, err := images.Manifest(ctx, client.ContentStore(), img.Target, platforms.OnlyStrict(p)) if err != nil && verbose { // print a warning message if a manifest can't be resolved // continue looking for manifests of other platforms fmt.Printf("no image manifest for platform %s/%s. err: %v\n", p.Architecture, p.OS, err) } else { layers = append(layers, manifest.Layers...) } } if len(layers) == 0 { return fmt.Errorf("no image layers. could not filter ztoc") } db.Walk(func(ae *soci.ArtifactEntry) error { if ae.Type == soci.ArtifactEntryTypeLayer { if ztocDgst == "" { // add all ztocs associated with the image for _, l := range layers { if ae.OriginalDigest == l.Digest.String() { artifacts = append(artifacts, ae) } } } else { // only add the specific ztoc if the ztoc is with an image layer for _, l := range layers { if ae.Digest == ztocDgst && ae.OriginalDigest == l.Digest.String() { artifacts = append(artifacts, ae) } } } } return nil }) if ztocDgst != "" && len(artifacts) == 0 { return fmt.Errorf("the specified ztoc doesn't exist or it's not with the specified image") } } if quiet { for _, ae := range artifacts { os.Stdout.Write([]byte(fmt.Sprintf("%s\n", ae.Digest))) } return nil } writer := tabwriter.NewWriter(os.Stdout, 8, 8, 4, ' ', 0) writer.Write([]byte("DIGEST\tSIZE\tLAYER DIGEST\t\n")) for _, artifact := range artifacts { writer.Write([]byte(fmt.Sprintf("%s\t%d\t%s\t\n", artifact.Digest, artifact.Size, artifact.OriginalDigest))) } writer.Flush() return nil }, } soci-snapshotter-0.4.1/cmd/soci/commands/ztoc/ztoc.go000066400000000000000000000014361454010642300226120ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import "github.com/urfave/cli" var Command = cli.Command{ Name: "ztoc", Usage: "manage ztocs", Subcommands: []cli.Command{ infoCommand, getFileCommand, listCommand, }, } soci-snapshotter-0.4.1/cmd/soci/main.go000066400000000000000000000057351454010642300200050ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package main import ( "fmt" "os" "github.com/awslabs/soci-snapshotter/cmd/soci/commands" "github.com/awslabs/soci-snapshotter/cmd/soci/commands/image" "github.com/awslabs/soci-snapshotter/cmd/soci/commands/index" "github.com/awslabs/soci-snapshotter/cmd/soci/commands/ztoc" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/version" "github.com/containerd/containerd/cmd/ctr/commands/run" "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" //nolint:staticcheck "github.com/containerd/containerd/pkg/seed" "github.com/urfave/cli" ) func init() { //nolint:staticcheck seed.WithTimeAndRand() //lint:ignore SA1019, WithTimeAndRand is deprecated and we should remove it. } func main() { app := cli.NewApp() app.Name = "soci" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "address, a", Usage: "address for containerd's GRPC server", Value: defaults.DefaultAddress, EnvVar: "CONTAINERD_ADDRESS", }, cli.StringFlag{ Name: "namespace, n", Usage: "namespace to use with commands", Value: namespaces.Default, EnvVar: namespaces.NamespaceEnvVar, }, cli.DurationFlag{ Name: "timeout", Usage: "timeout for commands", }, cli.BoolFlag{ Name: "debug", Usage: "enable debug output", }, cli.StringFlag{ Name: "content-store", Usage: "use a specific content store (soci or containerd)", Value: config.DefaultContentStoreType, }, } app.Version = fmt.Sprintf("%s %s", version.Version, version.Revision) app.Commands = []cli.Command{ image.Command, index.Command, ztoc.Command, commands.CreateCommand, commands.PushCommand, run.Command, commands.RebuildDBCommand, } if err := app.Run(os.Args); err != nil { fmt.Fprintf(os.Stderr, "soci: %v\n", err) os.Exit(1) } } soci-snapshotter-0.4.1/config/000077500000000000000000000000001454010642300162655ustar00rootroot00000000000000soci-snapshotter-0.4.1/config/config.go000066400000000000000000000056261454010642300200720ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package config import ( "fmt" "os" "github.com/pelletier/go-toml" ) const ( // Default path to snapshotter root dir SociSnapshotterRootPath = "/var/lib/soci-snapshotter-grpc/" defaultConfigPath = "/etc/soci-snapshotter-grpc/config.toml" ) type Config struct { ServiceConfig // MetricsAddress is address for the metrics API MetricsAddress string `toml:"metrics_address"` // MetricsNetwork is the type of network for the metrics API (e.g. tcp or unix) MetricsNetwork string `toml:"metrics_network"` // NoPrometheus is a flag to disable the emission of the metrics NoPrometheus bool `toml:"no_prometheus"` // DebugAddress is a Unix domain socket address where the snapshotter exposes /debug/ endpoints. 
DebugAddress string `toml:"debug_address"` // MetadataStore is the type of the metadata store to use. MetadataStore string `toml:"metadata_store" default:"db"` } type configParser func(*Config) func NewConfigFromToml(cfgPath string) (*Config, error) { cfg := &Config{} // Get configuration from specified file tree, err := toml.LoadFile(cfgPath) if err != nil && !(os.IsNotExist(err) && cfgPath == defaultConfigPath) { return nil, fmt.Errorf("failed to load config file %q", cfgPath) } if err := tree.Unmarshal(cfg); err != nil { return nil, fmt.Errorf("failed to unmarshal config file %q", cfgPath) } parsers := []configParser{parseRootConfig, parseServiceConfig, parseFSConfig} for _, p := range parsers { p(cfg) } return cfg, nil } func parseRootConfig(cfg *Config) { if cfg.MetricsNetwork == "" { cfg.MetricsNetwork = defaultMetricsNetwork } } soci-snapshotter-0.4.1/config/defaults.go000066400000000000000000000076701454010642300204350ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package config // Config (root) defaults const ( defaultMetricsNetwork = "tcp" ) // ServiceConfig defaults const ( DefaultImageServiceAddress = "/run/containerd/containerd.sock" ) // FSConfig defaults const ( defaultFuseTimeoutSec = 1 // defaultBgSilencePeriodMsec specifies the amount of time the background fetcher will wait once a new layer comes in // before (re)starting fetches. defaultBgSilencePeriodMsec = 30_000 // defaultBgFetchPeriodMsec specifies how often the fetch will occur. // The background fetcher will fetch a single span every `defaultFetchPeriod`. defaultBgFetchPeriodMsec = 500 // defaultBgMaxQueueSize specifies the maximum size of the bg-fetcher work queue i.e., the maximum number // of span managers that can be queued. In case of overflow, the `Add` call // will block until a span manager is removed from the workqueue. defaultBgMaxQueueSize = 100 // defaultBgMetricEmitPeriodSec is the default amount of interval at which the background fetcher emits metrics defaultBgMetricEmitPeriodSec = 10 // defaultMountTimeoutSec is the amount of time Mount will time out if a layer can't be resolved. defaultMountTimeoutSec = 30 // defaultFuseMetricsEmitWaitDurationSec is the amount of time the snapshotter will wait before emitting the metrics for FUSE operation. defaultFuseMetricsEmitWaitDurationSec = 60 defaultValidIntervalSec = 60 defaultFetchTimeoutSec = 300 // defaultDialTimeoutMsec is the default number of milliseconds before timeout while connecting to a remote endpoint. See `TimeoutConfig.DialTimeout`. defaultDialTimeoutMsec = 3_000 // defaultResponseHeaderTimeoutMsec is the default number of milliseconds before timeout while waiting for response header from a remote endpoint. 
See `TimeoutConfig.ResponseHeaderTimeout`. defaultResponseHeaderTimeoutMsec = 3_000 // defaultRequestTimeoutMsec is the default number of milliseconds that the entire request can take before timeout. See `TimeoutConfig.RequestTimeout`. defaultRequestTimeoutMsec = 30_000 // defaults based on a target total retry time of at least 5s. 30*((2^8)-1)>5000 // defaultMaxRetries is the default number of retries that a retryable request will make. See `RetryConfig.MaxRetries`. defaultMaxRetries = 8 // defaultMinWaitMsec is the default minimum number of milliseconds between attempts. See `RetryConfig.MinWait`. defaultMinWaitMsec = 30 // defaultMaxWaitMsec is the default maximum number of milliseconds between attempts. See `RetryConfig.MaxWait`. defaultMaxWaitMsec = 300_000 // DefaultContentStore chooses the soci or containerd content store as the default DefaultContentStoreType = "soci" ) soci-snapshotter-0.4.1/config/fs.go000066400000000000000000000234101454010642300172240ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package config import ( "github.com/containerd/containerd/namespaces" ) type FSConfig struct { HTTPCacheType string `toml:"http_cache_type"` FSCacheType string `toml:"filesystem_cache_type"` ResolveResultEntry int `toml:"resolve_result_entry"` Debug bool `toml:"debug"` AllowNoVerification bool `toml:"allow_no_verification"` DisableVerification bool `toml:"disable_verification"` MaxConcurrency int64 `toml:"max_concurrency"` NoPrometheus bool `toml:"no_prometheus"` MountTimeoutSec int64 `toml:"mount_timeout_sec"` FuseMetricsEmitWaitDurationSec int64 `toml:"fuse_metrics_emit_wait_duration_sec"` RetryableHTTPClientConfig `toml:"http"` BlobConfig `toml:"blob"` DirectoryCacheConfig `toml:"directory_cache"` FuseConfig `toml:"fuse"` BackgroundFetchConfig `toml:"background_fetch"` ContentStoreConfig `toml:"content_store"` } // BlobConfig is config for layer blob management. type BlobConfig struct { ValidInterval int64 `toml:"valid_interval"` CheckAlways bool `toml:"check_always"` FetchTimeoutSec int64 `toml:"fetching_timeout_sec"` ForceSingleRangeMode bool `toml:"force_single_range_mode"` MaxRetries int `toml:"max_retries"` MinWaitMsec int64 `toml:"min_wait_msec"` MaxWaitMsec int64 `toml:"max_wait_msec"` // MaxSpanVerificationRetries defines the number of additional times fetch // will be invoked in case of span verification failure. 
MaxSpanVerificationRetries int `toml:"max_span_verification_retries"` } // DirectoryCacheConfig is config for directory-based cache. type DirectoryCacheConfig struct { MaxLRUCacheEntry int `toml:"max_lru_cache_entry"` MaxCacheFds int `toml:"max_cache_fds"` SyncAdd bool `toml:"sync_add"` Direct bool `toml:"direct" default:"true"` } type FuseConfig struct { // AttrTimeout defines overall timeout attribute for a file system in seconds. AttrTimeout int64 `toml:"attr_timeout"` // EntryTimeout defines TTL for directory, name lookup in seconds. EntryTimeout int64 `toml:"entry_timeout"` // NegativeTimeout defines the overall entry timeout for failed lookups. NegativeTimeout int64 `toml:"negative_timeout"` // LogFuseOperations enables logging of operations on FUSE FS. This is to be used // for debugging purposes only. This option may emit sensitive information, // e.g. filenames and paths within an image LogFuseOperations bool `toml:"log_fuse_operations"` } type BackgroundFetchConfig struct { Disable bool `toml:"disable"` // SilencePeriodMsec defines the time (in ms) the background fetcher // will be paused for when a new image is mounted. SilencePeriodMsec int64 `toml:"silence_period_msec"` // FetchPeriodMsec specifies how often a background fetch will occur. // The background fetcher will fetch one span every FetchPeriodMsec. FetchPeriodMsec int64 `toml:"fetch_period_msec"` // MaxQueueSize specifies the maximum size of the work queue // i.e., the maximum number of span managers that can be queued // in the background fetcher. MaxQueueSize int `toml:"max_queue_size"` // EmitMetricPeriodSec is the amount of interval (in second) at which the background // fetcher emits metrics EmitMetricPeriodSec int64 `toml:"emit_metric_period_sec"` } // RetryConfig represents the settings for retries in a retryable http client. type RetryConfig struct { // MaxRetries is the maximum number of retries before giving up on a retryable request. 
// This does not include the initial request so the total number of attempts will be MaxRetries + 1. MaxRetries int // MinWait is the minimum wait time between attempts. The actual wait time is governed by the BackoffStrategy, // but the wait time will never be shorter than this duration. MinWaitMsec int64 // MaxWait is the maximum wait time between attempts. The actual wait time is governed by the BackoffStrategy, // but the wait time will never be longer than this duration. MaxWaitMsec int64 } // TimeoutConfig represents the settings for timeout at various points in a request lifecycle in a retryable http client. type TimeoutConfig struct { // DialTimeout is the maximum duration that connection can take before a request attempt is timed out. DialTimeoutMsec int64 // ResponseHeaderTimeout is the maximum duration waiting for response headers before a request attempt is timed out. // This starts after the entire request body is uploaded to the remote endpoint and stops when the request headers // are fully read. It does not include reading the body. ResponseHeaderTimeoutMsec int64 // RequestTimeout is the maximum duration before the entire request attempt is timed out. This starts when the // client starts the connection attempt and ends when the entire response body is read. 
RequestTimeoutMsec int64 } // RetryableHTTPClientConfig is the complete config for a retryable http client type RetryableHTTPClientConfig struct { TimeoutConfig RetryConfig } // ContentStoreConfig chooses and configures the content store type ContentStoreConfig struct { Type string `toml:"type"` Namespace string `toml:"namespace"` } func parseFSConfig(cfg *Config) { // Parse top level fs config if cfg.MountTimeoutSec == 0 { cfg.MountTimeoutSec = defaultMountTimeoutSec } if cfg.FuseMetricsEmitWaitDurationSec == 0 { cfg.FuseMetricsEmitWaitDurationSec = defaultFuseMetricsEmitWaitDurationSec } // Parse nested fs configs parsers := []configParser{parseFuseConfig, parseBackgroundFetchConfig, parseRetryableHTTPClientConfig, parseBlobConfig, parseContentStoreConfig} for _, p := range parsers { p(cfg) } } func parseFuseConfig(cfg *Config) { if cfg.FuseConfig.AttrTimeout == 0 { cfg.FuseConfig.AttrTimeout = defaultFuseTimeoutSec } if cfg.FuseConfig.EntryTimeout == 0 { cfg.FuseConfig.EntryTimeout = defaultFuseTimeoutSec } if cfg.FuseConfig.NegativeTimeout == 0 { cfg.FuseConfig.NegativeTimeout = defaultFuseTimeoutSec } } func parseBackgroundFetchConfig(cfg *Config) { if cfg.BackgroundFetchConfig.FetchPeriodMsec == 0 { cfg.BackgroundFetchConfig.FetchPeriodMsec = defaultBgFetchPeriodMsec } if cfg.BackgroundFetchConfig.SilencePeriodMsec == 0 { cfg.BackgroundFetchConfig.SilencePeriodMsec = defaultBgSilencePeriodMsec } if cfg.BackgroundFetchConfig.MaxQueueSize == 0 { cfg.BackgroundFetchConfig.MaxQueueSize = defaultBgMaxQueueSize } if cfg.BackgroundFetchConfig.EmitMetricPeriodSec == 0 { cfg.BackgroundFetchConfig.EmitMetricPeriodSec = defaultBgMetricEmitPeriodSec } } func parseRetryableHTTPClientConfig(cfg *Config) { if cfg.RetryableHTTPClientConfig.TimeoutConfig.DialTimeoutMsec == 0 { cfg.RetryableHTTPClientConfig.TimeoutConfig.DialTimeoutMsec = defaultDialTimeoutMsec } if cfg.RetryableHTTPClientConfig.TimeoutConfig.ResponseHeaderTimeoutMsec == 0 { 
cfg.RetryableHTTPClientConfig.TimeoutConfig.ResponseHeaderTimeoutMsec = defaultResponseHeaderTimeoutMsec } if cfg.RetryableHTTPClientConfig.TimeoutConfig.RequestTimeoutMsec == 0 { cfg.RetryableHTTPClientConfig.TimeoutConfig.RequestTimeoutMsec = defaultRequestTimeoutMsec } if cfg.RetryableHTTPClientConfig.RetryConfig.MaxRetries == 0 { cfg.RetryableHTTPClientConfig.RetryConfig.MaxRetries = defaultMaxRetries } if cfg.RetryableHTTPClientConfig.RetryConfig.MinWaitMsec == 0 { cfg.RetryableHTTPClientConfig.RetryConfig.MinWaitMsec = defaultMinWaitMsec } if cfg.RetryableHTTPClientConfig.RetryConfig.MaxWaitMsec == 0 { cfg.RetryableHTTPClientConfig.RetryConfig.MaxWaitMsec = defaultMaxWaitMsec } } func parseBlobConfig(cfg *Config) { if cfg.BlobConfig.ValidInterval == 0 { cfg.BlobConfig.ValidInterval = defaultValidIntervalSec } if cfg.BlobConfig.CheckAlways { cfg.BlobConfig.ValidInterval = 0 } if cfg.BlobConfig.FetchTimeoutSec == 0 { cfg.BlobConfig.FetchTimeoutSec = defaultFetchTimeoutSec } if cfg.BlobConfig.MaxRetries == 0 { cfg.BlobConfig.MaxRetries = cfg.RetryableHTTPClientConfig.RetryConfig.MaxRetries } if cfg.BlobConfig.MinWaitMsec == 0 { cfg.BlobConfig.MinWaitMsec = cfg.RetryableHTTPClientConfig.RetryConfig.MinWaitMsec } if cfg.BlobConfig.MaxWaitMsec == 0 { cfg.BlobConfig.MaxWaitMsec = cfg.RetryableHTTPClientConfig.RetryConfig.MaxWaitMsec } } func parseContentStoreConfig(cfg *Config) { if cfg.ContentStoreConfig.Type == "" { cfg.ContentStoreConfig.Type = DefaultContentStoreType } if cfg.ContentStoreConfig.Namespace == "" { cfg.ContentStoreConfig.Namespace = namespaces.Default } } soci-snapshotter-0.4.1/config/resolver.go000066400000000000000000000037571454010642300204710ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package config // ResolverConfig is config for resolving registries. type ResolverConfig struct { Host map[string]HostConfig `toml:"host"` } type HostConfig struct { Mirrors []MirrorConfig `toml:"mirrors"` } type MirrorConfig struct { // Host is the hostname of the host. Host string `toml:"host"` // Insecure is true means use http scheme instead of https. Insecure bool `toml:"insecure"` // RequestTimeoutSec is timeout seconds of each request to the registry. // RequestTimeoutSec == 0 indicates the default timeout (defaultRequestTimeoutSec). // RequestTimeoutSec < 0 indicates no timeout. RequestTimeoutSec int64 `toml:"request_timeout_sec"` } soci-snapshotter-0.4.1/config/service.go000066400000000000000000000060301454010642300202530ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package config type ServiceConfig struct { FSConfig // KubeconfigKeychainConfig is config for kubeconfig-based keychain. KubeconfigKeychainConfig `toml:"kubeconfig_keychain"` // CRIKeychainConfig is config for CRI-based keychain. CRIKeychainConfig `toml:"cri_keychain"` // ResolverConfig is config for resolving registries. ResolverConfig `toml:"resolver"` // SnapshotterConfig is snapshotter-related config. SnapshotterConfig `toml:"snapshotter"` } // KubeconfigKeychainConfig is config for kubeconfig-based keychain. type KubeconfigKeychainConfig struct { // EnableKeychain enables kubeconfig-based keychain EnableKeychain bool `toml:"enable_keychain"` // KubeconfigPath is the path to kubeconfig which can be used to sync // secrets on the cluster into this snapshotter. KubeconfigPath string `toml:"kubeconfig_path"` } // CRIKeychainConfig is config for CRI-based keychain. 
type CRIKeychainConfig struct { // EnableKeychain enables CRI-based keychain EnableKeychain bool `toml:"enable_keychain"` // ImageServicePath is the path to the unix socket of backing CRI Image Service (e.g. containerd CRI plugin) ImageServicePath string `toml:"image_service_path"` } // SnapshotterConfig is snapshotter-related config. type SnapshotterConfig struct { // MinLayerSize skips remote mounting of smaller layers MinLayerSize int64 `toml:"min_layer_size"` // AllowInvalidMountsOnRestart allows that there are snapshot mounts that cannot access to the // data source when restarting the snapshotter. // NOTE: User needs to manually remove the snapshots from containerd's metadata store using // ctr (e.g. `ctr snapshot rm`). AllowInvalidMountsOnRestart bool `toml:"allow_invalid_mounts_on_restart"` } func parseServiceConfig(cfg *Config) { if cfg.CRIKeychainConfig.ImageServicePath == "" { cfg.CRIKeychainConfig.ImageServicePath = DefaultImageServiceAddress } } soci-snapshotter-0.4.1/docs/000077500000000000000000000000001454010642300157505ustar00rootroot00000000000000soci-snapshotter-0.4.1/docs/benchmark.md000066400000000000000000000137011454010642300202260ustar00rootroot00000000000000# Benchmarking SOCI snapshotter This document walks through how to run the SOCI snapshotter benchmarks, including running the benchmarks on default and custom workloads. There are two types of benchmarks: ```Performance benchmarks```: The performance benchmark focuses on measuring various metrics using the soci snapshotter. ```Comparison benchmarks```: Runs benchmark tests with soci snapshotter and then the same tests with overlayFS snapshotter. We can use these results to compare the difference in performance between the two. 
- [Prerequisites](#prerequisites) - [Running benchmarks on default workloads](#running-benchmarks-on-default-workloads) - [Running benchmarks on custom workloads](#running-benchmarks-on-custom-workloads) - [Benchmark binaries cli flags](#benchmark-binaries-have-different-cli-flags) - [Csv file format for custom workloads](#csv-file-format-for-custom-workloads) - [Default workloads](#default-workloads) - [Benchmark results format](#benchmark-results) ## Prerequisites Follow the [Getting started guide](/docs/getting-started.md) and complete setting up the project. ## Running benchmarks on default workloads There a set of [8 workloads](#default-custom-workloads) that are included in the benchmark binaries that can readily be benchmarked without any additional setup. These workloads are hosted on a public ECR repository and hence do not need any credentials. ```make benchmarks``` - Runs both the Performance and Comparison benchmark tests on the workloads 5 times ```make benchmarks-perf-test``` - Runs only the Performance benchmark tests on the workloads 5 times ## Running benchmarks on custom workloads Custom workloads can also be benchmarked with SOCI. In order to run the benchmarks on custom workloads the custom container image needs to have its soci indices generated and pushed to a contianer registry as described in the [Getting started docs](/docs/getting-started.md) Generate benchmark binaries: ``` make build-benchmarks``` will generate benchmark binaries for performance testing and comparison testing against overlayFS. The binaries will be available in the ```/benchmark/bin``` folder. 
### Benchmark binaries have different cli flags: | Flag | Description | Required / Optional | |----------|----------|----------| | ```-f``` | File path to a csv file containing details of multiple images to be tested | Optional | ```-count``` | Specify number of times the benchmarker needs to run | Optional | ```-show-commit``` | Tag the latest commit hash to the benchmark results | Optional We can now run benchmarks on custom workloads using the ```-f``` flag to specify the file path to a csv file containing details of the workloads. ### Csv file format for custom workloads Ensure that the file being used with the ```-f``` flag follows the following format ```shell , , , ``` Example : ```shell ffmpeg, public.ecr.aws/soci-workshop-examples/ffmpeg:latest, "Hello World",ef63578971ebd8fc700c74c96f81dafab4f3875e9117ef3c5eb7446e169d91cb ``` ### Default workloads | Name | ECR Repository/Tag | Description | Size | |------------------|----------------------|---------------------------|------------| | ffmpeg | public.ecr.aws/soci-workshop-examples/ffmpeg:latest | A minimalist Docker image converting a video file format using ffmpeg. | Medium(~600MB) | | tensor_flow_gpu | public.ecr.aws/soci-workshop-examples/tensorflow_gpu:latest | Software library for machine learning and artificial intelligence with nvidia CUDA drivers installed | Large (> 1GB) | | tensor_flow | public.ecr.aws/soci-workshop-examples/tensorflow:latest | Software library for machine learning and artificial intelligence | Medium (~600MB) | | NodeJs | public.ecr.aws/soci-workshop-examples/node:latest | Back-end JavaScript runtime environment | Large (~1GB) | | busybox | public.ecr.aws/soci-workshop-examples/busybox:latest | Unix utilities suite. | Small (~2MB) | | MongoDb | public.ecr.aws/soci-workshop-examples/mongo:latest | MongoDB is a source-available cross-platform document-oriented database program. 
| Medium (~700MB) | | RabbitMQ | public.ecr.aws/soci-workshop-examples/rabbitmq:latest | RabbitMQ is an open-source message-broker software | Small (~100MB) | | Redis | public.ecr.aws/soci-workshop-examples/redis:latest | Redis is an in-memory data structure store, used as a distributed, in-memory key–value database, cache and message broker, with optional durability. | Small (~50MB) | ### Benchmark results The benchmark tests generate results of various metrics, the results also provide statistics like mean, standard deviation, p25,p50, p75 and p90 (90th percentile) alongside the min and max value calculated. All measured times are in seconds. Results are available in the ```/benchmark//output ```folder in the following format ```shell { "commit": "commit_hash", "benchmarkTests": [ { "testName": "Image-name", "numberOfTests": 1, "fullRunStats": { "BenchmarkTimes": [ 39.098384883 ], "stdDev": 0, "mean": 39.098384883, "min": 39.098384883, "pct25": 39.098384883, "pct50": 39.098384883, "pct75": 39.098384883, "pct90": 39.098384883, "max": 39.098384883 }, "pullStats": { "BenchmarkTimes": [ 38.801 ], "stdDev": 0, "mean": 38.801, "min": 38.801, "pct25": 38.801, "pct50": 38.801, "pct75": 38.801, "pct90": 38.801, "max": 38.801 }, "lazyTaskStats": { "BenchmarkTimes": [ 0.009 ], "stdDev": 0, "mean": 0.009, "min": 0.009, "pct25": 0.009, "pct50": 0.009, "pct75": 0.009, "pct90": 0.009, "max": 0.009 }, "localTaskStats": { "BenchmarkTimes": [ 0.009 ], "stdDev": 0, "mean": 0.009, "min": 0.009, "pct25": 0.009, "pct50": 0.009, "pct75": 0.009, "pct90": 0.009, "max": 0.009 } } ] } ``` soci-snapshotter-0.4.1/docs/build.md000066400000000000000000000124421454010642300173740ustar00rootroot00000000000000# Build SOCI from source This document is helpful if you plan to contribute to the project (thanks!) or want to use the latest version of either `soci-snapshotter-grpc` or `soci` CLI in the main branch. 
This document includes the following sections: - [Dependencies](#dependencies) - [Build SOCI](#build-soci) - [Test SOCI](#test-soci) - [(Optional) Contribute your change](#optional-contribute-your-change) - [Development tooling](#development-tooling) ## Dependencies The project binaries have the following dependencies. Please follow the links or commands to install them on your machine: > **Note** > We only mention the direct dependencies of the project. Some dependencies may > have their own dependencies (e.g., containerd depends on runc/cni). Please refer > to their doc for a complete installation guide (mainly containerd). - **[go](https://go.dev/doc/install) >= 1.20** - required to build the project; to confirm please check with `go version`. - **[containerd](https://github.com/containerd/containerd/blob/main/docs/getting-started.md) >= 1.4** - required to run the SOCI snapshotter; to confirm please check with `sudo containerd --version`. - **fuse** - used for mounting without root access (`sudo yum install fuse`). - **zlib** - used for decompression and ztoc creation; Both the CLI and the SOCI snapshotter build zlib statically (`sudo yum install zlib-devel zlib-static`). - **gcc** - used for compiling C code, gzip's zinfo implementation (`sudo yum install gcc`). - **[flatc](https://github.com/google/flatbuffers)** - used for compiling zTOC flatbuffer file and generating corresponding Go code. For fuse/zlib/gcc, they can be installed by your Linux package manager (e.g., `yum` or `apt-get`). For flatc, you can download and install a [release](https://github.com/google/flatbuffers/releases) into your `/usr/local` (or other `$PATH`) directory. 
For example: ```shell wget -c https://github.com/google/flatbuffers/releases/download/v23.3.3/Linux.flatc.binary.g++-10.zip sudo unzip Linux.flatc.binary.g++-10.zip -d /usr/local rm Linux.flatc.binary.g++-10.zip ``` ## Build SOCI First you need `git` to clone the repository (if you intend to contribute, you can fork the repository and clone your own fork): ```shell git clone https://github.com/awslabs/soci-snapshotter.git cd soci-snapshotter ``` SOCI uses `make` as the build tool. Assuming you're in the root directory of the repository, you can build the CLI and the snapshotter by running: ```shell make ``` This builds the project binaries into the `./out` directory. You can install them to a `PATH` directory (`/usr/local/bin`) with: ```shell sudo make install # check to make sure the SOCI CLI can be found in PATH sudo soci --help # check to make sure the SOCI snapshotter can be found in PATH sudo soci-snapshotter-grpc --help ``` When changing the zTOC flatbuffer definition, you need to regenerate the generated code package with: > It's rare to make such a change, especially delete a field which is a breaking > change and discouraged by flatbuffers. ```shell make flatc ``` ## Test SOCI We have unit tests and integration tests as part of our automated CI, as well as benchmark tests that can be used to test the performance of the SOCI snapshotter. You can run these tests using the following `Makefile` targets: - `make test`: run all unit tests. - `make integration`: run all integration tests. ### Benchmark SOCI We now have a benchmark framework available at [SOCI Benchmarking](/docs/benchmark.md) To speed up develop-test cycle, you can run individual test(s) by utilizing `go test`'s `-run` flag. For example, suppose you only want to run a test named `TestFooBar`, you can: ```shell # 1. if TestFooBar is a unit test GO_TEST_FLAGS="-run TestFooBar" make test # 2. 
if TestFooBar is an integration test GO_TEST_FLAGS="-run TestFooBar" make integration ``` ## (Optional) Contribute your change If you intend to contribute your change, you need to validate your changes pass all unit/integration tests. (i.e., `make test` and `make integration` pass). Meanwhile, there are a few steps you should follow to ensure your change is ready for review: 1. If you added any new files, make sure they contain the SOCI license header. We provide a script (`./scripts/add-ltag.sh`) that can do this. 2. Make sure your change is well-formatted and you've run `gofmt`. 3. Make sure your commit is signed (`git commit -s`). 4. As a final step, run `make check` to verify your change passes these checks. > **Note** > `make check` requires some checking tools (`golangci`, `ltag`, > `git-validation`). We provide a script (`./scripts/install-check-tools.sh`) to > help install all these checking tools. Once you pass all the tests and checks. You're ready to make your PR! ## Development tooling This repository contains two go modules, one in the root directory and the other in [`cmd`](../cmd). To describe this arrangement to tools like `gopls` (and, by extension, vscode), you need a `go.work` file listing the module locations. An example such file is included in this repository as `go.work.example` which you could rename to `go.work` to use as-is. soci-snapshotter-0.4.1/docs/debug.md000066400000000000000000000324751454010642300173730ustar00rootroot00000000000000# Debugging the SOCI snapshotter This document outlines where to find/access logs and metrics for the snapshotter. It attempts to provide some common error paths that a user might run into while using the snapshotter and provides some guidance on how to either root-cause the issue or resolve it. 
- [Finding Logs / Metrics](#finding-logs--metrics) - [Logs](#logs) - [Metrics](#metrics) - [Accessing Metrics](#accessing-metrics) - [Metrics Emitted](#metrics-emitted) - [Common Scenarios](#common-scenarios) - [`rpull`](#rpull) - [No lazy-loading](#no-lazy-loading) - [`rpull` Taking An Abnormal Amount Of Time](#rpull-taking-an-abnormal-amount-of-time) - [Background Fetching](#background-fetching) - [Running Container](#running-container) - [FUSE Read Failures](#fuse-read-failures) - [Debugging Tools](#debugging-tools) - [CLI](#cli) - [CPU Profiling](#cpu-profiling) # Finding Logs / Metrics ## Logs For the most part, the `soci-snapshotter-grpc` logs will be the most important place to look when debugging. If `soci-snapshotter-grpc` was started via `systemd` then you can obtain logs using `journalctl`: ```shell sudo journalctl -u soci-snapshotter.unit ``` > **Note** > The command above assumes that you have used the unit file definition [soci-snapshotter.service](../soci-snapshotter.service) we have provided. If you have created your own unit file for `soci-snapshotter-grpc` and replace `soci-snapshotter.unit` with the one you have made. If you have started `soci-snapshotter-grpc` manually, logs will either be emitted to stderr/stdout or to the destination of your choice. ## Metrics ### Accessing Metrics The snapshotter emits [Prometheus](https://prometheus.io/) metrics. To collect and access these metrics, you need to configure `metrics_address` within SOCIs' `config.toml` (located at `/etc/soci-snapshotter-grpc` by default) before starting the snapshotter. You can provide any local address(TCP) or UNIX socket (if you are using a TCP address make sure the port is not in use). To view the metrics you can send a `GET` request via any HTTP client to the `/metrics` endpoint and Prometheus will return all the metrics emitted by the snapshotter. 
```shell $ cat /etc/soci-snapshotter-grpc/config.toml metrics_address="localhost:8000" $ curl localhost:8000/metrics soci_fs_operation_duration_milliseconds_bucket{layer="sha256:328b9d3248edeb3ae6e7f9c347bcdb5632c122be218e6ecd89543cca9c8f1997",operation_type="init_metadata_store",le="1"} 1 soci_fs_operation_duration_milliseconds_bucket{layer="sha256:328b9d3248edeb3ae6e7f9c347bcdb5632c122be218e6ecd89543cca9c8f1997",operation_type="init_metadata_store",le="2"} 1 soci_fs_operation_duration_milliseconds_bucket{layer="sha256:328b9d3248edeb3ae6e7f9c347bcdb5632c122be218e6ecd89543cca9c8f1997",operation_type="init_metadata_store",le="4"} 1 soci_fs_operation_duration_milliseconds_bucket{layer="sha256:328b9d3248edeb3ae6e7f9c347bcdb5632c122be218e6ecd89543cca9c8f1997",operation_type="init_metadata_store",le="8"} 1 ... ``` ### Metrics Emitted Below is a list of metrics emitted by the snapshotter: * Mount * **operation_duration_mount (ms)** - defines how long it takes to mount a layer during `rpull`. `rpull` should only take a couple of seconds. If this value is higher than 3-5 seconds, this can indicate an issue while mounting. * **operation_duration_init_metadata_store (ms)** - measures the time it takes to parse a zTOC and prepare the respective metadata records in metadata bbolt db (it records layer digest as well). This is one of the components of `rpull`, therefore there should be a correlation between the time to parse a zTOC with updating of metadata db and the duration of layer mount operation. * Fetch from remote registry * **operation_duration_remote_registry_get (ms)** - measures the time it takes to complete a `GET` operation from remote registry for a specific layer. This metric should help in identifying network issues, when lazily fetching layer data and seeing increased container start time. * FUSE * **operation_duration_node_readdir (us)** - measures the time it takes to complete readdir() operation for a file from a specific layer. 
The per-layer granularity is to point out that each layer has its own `FUSE` mount, so it doesn’t make sense to generalize. The unit is microseconds. Large times in readdir may indicate that there are problems with the request speed from metadata db or issues with the `FUSE` implementation (less likely, since this part is least likely to get modified). * **operation_duration_synchronous_read (us)** - measures the duration of `FUSE` read() operation for the specific `FUSE` mountpoint, defined by the layer digest. The unit of measurement is microseconds. * **synchronous_read_count** - measures how many read() operations were issued for the specific `FUSE` mountpoint (defined by the layer digest) to date. The same value can be obtained from `operation_duration_synchronous_read` as the Count property. * **synchronous_bytes_served** - measures the number of bytes served for synchronous reads. * **fuse_mount_failure_count** - number of times the snapshotter falls back to use a normal overlay mount instead of mounting the layer as a `FUSE` mount. * **background_span_fetch_failure_count** - number of errors of span fetch by background fetcher. * **background_span_fetch_count** - number of spans fetched by background fetcher. * **background_fetch_work_queue_size** - number of items in the work queue of background fetcher. * **operation_duration_background_fetch** - time in milliseconds to complete background fetch for a layer. * Individual `FUSE` operation failure counts: * fuse_node_getattr_failure_count * fuse_node_listxattr_failure_count * fuse_node_lookup_failure_count * fuse_node_open_failure_count * fuse_node_readdir_failure_count * fuse_file_read_failure_count * fuse_file_getattr_failure_count * fuse_whiteout_getattr_failure_count * fuse_unknown_operation_failure_count # Common Scenarios Below are some common scenarios that may occur during `rpull` and the lifetime of running a container. 
For scenarios not covered, please feel free to [open an issue](https://github.com/awslabs/soci-snapshotter/issues/new/choose). > **Note** > To allow for more verbose logging you can set the `--log-level` flag to `debug` when starting the snapshotter. ## `rpull` During `rpull`, the image manifest, config, and layers without zTOCs are fetched from the remote registry directly. Layers that have a zTOC are mounted as a `FUSE` file system and will be pulled lazily when launching a container. Below is a list of common error paths that may occur in this phase: ### No lazy-loading If you notice that all layers are being fetched for an image or that `FUSE` mounts are not being created for layers with a zTOC, then that means that remote mounting has failed for those layers. Once you inspect the logs, you should come across an error log that contains the message `failed to prepare remote snapshot` with an `error` key describing the error that was propagated up within the snapshotter. Some possible error keys include: * `skipping mounting layer as FUSE mount: no zTOC for layer` This "error" message is not really indicative of a true error, but rather implies that the current layer does not have an associated zTOC. This is expected for layers that do not meet the minimum-layer size criteria established when creating the soci-index/zTOCs. * `unable to fetch SOCI artifacts: ` This error indicates that the soci index along with the corresponding zTOC could not be fetched from the remote registry. This can be for a variety of different reasons. The most common reason is that the resolver could not authenticate against the remote registry. The snapshotter uses the docker resolver to resolve blobs in the remote so you must authenticate with docker first. 
If you are using `ECR` as your registry you can: ```shell export ECR_PASS=$(aws ecr get-login-password --region ) echo $ECR_PASS | sudo docker login -u AWS --password-stdin $ECR_REGISTRY ``` > **Note** > SOCI artifacts are only fetched when preparing the first layer. If they cannot be fetched, the snapshotter will fall back to the default snapshotter configured (e.g., overlayfs) entirely. ### `rpull` Taking An Abnormal Amount Of Time If you notice that `rpull` takes a considerable amount of time you can: * Look for `failed to resolve layer (timeout)` within the logs. Remote mounts may take too long if something’s wrong with layer resolving. By default remote mounts time out after 30 seconds if a layer can’t be resolved. * Check the `operation_duration_mount` metric to see if it takes an unusually long time to mount a layer. `rpull` should be taking a couple of seconds, so you can check whether any of these operations are taking more than 3-5 seconds. * Parsing zTOC and initializing the metadata db is part of `rpull`. You can check the `operation_duration_init_metadata_store` metric to see if initializing the metadata bbolt db is too slow. * Look for HTTP failure codes in the log. Such logs are in this format: `Received status code`: ### Background Fetching The background fetcher is initialized as soon as the snapshotter starts. If you have not explicitly disabled it via the snapshotter's config, it will be performing network requests to fetch data during/after `rpull`. To analyze the background fetcher you can: * Look at the `background_span_fetch_failure_count` to determine how many times a background fetch failed. * Look at `background_span_fetch_count` metric to determine how many spans were fetched by the background fetcher. If this number is 0 this may indicate network failures. * Look for `Retrying request` within the logs to determine the error and response returned from the remote registry. ## Running Container A running container produces many read requests. 
If there is a read request for a file residing within a lazy-loaded layer, then the read request is routed through the layer's `FUSE` filesystem. This path can produce several different errors: ### FUSE Read Failures Look for `failed to read the file` or `unexpected copied data size for on-demand fetch` in the logs. **Corrupt Data** * Span verification failures can occur if the fetched data is corrupt or has been altered since zTOC creation. You can look for `span digests do not match` within logs to verify that this is the root cause. * Check to see if the zTOC contains appropriate data. You can do this by running the `soci ztoc info ` command to inspect the zTOC. If the dictionaries are all 0-ed, the zTOC initially generated and subsequently pulled was corrupt. **Network Failures** The snapshotter contains custom retry logic when fetching spans (data) from the remote registry. By default it will try to fetch from remote a maximum of 9 times before returning an error. * You can look for `retrying request` within the logs to determine the error and response returned from the remote registry. * You can also check `operation_duration_remote_registry_get` metric to see how long it takes to complete `GET` from remote registry. 
# Debugging Tools ## CLI Here are some SOCI CLI commands that can be helpful when debugging issues relating to SOCI indices/zTOCs: | SOCI CLI Command | Description | | ---------------- | ----------- | | soci ztoc get-file | retrieve a file from a local image layer using a specified ztoc | | soci ztoc info | get detailed info about a ztoc (list of files+offsets, num of spans, ...etc) | | soci ztoc list | list all ztocs | | soci index info | retrieve the contents of an index | | soci index list [options] --ref | list ztocs across all images / filter indices to those that are associated with a specific image ref | | soci index rm [options] --ref | remove an index from local db / only remove indices that are associated with a specific image ref | ## CPU Profiling We can use Go's `pprof` tool to profile the snapshotter. To enable profiling, you must set the `debug_address` within the snapshotter's config (default: `/etc/soci-snapshotter-grpc/config.toml`): ```toml debug_address = "localhost:6060" ``` Once you have configured the debug address you can send a `GET` to the `/debug/pprof/profile` endpoint to receive a CPU profile of the snapshotter. You can specify an optional argument `seconds` to limit the results to a certain time span: ```shell curl http://localhost:6060/debug/pprof/profile?seconds=40 > out.pprof ``` You can use the `pprof` tool provided by the Go CLI to visualize the data within a web browser: ```shell go tool pprof -http=:8080 out.pprof ```soci-snapshotter-0.4.1/docs/design-docs/000077500000000000000000000000001454010642300201475ustar00rootroot00000000000000soci-snapshotter-0.4.1/docs/design-docs/README.md000066400000000000000000000033221454010642300214260ustar00rootroot00000000000000# Design docs We use this folder to track design docs for the project and their status (proposed, accepted, implemented, etc.). We also keep some features/ideas that we think will improve soci-snapshotter but haven't been converted into concrete design docs. 
## Workload-specific load order optimization Some lazy loading snapshotters support load order optimization, where some files are prioritized for prefetching. Typically, there is a one-to-one relationship between the list of to-be-prefetched files and the image or layer artifact. For SOCI, we wanted a bit more flexibility. Often, which files to prefetch is highly dependent on the specific workload, not the image or base layer. For example, a customer may have a Python3 base layer that is shared by thousands of applications. To optimize the launch time of those applications using the traditional approach, the base layer can no longer be shared, because each application’s load order for that layer will be different. Registry storage costs will increase dramatically, and cache hit rates will plummet. And when it comes time to update that base layer, each and every copy will have to be reoptimized. Secondly, there are some workloads that need to be able to prefetch at the subfile level. For example, we have observed machine learning workloads that launch and then immediately read a small header from a very large number of very large files. To meet these use-cases, SOCI will implement a separate load order document (LOD), that can specify which files or file-segments to load. Because it is a separate artifact, a single image can have many LODs. At container launch time, the appropriate LOD can be retrieved using business logic specified by the administrator. soci-snapshotter-0.4.1/docs/getting-started.md000066400000000000000000000301311454010642300213750ustar00rootroot00000000000000# Getting Started With the SOCI Snapshotter This document walks through how to use the SOCI snapshotter, including building a SOCI index, pushing/pulling an image and associated SOCI index, and running a container with the SOCI snapshotter. 
- [Dependencies](#dependencies) - [Install the SOCI snapshotter](#install-the-soci-snapshotter) - [Push an image to your registry](#push-an-image-to-your-registry) - [Create and push SOCI index](#create-and-push-soci-index) - [Create SOCI index](#create-soci-index) - [(Optional) Inspect SOCI index and zTOC](#optional-inspect-soci-index-and-ztoc) - [Push SOCI index to registry](#push-soci-index-to-registry) - [Run container with the SOCI snapshotter](#run-container-with-the-soci-snapshotter) - [Configure containerd](#configure-containerd) - [Start the SOCI snapshotter](#start-the-soci-snapshotter) - [Lazily pull image](#lazily-pull-image) - [Run container](#run-container) ## Dependencies The SOCI snapshotter has the following runtime dependencies. Please follow the links or commands to install them on your machine: > **Note** > We only mention the direct dependencies of the project. Some dependencies may > have their own dependencies (e.g., containerd depends on runc/cni). Please refer > to their doc for a complete installation guide (mainly containerd). - **[containerd](https://github.com/containerd/containerd/blob/main/docs/getting-started.md) >= 1.4** - required to run the SOCI snapshotter; to confirm please check with `sudo nerdctl system info`. - **[nerdctl](https://github.com/containerd/nerdctl#install) >= v1.5.0** - required for this doc to interact with containerd/registry. You do not need any of the additional components mentioned in the install documentation for this getting started, but you might if you want complex networking in the future. Please note that SOCI will not work with rootless nerdctl. - **fuse** - used for mounting without root access (`sudo yum install fuse` or other Linux package manager like `apt-get`, depending on your Linux distro). ## Install the SOCI snapshotter The SOCI project produces 2 binaries: - `soci`: the CLI tool used to build/manage SOCI indices. 
- `soci-snapshotter-grpc`: the daemon (a containerd snapshotter plugin) used for lazy loading. You can download prebuilt binaries from our [release page](https://github.com/awslabs/soci-snapshotter/releases) or [build them from source](./build.md). In this doc, let's just download the release binaries and move them to a `PATH` directory (`/usr/local/bin`): > You can find other download link in the release page that matches your machine. ```shell wget https://github.com/awslabs/soci-snapshotter/releases/download/v0.4.1/soci-snapshotter-0.4.1-linux-amd64.tar.gz sudo tar -C /usr/local/bin -xvf soci-snapshotter-0.4.1-linux-amd64.tar.gz soci soci-snapshotter-grpc ``` Now you should be able to use the `soci` CLI (and `soci-snapshotter-grpc` containerd plugin shortly): ```shell # check soci can be found in PATH sudo soci --help ``` Many `soci` CLI commands need to be run as `sudo`, because the metadata is saved in directories that a non-root user often does not have access to. ## Push an image to your registry In this document we will use `rabbitmq` from DockerHub `docker.io/library/rabbitmq:latest`. We use [AWS ECR](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-console.html) as the public registry for demonstration. Other OCI 1.0 compatible registries such as dockerhub should also work. First let's pull the image from docker into containerd's data store, then (tag and) push it up to your registry: > The example assumes you have created an ECR repository called `rabbitmq` and > have credentials available to the AWS CLI. You just need to update `AWS_ACCOUNT` and `AWS_REGION`. > > If you are using a different registry, you will need to set `REGISTRY` and `REGISTRY_USER/REGISTRY_PASSWORD` appropriately > (and the `rabbitmq` repository is created or can be created automatically while pushing). > > The platform tag might be different depending on your machine. 
```shell export AWS_ACCOUNT=000000000000 export AWS_REGION=us-east-1 export REGISTRY_USER=AWS export REGISTRY_PASSWORD=$(aws ecr get-login-password --region $AWS_REGION) export REGISTRY=$AWS_ACCOUNT.dkr.ecr.$AWS_REGION.amazonaws.com # needed for pushing images / SOCI indexes which run as the current user echo $REGISTRY_PASSWORD | nerdctl login -u $REGISTRY_USER --password-stdin $REGISTRY # needed the SOCI snapshotter which runs as root echo $REGISTRY_PASSWORD | sudo nerdctl login -u $REGISTRY_USER --password-stdin $REGISTRY sudo nerdctl pull docker.io/library/rabbitmq:latest sudo nerdctl image tag docker.io/library/rabbitmq:latest $REGISTRY/rabbitmq:latest sudo nerdctl push --platform linux/amd64 $REGISTRY/rabbitmq:latest ``` After this step, please check your registry to confirm the image is present. You can go to your registry console or use your registry's CLI (e.g. for ECR, you can use `aws ecr describe-images --repository-name rabbitmq --region $AWS_REGION`). ## Create and push SOCI index Instead of converting the image format, the SOCI snapshotter uses the SOCI index associated with an image to implement its lazy loading. For more details please see [README](../README.md#no-image-conversion). 
### Create SOCI index Let's create a SOCI index, which later will be pushed to your registry: ```shell sudo soci create $REGISTRY/rabbitmq:latest # output layer sha256:57315aaee690b22265ebb83b5443587443398a7cd99dd2a43985c28868d34053 -> ztoc skipped layer sha256:ed46dea0429646ca97e7a90d273159154ab8c28e631f2582d32713e584d98ace -> ztoc skipped layer sha256:3f0e404c1d688448c1c3947d91d6e0926c67212f4d647369518077513ebdfd91 -> ztoc skipped layer sha256:626e07084b41a102f8bcedf05172676423d1c37b8391be76eee2d7bbf56ec31e -> ztoc skipped layer sha256:b49348aba7cfd44d33b07730fd8d3b44ac97d16a268f2d74f7bfb78c4c9d1ff7 -> ztoc skipped layer sha256:ec66df5c883fd24406c6ef53864970f628b51216e8e1f3f5981c439ed6e4ed41 -> ztoc skipped layer sha256:8147f1b064ec70039aad0068f71b316b42cf515d2ba87e6668cb66de4f042f5a -> ztoc skipped layer sha256:f63218e95551afe34f3107b1769a556a3c9a39279cb66979914215e03f4e2754 -> ztoc sha256:ccae6b7217b73ae9caf80bff4c5411dada341739c8b443791fba227b226c61d0 layer sha256:7608715873ec5c02d370e963aa9b19a149023ce218887221d93fe671b3abbf58 -> ztoc sha256:740374aa7cac1764593430843d428a73a30d4a6a0d45fb171c369f3914a638eb layer sha256:96fb4c28b2c1fc1528bf053e2938d5173990eb12097d51f66c2bb3d01a2c9a39 -> ztoc sha256:dc9a2ca27d2b680279fc8052228772b9c03a779d0b7cc61012d2ad833ad1ff5e ... ``` Behind the scene SOCI created two kinds of objects. One is a series of ztocs (one per layer). A ztoc is a table of contents for compressed data. The other is a manifest that relates the ztocs to their corresponding image layers and relates the entire SOCI index to a particular image manifest (i.e. a particular image for a particular platform). > We skip building ztocs for smaller layers (controlled by `--min-layer-size` of > `soci create`) because small layers don't benefit much from lazy loading.) From the above output, we can see that SOCI creates ztocs for 3 layers and skips 7 layers, which means only the 3 layers with ztocs will be lazily pulled. 
### (Optional) Inspect SOCI index and zTOC We can inspect one of these ztocs from the output of the previous command (replace the digest with one in your command output). This command will print the ztoc, which contains all of the information that SOCI needs to find a given file in the layer: ```shell sudo soci ztoc info sha256:4c1d63f476d4907e0db42b8736f578e79432a28d304935708c918c95e0e4df00 ``` We can also view the SOCI index manifests. This command lists out all of our index manifests: ```shell sudo soci index list ``` To inspect an individual SOCI index, we can use the following command, which dumps out the index manifest in json: ```shell sudo soci index info sha256:f5f2a8558d0036c0a316638c5575607c01d1fa1588dbe56c6a5a7253e30ce107 ``` ### Push SOCI index to registry Next we need to push the manifest to the registry with the following command. This will push all of the SOCI-related artifacts (index manifest, ztoc): ```shell sudo soci push --user $REGISTRY_USER:$REGISTRY_PASSWORD $REGISTRY/rabbitmq:latest ``` Credentials here can be omitted if `nerdctl login` has stored credentials for this registry. ## Run container with the SOCI snapshotter ### Configure containerd We need to reconfigure and restart containerd to enable the SOCI snapshotter. This section assumes your containerd is managed by `systemd`. First let's stop containerd: ```shell sudo systemctl stop containerd ``` Next we need to modify containerd's config file (`/etc/containerd/config.toml`). Let's add the following config to the file to enable the SOCI snapshotter as a plugin: ```toml [proxy_plugins] [proxy_plugins.soci] type = "snapshot" address = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" ``` This config section tells containerd that there is a snapshot plugin named `soci` and to communicate with it via a socket file. 
Now let's restart containerd and confirm containerd knows about the SOCI snapshotter plugin: ```shell sudo systemctl restart containerd sudo nerdctl system info ``` You should see `soci` under Server -> Plugins -> Storage ### Start the SOCI snapshotter First we need to start the snapshotter grpc service by running the `soci-snapshotter-grpc` binary in background and simply redirecting logs to an arbitrary file: ```shell sudo soci-snapshotter-grpc &> ~/soci-snapshotter-logs & ``` Alternately, you can split up stdout (json logs) and stderr (plain text errors): ```shell sudo soci-snapshotter-grpc 2> ~/soci-snapshotter-errors 1> ~/soci-snapshotter-logs & ``` ### Lazily pull image Once the snapshotter is running we can call the `pull` command from nerdctl. This command reads the manifest from the registry and mounts a FUSE filesystem for each layer. > The snapshotter will use the OCI distribution-spec's [Referrers API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers) > (if available, otherwise the spec's [fallback mechanism](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#unavailable-referrers-api)) to fetch a list of available indices. ```shell sudo nerdctl pull --snapshotter soci $REGISTRY/rabbitmq:latest #output $Registry/rabbitmq:latest: resolved |++++++++++++++++++++++++++++++++++++++| manifest-sha256:a9072496...: done |++++++++++++++++++++++++++++++++++++++| config-sha256:4027609f...: done |++++++++++++++++++++++++++++++++++++++| elapsed: 9.8 s total: 10.3 K (1.1 KiB/s) ``` After running this command you will see a minimal output as the example, because with lazy pulling, not all layers are pulled during the `pull` step. From previous step we created 3 ztocs for 3 layers. Now let's check the mounts for the FUSE filesystems. There should be one mount per layer for layers with ztoc. In our rabbitmq example, there should be 3 mounts. 
```shell mount | grep fuse # output fusectl on /sys/fs/fuse/connections type fusectl (rw,relatime) /home/ec2-user/code/soci-snapshotter/soci on /var/lib/soci-snapshotter-grpc/snapshotter/snapshots/57/fs type fuse.rawBridge (rw,nodev,relatime,user_id=0,group_id=0,allow_other) /home/ec2-user/code/soci-snapshotter/soci on /var/lib/soci-snapshotter-grpc/snapshotter/snapshots/60/fs type fuse.rawBridge (rw,nodev,relatime,user_id=0,group_id=0,allow_other) /home/ec2-user/code/soci-snapshotter/soci on /var/lib/soci-snapshotter-grpc/snapshotter/snapshots/62/fs type fuse.rawBridge (rw,nodev,relatime,user_id=0,group_id=0,allow_other) ``` ### Run container Now that all of the mounts are set up we can run the image using the following command in nerdctl. We need to specify which snapshotter we shall use and we will use the `--net host` flag. Then we pass in the two main arguments, our image registry and the id of the container: ```shell sudo nerdctl run --snapshotter soci --net host --rm $REGISTRY/rabbitmq:latest ``` soci-snapshotter-0.4.1/docs/glossary.md000066400000000000000000000034271454010642300201430ustar00rootroot00000000000000The SOCI project introduces several new terms that sometimes have subtle differences between them. This glossary defines these terms. ## Terminology * __SOCI__: Seekable OCI (pronounced so-CHEE). SOCI combines an unmodified [OCI Image](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc4/spec.md) (or Docker v2 image) with a SOCI index to enable the SOCI snapshotter to lazily pull the image at runtime. * __SOCI index__: An OCI artifact consisting of a SOCI index manifest and a set of zTOCs that enable lazy loading of unmodified OCI images. "Index" refers to the whole set of objects similarly to how "image" refers to the set of image index, manifest, config, and layers. 
* __SOCI index manifest__: An [OCI Image manifest](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc4/manifest.md) containing the list of zTOCs in the SOCI Index with a Subject reference to the image for which the manifest was generated. * __zTOC__: A Table of Contents for compressed data. A zTOC is composed of 2 parts. 1) a table of contents containing file metadata and its offset in the decompressed TAR archive (the "TOC"). 2) A collection of "checkpoints" of the state of the compression engine at various points in the layer. We refer to this collection as the "zInfo". * __span__: A chunk of data that can be independently decompressed. Each checkpoint in the zInfo corresponds to exactly one span in an image layer. ## Anti-terminology * __SOCI Image__: We generally avoid the term "SOCI Image" because there is no such thing! The image is an unmodified OCI image. Also, a single image may have many SOCI indices with different parameters such as span size, layers indexed, etc. The precise way to refer to an image that has a SOCI index is to refer to the index itself. soci-snapshotter-0.4.1/docs/install.md000066400000000000000000000077751454010642300177600ustar00rootroot00000000000000# Install the SOCI snapshotter This doc walks through how to install the SOCI snapshotter as a component managed by systemd. The SOCI snapshotter produces 2 binaries: - `soci`: the CLI tool used to build/manage SOCI indices. - `soci-snapshotter-grpc`: the daemon (a containerd snapshotter plugin) used for lazy loading. You can get the prebuilt binaries from our [release page](https://github.com/awslabs/soci-snapshotter/releases) or [build them from source](./build.md). 
- [Dependencies](#dependencies) - [Configure SOCI snapshotter (optional)](#configure-soci-snapshotter-optional) - [Confirm installation](#confirm-installation) - [Install the SOCI snapshotter for containerd with systemd](#install-the-soci-snapshotter-for-containerd-with-systemd) - [Config containerd](#config-containerd) ## Dependencies The SOCI snapshotter has the following dependencies. Please follow the links or commands to install them on your machine: > **Note** > We only mention the direct dependencies of the project. Some dependencies may > have their own dependencies (e.g., containerd depends on runc/cni). Please refer > to their doc for a complete installation guide (mainly containerd). - **[containerd](https://github.com/containerd/containerd/blob/main/docs/getting-started.md) >= 1.4** - required to run the SOCI snapshotter; to confirm please check with `sudo containerd --version`. - **fuse** - used for mounting without root access (`sudo yum install fuse`). For fuse/zlib, they can be installed by your Linux package manager (e.g., `yum` or `apt-get`). ## Configure SOCI snapshotter (optional) Similar to containerd, the SOCI snapshotter has a toml config file which is located at `/etc/soci-snapshotter-grpc/config.toml` by default. If such a file doesn't exist, SOCI snapshotter will use default values for all configurations. > **Note** > Whenever you make changes to the config file, you need to stop the snapshotter > first before making changes, and restart the snapshotter after the changes. ## Confirm installation To validate that the SOCI snapshotter is installed, let's check the snapshotter's version. The output should show the version that you installed. 
```shell $ sudo soci-snapshotter-grpc --version soci-snapshotter-grpc version f855ff1.m f855ff1bcf7e161cf0e8d3282dc3d797e733ada0.m ``` ## Install the SOCI snapshotter for containerd with systemd If you plan to use systemd to manage your SOCI snapshotter process, you can download the [`soci-snapshotter.service` unit file](../soci-snapshotter.service) in the repository root directory into `/usr/local/lib/systemd/system/soci-snapshotter.service`, and run the following commands: ```shell sudo systemctl daemon-reload sudo systemctl enable --now soci-snapshotter ``` To validate the SOCI snapshotter is now running: ```shell sudo systemctl status soci-snapshotter ``` ## Config containerd We need to configure and restart containerd to enable the SOCI snapshotter (this section assume your containerd is also managed by `systemd`): - Stop containerd: `sudo systemctl stop containerd`; - Update containerd config to include the SOCI snapshotter plugin. The config file is usually in `/etc/containerd/config.toml`, and you need to add the following: ```toml [proxy_plugins] [proxy_plugins.soci] type = "snapshot" address = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" ``` - Restart containerd: `sudo systemctl restart containerd`; - (Optional) Check to make sure the SOCI snapshotter is recognized by containerd: `sudo ctr plugin ls id==soci`. You will see output like below. If not, consult containerd logs to determine the cause or reach out on [our discussion](https://github.com/awslabs/soci-snapshotter/discussions). ```shell TYPE ID PLATFORMS STATUS io.containerd.snapshotter.v1 soci - ok ``` soci-snapshotter-0.4.1/docs/pull-modes.md000066400000000000000000000202751454010642300203610ustar00rootroot00000000000000# Pull modes of the SOCI snapshotter The SOCI snapshotter is a remote snapshotter. It is able to lazily load the contents of a container image when a *SOCI index* is present in the remote registry. 
If a SOCI index is not found, it will download and uncompress the image layers at launch time, just like the default snapshotter does. SOCI indices can also be "sparse", meaning that any individual layer may not be indexed. In that case, that layer will be downloaded at launch time, while the indexed layers will be lazily loaded. A layer will be mounted as a FUSE mountpoint if it's being lazily loaded, or as a normal overlay layer if it's not. Overall, lazily pulling a container image with the SOCI snapshotter (via the `soci image rpull` command) involves the following steps: - [Pull modes of the SOCI snapshotter](#pull-modes-of-the-soci-snapshotter) - [Step 1: specify SOCI index digest](#step-1-specify-soci-index-digest) - [Step 2: fetch SOCI artifacts](#step-2-fetch-soci-artifacts) - [Step 3: fetch image layers](#step-3-fetch-image-layers) ## Step 1: specify SOCI index digest To enable lazy pulling and loading an image with the SOCI snapshotter, first you need to `rpull` the image via the [`soci` CLI](./getting-started.md#install-soci-snapshotter). The CLI accepts an optional flag `--soci-index-digest`, which is the sha256 of the SOCI index manifest and will be passed to the snapshotter. If not provided, the snapshotter will use the OCI distribution-spec's [Referrers API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers) (if available, otherwise the spec's [fallback mechanism](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#unavailable-referrers-api)) to fetch a list of available indices. An index will be chosen from the list of available indices, but the selection process is undefined and it may not choose the same index every time. > **Note** > Check out [this doc](./getting-started.md#lazily-pull-image) for how to > validate if this step is successful or not, and [the debug doc](./debug.md#common-scenarios) > for the common scenarios where `rpull` might fail and how to debug/fix them. 
## Step 2: fetch SOCI artifacts During `rpull`, on the first layer mount there will be an attempt to download and parse the SOCI manifest. If this doesn’t go well, there will be the following error in the log: `unable to fetch SOCI artifacts:`, indicating that the container image will not be lazily loaded. In this case, the snapshotter will fallback to default snapshotter configured (eg: overlayfs) entirely. > Check out [the debug doc](./debug.md#common-scenarios) for how to debug/fix it. ## Step 3: fetch image layers The SOCI index will instruct containerd and the SOCI snapshotter when to fetch/pull image layers. There can be two cases: 1. There’s no zTOC for a specific layer. In this case, there will be an error log: `{"error":"failed to resolve layer`, indicating that this layer will be synchronously downloaded at launch time. 2. There's a zTOC for a specific layer. In this case, the layer will be mounted as a fuse mountpoint, and will be lazily loaded while a container is running. > Whether a layer belongs to 1 or 2 depends on its size. When creating a SOCI > index, SOCI only creates zTOC for layers larger than a given size which is > specified by the `--min-layer-size` flag of [`soci create` command](https://github.com/awslabs/soci-snapshotter/blob/9ff88817f3f2635b926f9fd32f6f05f389f7ecee/cmd/soci/commands/create.go#L56). With debug logging enabled, you can see an entry in logs for each layer. `checking mount point` indicates that the layer will be lazily loaded. `layer is normal snapshot(overlayfs)` indicates that it will not be lazily loaded. 
```shell {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/17/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/16/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/15/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/13/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/12/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/11/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/18/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628108043Z"} {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/14/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628124854Z"} 
{"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/10/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628164485Z"} {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/9/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06:$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/7/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06:$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/6/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06:$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/4/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06:$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/2/fs","msg":"layer is normal snapshot(overlayfs)","time":"2022-08-16T18:06:$ {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/8/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628307230Z"} {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/5/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628321040Z"} 
{"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/1/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628348072Z"} {"key":"sha256:5e986c80babd9591530ee7b5844f8f9cca87b991da5dbf0f489f8612228f28f6","level":"debug","mount-point":"/var/lib/soci-snapshotter-grpc/snapshotter/snapshots/3/fs","msg":"checking mount point","time":"2022-08-16T18:06:48.628371627Z"} ``` soci-snapshotter-0.4.1/docs/registry.md000066400000000000000000000313331454010642300201450ustar00rootroot00000000000000 # Registry Compatibility with SOCI SOCI is compatible with most registries. To check if your registry of choice is compatible, see [List of Registry Compatibility](#list-of-registry-compatibility). For most use-cases, compatibility is the only concern. However, there is a difference in *how* registries work with SOCI that could cause surprising edge cases. The rest of this document is a technical dive into how SOCI indices are stored and retrieved from registries and the surprises you might encounter. - [Registry Requirements](#registry-requirements) - [Referrers API vs Fallback](#referrers-api-vs-fallback) - [Referrers API](#referrers-api) - [Fallback](#fallback) - [How SOCI Indices Appear to Registries](#how-soci-indices-appear-to-registries) - [List of Registry Compatibility](#list-of-registry-compatibility) - [Failure Examples](#failure-examples) ## Registry Requirements In order for a registry to be compatible SOCI it must support the following features of the OCI distribution and image specs: 1) Accept [OCI Image Manifests](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/manifest.md) with [subject fields](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/manifest.md#image-manifest-property-descriptions) and arbitrary config media types. This allows the registry to store SOCI indices and is supported by most registries. 
2) (optional) Support the [OCI referrers API](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers). This adds convenience around retrieving SOCI indices from the registry. If it is not supported, there is a [fallback mechanism](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#unavailable-referrers-api) that works with all registries, but it has a few issues noted in the next section. ## Referrers API vs Fallback The SOCI snapshotter can retrieve SOCI indices and ztocs either through the [OCI referrers API](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers) or a [Fallback mechanism](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#unavailable-referrers-api). The referrers API is part of the not-yet-released [OCI Distribution Spec v1.1](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md) so registry support is limited. The Fallback is supported by all registries, but has notable edge cases. The SOCI CLI and the SOCI snapshotter automatically uses the referrers API if the registry supports it or the fallback mechanism otherwise. ### Referrers API The [referrers API](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers) is a registry endpoint where an agent can query for all artifacts that reference a given image digest, optionally filtering by artifact type. The registry indexes artifacts for the referrers API when the artifact is pushed. When a container is launched, the SOCI snapshotter can query this API to find SOCI indices that reference the digest of the image. ### Fallback If the referrers API is not available, the OCI distribution spec defines a [fallback mechanism that works with existing registries](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#unavailable-referrers-api). 
In this mechanism, the contents that would normally be returned by the referrers API are instead put into an [OCI Image Index](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/image-index.md) which is tagged in the registry with the digest of the manifest to which it refers. For example, imagine you had an image `myregistry.com/image:latest` with digest `sha:123`. If you created and pushed a SOCI index for that image, there would also be a new image index `myregistry.com/image:sha-123` which contains the SOCI index' descriptor. At runtime, the SOCI snapshotter will pull the `myregistry.com/image:sha-123` index and apply client side filtering to discover the SOCI index. For clarity in the rest of this section, we will refer to `myregistry.com/image:sha-123` as the "fallback" (as opposed to image index) to distinguish it from the SOCI index. An important note here is that the fallback is managed on the *client side* by the tool performing the push. There is therefore a race condition when pushing a SOCI index because the fallback has to be pulled, modified to add the new SOCI index, and then pushed back to the registry. If a second artifact is pushed that references the same image digest, then one modification of the fallback could clobber the other. To clarify the scope of this problem, the fallback is unique per image digest. Multiple artifacts (SOCI Indices, signatures, etc.) can modify the same fallback. The image digest is generally unique per image/platform pair. As an example of what this means in practice, concurrently creating a SOCI index for the image for platforms `linux/amd64` and `linux/i386` is safe because the image digests will be different. Concurrently creating a SOCI index and signature for an image and platform `linux/amd64` is unsafe because both artifacts will refer to the same image digest. Since the fallback is managed client side, the registry does not know about the relationship between SOCI indices and the fallback. 
Deleting a SOCI index will not delete or modify the fallback. It is up to the user to make the necessary modifications or deletions of the fallback when deleting a SOCI index from the registry. ## How SOCI Indices Appear to Registries Each registry will display information in a slightly different mechanism, but here we show what artifacts might show up in your repository and an explanation of what they are: | Tag | Type | Explanation | | ----------- | ----------- | ---------------------------------------------------------------------------------------------------------- | | latest | Image | The actual image | | \ | Image | The SOCI index manifest. This may appear as type SOCI Index or Other | | sha:123 | Image Index | The fallback image index. This will only be present for registries which do not support the referrers API. | ## List of Registry Compatibility Registries that are not listed have not been tested by the SOCI maintainers or reported by the community, but they may still be compatible SOCI. | Registry | Compatible? 
| Mechanism | Notes | | ----------------------------------------------------------------------------------------- | ----------- | ------------- | ---------------------------------------------------- | | [Docker Hub](https://hub.docker.com) | No | N/A | Does not support image manifests with subject fields | | [Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/) | Yes | Fallback | | | [Amazon ECR Public Gallery](https://gallery.ecr.aws) | Yes | Fallback | | | [Azure Container Registry](https://azure.microsoft.com/en-us/products/container-registry) | Yes | Referrers API | | | [GitHub Packages (GHCR)](https://github.com/features/packages) | Yes | Fallback | | | [Google Cloud Container Registry (GCR)](https://cloud.google.com/container-registry) | Yes | Fallback | | | [Google Cloud Artifact Registry (AR)](https://cloud.google.com/artifact-registry) | No | N/A | Testing the referrers API redirects to login | | [Quay.io](https://quay.io) | No | N/A | Does not support image manifests with subject fields | | [Artifactory](https://jfrog.com/artifactory/) | Yes | Fallback | | | [Harbor](https://github.com/goharbor/harbor) | Yes | Fallback | | | [Distribution](https://github.com/distribution/distribution) | Yes | Fallback | | | [OCI-playground Distribution](https://github.com/oci-playground/distribution) | Yes | Referrers API | | ### Failure Examples Below are some slightly redacted examples from the services that don't support the features needed to be compatible SOCI. **Docker Hub** ``` $ sudo ./out/soci push --user $USERNAME:$PASSWORD docker.io/####/busybox:latest checking if a soci index already exists in remote repository... 
pushing soci index with digest: sha256:d6ebffd218ead37e4862172b4f19491341e72aebc3cc6d9cf1a22297c40fb3c2 pushing artifact with digest: sha256:cce4c7e12e01b32151d69348fcf52e0db7b44f6df6c23c511fa5c52eaf272c28 pushing artifact with digest: sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a skipped artifact with digest: sha256:acaddd9ed544f7baf3373064064a51250b14cfe3ec604d65765a53da5958e5f5 successfully pushed artifact with digest: sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a successfully pushed artifact with digest: sha256:cce4c7e12e01b32151d69348fcf52e0db7b44f6df6c23c511fa5c52eaf272c28 pushing artifact with digest: sha256:d6ebffd218ead37e4862172b4f19491341e72aebc3cc6d9cf1a22297c40fb3c2 soci: error pushing graph to remote: PUT "https://registry-1.docker.io/v2/####/busybox/manifests/sha256:d6ebffd218ead37e4862172b4f19491341e72aebc3cc6d9cf1a22297c40fb3c2": response status code 404: notfound: not found ``` The index manifest can't be pushed at all. **Google Cloud Artifact Registry** ``` $ sudo ./out/soci push --user $USERNAME:$PASSWORD us-east1-docker.pkg.dev/####/busybox:latest checking if a soci index already exists in remote repository... soci: failed to fetch list of referrers: GET "https://accounts.google.com/v3/signin/identifier?dsh=###&continue=https%3A%2F%2Fconsole.cloud.google.com%2Fartifacts%2Ftags%2Fv2%2Fus-east1%2F####%252Fbusybox%252Freferrers%252Fsha256%2Facaddd9ed544f7baf3373064064a51250b14cfe3ec604d65765a53da5958e5f5...&service=cloudconsole&flowName=WebLiteSignIn&flowEntry=ServiceLogin": failed to decode response: invalid character '<' looking for beginning of value ``` This is before pushing the SOCI index. Looking up an existing referrer is redirecting to an auth flow. Pushing a SOCI index works as long as the SOCI CLI receives the `--existing-index allow` flag to skip the check for existing indices. 
This is because Artifact Registry appears to always redirect requests to the referrers API to an auth flow, despite other authenticated requests to the registry succeeding as expected. Pulling always falls back to OverlayFS due to the redirect. **Quay.io** ``` $ sudo ./out/soci push --user $USERNAME:$PASSWORD quay.io/####/busybox:latest checking if a soci index already exists in remote repository... pushing soci index with digest: sha256:3f2f40d12b70b94e43f17b3840cd0dd850d6ce497f80cee9515fe4f7253d176d skipped artifact with digest: sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a skipped artifact with digest: sha256:acaddd9ed544f7baf3373064064a51250b14cfe3ec604d65765a53da5958e5f5 skipped artifact with digest: sha256:ca306a7641ef2ca78cb69ce48bba4381263459a86fe3efad34ad31ca1c2bc2df pushing artifact with digest: sha256:3f2f40d12b70b94e43f17b3840cd0dd850d6ce497f80cee9515fe4f7253d176d soci: error pushing graph to remote: failed to push referrers index tagged by sha256-acaddd9ed544f7baf3373064064a51250b14cfe3ec604d65765a53da5958e5f5: PUT "https://quay.io/v2/####/busybox/manifests/sha256-acaddd9ed544f7baf3373064064a51250b14cfe3ec604d65765a53da5958e5f5": response status code 400: Bad Request ``` The index manifest is pushed every time the command is run, indicating that it's not being found after the push. We can't push the fallback at all, presumably because it contains the index manifest digest which isn't found. soci-snapshotter-0.4.1/fs/000077500000000000000000000000001454010642300154305ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/artifact_fetcher.go000066400000000000000000000170241454010642300212600ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fs import ( "bytes" "context" "errors" "fmt" "io" "strconv" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/service/keychain/dockerconfig" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" socihttp "github.com/awslabs/soci-snapshotter/util/http" "github.com/awslabs/soci-snapshotter/util/ioutils" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sync/errgroup" "oras.land/oras-go/v2/content" "oras.land/oras-go/v2/errdef" "oras.land/oras-go/v2/registry/remote" "oras.land/oras-go/v2/registry/remote/auth" ) type Fetcher interface { // Fetch fetches the artifact identified by the descriptor. It first checks the local content store // and returns a `ReadCloser` from there. Otherwise it fetches from the remote, saves in the local content store // and then returns a `ReadCloser`. Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, bool, error) // Store takes in a descriptor and io.Reader and stores it in the local store. Store(ctx context.Context, desc ocispec.Descriptor, reader io.Reader) error } type resolverStorage interface { content.Resolver content.Storage } // artifactFetcher is responsible for fetching and storing artifacts in the provided artifact store. 
type artifactFetcher struct { remoteStore resolverStorage localStore store.BasicStore refspec reference.Spec } // Constructs a new artifact fetcher // Takes in the image reference, the local store and the resolver func newArtifactFetcher(refspec reference.Spec, localStore store.BasicStore, remoteStore resolverStorage) (*artifactFetcher, error) { return &artifactFetcher{ localStore: localStore, remoteStore: remoteStore, refspec: refspec, }, nil } func newRemoteStore(refspec reference.Spec, httpConfig config.RetryableHTTPClientConfig) (*remote.Repository, error) { repo, err := remote.NewRepository(refspec.Locator) if err != nil { return nil, fmt.Errorf("cannot create repository %s: %w", refspec.Locator, err) } authClient := auth.Client{ Client: socihttp.NewRetryableClient(httpConfig), Cache: auth.DefaultCache, Credential: func(_ context.Context, host string) (auth.Credential, error) { username, secret, err := dockerconfig.DockerCreds(host) if err != nil { return auth.EmptyCredential, err } if username == "" && secret != "" { return auth.Credential{ RefreshToken: secret, }, nil } return auth.Credential{ Username: username, Password: secret, }, nil }, } repo.Client = &authClient return repo, nil } // Takes in a descriptor and returns the associated ref to fetch from remote. // i.e. /@ func (f *artifactFetcher) constructRef(desc ocispec.Descriptor) string { return fmt.Sprintf("%s@%s", f.refspec.Locator, desc.Digest.String()) } // Fetches the artifact identified by the descriptor. // It first checks the local store for the artifact. // If not found, if constructs the ref and fetches it from remote. 
func (f *artifactFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, bool, error) { // Check local store first rc, err := f.localStore.Fetch(ctx, desc) if err == nil { return rc, true, nil } log.G(ctx).WithField("digest", desc.Digest.String()).Infof("fetching artifact from remote") if desc.Size == 0 { // Digest verification fails is desc.Size == 0 // Therefore, we try to use the resolver to resolve the descriptor // and hopefully get the size. // Note that the resolve would fail for size > 4MiB, since that's the limit // for the manifest size when using the Docker resolver. log.G(ctx).WithField("digest", desc.Digest).Warnf("size of descriptor is 0, trying to resolve it...") desc, err = f.resolve(ctx, desc) if err != nil { return nil, false, fmt.Errorf("size of descriptor is 0; unable to resolve: %w", err) } } rc, err = f.remoteStore.Fetch(ctx, desc) if err != nil { return nil, false, fmt.Errorf("unable to fetch descriptor (%v) from remote store: %w", desc.Digest, err) } return rc, false, nil } func (f *artifactFetcher) resolve(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) { ref := f.constructRef(desc) desc, err := f.remoteStore.Resolve(ctx, ref) if err != nil { return desc, fmt.Errorf("unable to resolve ref (%s): %w", ref, err) } return desc, nil } // Store takes in an descriptor and io.Reader and stores it in the local store. 
func (f *artifactFetcher) Store(ctx context.Context, desc ocispec.Descriptor, reader io.Reader) error { err := f.localStore.Push(ctx, desc, reader) if err != nil { return fmt.Errorf("unable to push to local store: %w", err) } return nil } func FetchSociArtifacts(ctx context.Context, refspec reference.Spec, indexDesc ocispec.Descriptor, localStore store.Store, remoteStore resolverStorage) (*soci.Index, error) { fetcher, err := newArtifactFetcher(refspec, localStore, remoteStore) if err != nil { return nil, fmt.Errorf("could not create an artifact fetcher: %w", err) } log.G(ctx).WithField("digest", indexDesc.Digest).Infof("fetching SOCI index from remote registry") indexReader, local, err := fetcher.Fetch(ctx, indexDesc) if err != nil { return nil, fmt.Errorf("unable to fetch SOCI index: %w", err) } defer indexReader.Close() cw := new(ioutils.CountWriter) tee := io.TeeReader(indexReader, cw) var index soci.Index err = soci.DecodeIndex(tee, &index) if err != nil { return nil, fmt.Errorf("cannot deserialize byte data to index: %w", err) } desc := ocispec.Descriptor{ Digest: indexDesc.Digest, Size: cw.Size(), } // batch will prevent content from being garbage collected in the middle of the following operations ctx, batchDone, err := localStore.BatchOpen(ctx) if err != nil { return nil, err } defer batchDone(ctx) if !local { b, err := soci.MarshalIndex(&index) if err != nil { return nil, err } err = localStore.Push(ctx, desc, bytes.NewReader(b)) if err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { return nil, fmt.Errorf("unable to store index in local store: %w", err) } err = store.LabelGCRoot(ctx, localStore, desc) if err != nil { return nil, fmt.Errorf("unable to label index to prevent garbage collection: %w", err) } } eg, ctx := errgroup.WithContext(ctx) for i, blob := range index.Blobs { blob := blob i := i eg.Go(func() error { rc, local, err := fetcher.Fetch(ctx, blob) if err != nil { return fmt.Errorf("cannot fetch artifact: %w", err) } defer rc.Close() if 
local { return nil } if err := fetcher.Store(ctx, blob, rc); err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { return fmt.Errorf("unable to store ztoc in local store: %w", err) } return store.LabelGCRefContent(ctx, localStore, desc, "ztoc."+strconv.Itoa(i), blob.Digest.String()) }) } if err := eg.Wait(); err != nil { return nil, err } return &index, nil } soci-snapshotter-0.4.1/fs/artifact_fetcher_test.go000066400000000000000000000137331454010642300223220ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package fs import ( "bytes" "context" "fmt" "io" "testing" "github.com/containerd/containerd/reference" "github.com/google/go-cmp/cmp" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2/content" "oras.land/oras-go/v2/content/memory" ) const imageRef = "dummy.host/repo:tag" func TestConstructRef(t *testing.T) { testCases := []struct { name string artifactDigest string }{ { name: "constructRef returns correct ref", artifactDigest: "sha256:7b236f6c6ca259a4497e98c204bc1dcf3e653438e74af17bfe39da5329789f4a", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fetcher, err := newFakeArtifactFetcher(imageRef, nil) if err != nil { t.Fatalf("could not create artifact fetcher: %v", err) } expectedRef := fmt.Sprintf("dummy.host/repo@%s", tc.artifactDigest) dgst, err := digest.Parse(tc.artifactDigest) if err != nil { t.Fatal(err) } constructedRef := fetcher.constructRef(ocispec.Descriptor{Digest: dgst}) if expectedRef != constructedRef { t.Fatalf("unexpected ref from constructRef, got = %s, expected = %s", constructedRef, expectedRef) } }) } } func TestArtifactFetcherFetch(t *testing.T) { testCases := []struct { name string contents []byte size int64 }{ { name: "correct data fetched", contents: []byte("test"), size: 4, }, { name: "correct data fetched when desc.Size = 0", contents: []byte("test"), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fetcher, err := newFakeArtifactFetcher(imageRef, tc.contents) if err != nil { t.Fatalf("could not create artifact fetcher: %v", err) } dgst := digest.FromBytes(tc.contents) desc := ocispec.Descriptor{ Digest: dgst, Size: tc.size, } reader, _, err := fetcher.Fetch(context.Background(), desc) if err != nil { t.Fatal(err) } defer reader.Close() readBytes, err := io.ReadAll(reader) if err != nil { t.Fatal(err) } if diff := cmp.Diff(tc.contents, readBytes); diff != "" { t.Fatalf("unexpected content, diff = %v", diff) } }) } } 
func TestArtifactFetcherResolve(t *testing.T) { testCases := []struct { name string contents []byte }{ { name: "correct size fetched", contents: []byte("test"), }, { name: "correct size fetched 2", contents: []byte("foobarbaz"), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fetcher, err := newFakeArtifactFetcher(imageRef, tc.contents) if err != nil { t.Fatalf("could not create artifact fetcher: %v", err) } dgst := digest.FromBytes(tc.contents) size := int64(len(tc.contents)) desc := ocispec.Descriptor{ Digest: dgst, } ctx := context.Background() desc2, err := fetcher.resolve(ctx, desc) if err != nil { t.Fatalf("cannot resolve: %v", err) } if desc2.Size != size { t.Fatalf("unexpected size; expected = %d, got = %d", size, desc2.Size) } }) } } // Tests to make sure that data stored in local store is not fetched again from remote func TestArtifactFetcherFetchOnlyOnce(t *testing.T) { testCases := []struct { name string contents []byte }{ { name: "correct data fetched", contents: []byte("test"), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fetcher, err := newFakeArtifactFetcher(imageRef, tc.contents) if err != nil { t.Fatalf("could not create artifact fetcher: %v", err) } dgst := digest.FromBytes(tc.contents) size := len(tc.contents) desc := ocispec.Descriptor{ Digest: dgst, Size: int64(size), } ctx := context.Background() reader, local, err := fetcher.Fetch(ctx, desc) if err != nil { t.Fatal(err) } if local { t.Fatalf("unexpected value of local; expected = false, got = true") } defer reader.Close() err = fetcher.Store(ctx, desc, reader) if err != nil { t.Fatal(err) } reader, local, err = fetcher.Fetch(ctx, desc) if err != nil { t.Fatal(err) } if !local { t.Fatalf("unexpected value of local; expected = true, got = false") } defer reader.Close() readBytes, err := io.ReadAll(reader) if err != nil { t.Fatal(err) } if diff := cmp.Diff(tc.contents, readBytes); diff != "" { t.Fatalf("unexpected content, diff = %v", diff) } 
}) } } func newFakeArtifactFetcher(ref string, contents []byte) (*artifactFetcher, error) { refspec, err := reference.Parse(ref) if err != nil { return nil, err } return newArtifactFetcher(refspec, memory.New(), newFakeRemoteStore(contents)) } func newFakeRemoteStore(contents []byte) resolverStorage { return &fakeRemoteStore{ contents: contents, } } type fakeRemoteStore struct { contents []byte } var _ content.Storage = &fakeRemoteStore{} func (f *fakeRemoteStore) Fetch(_ context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(f.contents)), nil } func (f *fakeRemoteStore) Push(_ context.Context, desc ocispec.Descriptor, ra io.Reader) error { return nil } func (f *fakeRemoteStore) Exists(_ context.Context, desc ocispec.Descriptor) (bool, error) { return true, nil } func (f *fakeRemoteStore) Resolve(_ context.Context, ref string) (ocispec.Descriptor, error) { return ocispec.Descriptor{ Size: int64(len(f.contents)), }, nil } soci-snapshotter-0.4.1/fs/backgroundfetcher/000077500000000000000000000000001454010642300211105ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/backgroundfetcher/background_fetcher.go000066400000000000000000000115451454010642300252640ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package backgroundfetcher import ( "context" "fmt" "time" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" "github.com/containerd/containerd/log" "golang.org/x/time/rate" ) type Option func(*BackgroundFetcher) error func WithSilencePeriod(period time.Duration) Option { return func(bf *BackgroundFetcher) error { bf.silencePeriod = period return nil } } func WithFetchPeriod(period time.Duration) Option { return func(bf *BackgroundFetcher) error { bf.fetchPeriod = period return nil } } func WithMaxQueueSize(size int) Option { return func(bf *BackgroundFetcher) error { bf.maxQueueSize = size return nil } } func WithEmitMetricPeriod(period time.Duration) Option { return func(bf *BackgroundFetcher) error { bf.emitMetricPeriod = period return nil } } // An interface for a type to "pause" the background fetcher. // Useful for mocking in unit tests. type pauser interface { pause(time.Duration) } type defaultPauser struct{} func (p defaultPauser) pause(d time.Duration) { time.Sleep(d) } // A backgroundFetcher is responsible for fetching spans from layers // in the background. type BackgroundFetcher struct { silencePeriod time.Duration fetchPeriod time.Duration maxQueueSize int emitMetricPeriod time.Duration rateLimiter *rate.Limiter bfPauser pauser // All span managers are added to the channel and picked up in Run(). // If a span manager is still able to fetch, it is reinserted into the chanel. 
workQueue chan Resolver closeChan chan struct{} pauseChan chan struct{} } func NewBackgroundFetcher(opts ...Option) (*BackgroundFetcher, error) { bf := new(BackgroundFetcher) for _, o := range opts { if err := o(bf); err != nil { return nil, err } } // Create a rate-limiter that will fetch every bf.fetchPeriod // with a burst capacity of 1 (i.e., it will never invoke more than 1 bg-fetch // within bf.fetchPeriod) bf.rateLimiter = rate.NewLimiter(rate.Every(bf.fetchPeriod), 1) bf.workQueue = make(chan Resolver, bf.maxQueueSize) bf.closeChan = make(chan struct{}) bf.pauseChan = make(chan struct{}, bf.maxQueueSize) if bf.bfPauser == nil { bf.bfPauser = defaultPauser{} } return bf, nil } // Add a new Resolver to be background fetched from. // Sends the resolver through the channel, which will be received in the Run() method. func (bf *BackgroundFetcher) Add(resolver Resolver) { bf.workQueue <- resolver } func (bf *BackgroundFetcher) Close() error { bf.closeChan <- struct{}{} return nil } // Pause sends a signal to pause the background fetcher for silencePeriod on the next iteration. func (bf *BackgroundFetcher) Pause() { bf.pauseChan <- struct{}{} } func (bf *BackgroundFetcher) pause(ctx context.Context) { needPause := false loop: for { select { // A new image has been mounted. Need to pause the background fetcher case <-bf.pauseChan: needPause = true default: break loop } } if needPause { log.G(ctx).WithField("silencePeriod", bf.silencePeriod).Debug("new image mounted, pausing the background fetcher for silence period") bf.bfPauser.pause(bf.silencePeriod) } } func (bf *BackgroundFetcher) Run(ctx context.Context) error { ticker := time.NewTicker(bf.emitMetricPeriod) go bf.emitWorkQueueMetric(ctx, ticker) for { // Pause the background fetcher if necessary. 
bf.pause(ctx) select { case <-bf.closeChan: ticker.Stop() return nil case <-ctx.Done(): ticker.Stop() return nil default: } select { case lr := <-bf.workQueue: if lr.Closed() { continue } go func() { more, err := lr.Resolve(ctx) if more { bf.workQueue <- lr } else if err != nil { log.G(ctx).WithError(err).Warn("error trying to resolve layer, removing it from the queue") } }() default: } if err := bf.rateLimiter.Wait(ctx); err != nil { return fmt.Errorf("background fetch: error while waiting for rate limiter: %w", err) } } } func (bf *BackgroundFetcher) emitWorkQueueMetric(ctx context.Context, ticker *time.Ticker) { for { select { case <-bf.closeChan: return case <-ctx.Done(): return case <-ticker.C: // background fetcher is at the snapshotter's fs level, so no image digest as key commonmetrics.AddImageOperationCount(commonmetrics.BackgroundFetchWorkQueueSize, "", int32(len(bf.workQueue))) } } } soci-snapshotter-0.4.1/fs/backgroundfetcher/background_fetcher_test.go000066400000000000000000000117261454010642300263240ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package backgroundfetcher import ( "compress/gzip" "context" "sync" "testing" "time" "github.com/awslabs/soci-snapshotter/cache" spanmanager "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/opencontainers/go-digest" ) func withPauser(p pauser) Option { return func(bf *BackgroundFetcher) error { bf.bfPauser = p return nil } } type countingPauser struct { mu sync.Mutex count int } func (c *countingPauser) pause(time.Duration) { c.mu.Lock() defer c.mu.Unlock() c.count++ } func TestBackgroundFetcherPause(t *testing.T) { p := &countingPauser{} bf, err := NewBackgroundFetcher(WithSilencePeriod(0), withPauser(p), WithEmitMetricPeriod(time.Second)) if err != nil { t.Fatal(err) } go bf.Run(context.Background()) defer bf.Close() bf.Pause() time.Sleep(10 * time.Millisecond) p.mu.Lock() defer p.mu.Unlock() if p.count != 1 { t.Fatalf("unexpected pause count; expected 1, got %v", p.count) } } func TestBackgroundFetcherRun(t *testing.T) { testCases := []struct { name string waitTime time.Duration entries [][]testutil.TarEntry }{ { name: "background fetcher fetches all data for single span manager", waitTime: 1 * time.Second, entries: [][]testutil.TarEntry{ { testutil.File("test", string(testutil.RandomByteData(10000000))), }, }, }, { name: "background fetcher fetches all data for multiple span managers", waitTime: 3 * time.Second, entries: [][]testutil.TarEntry{ { testutil.File("test1", string(testutil.RandomByteData(10000000))), }, { testutil.File("test2", string(testutil.RandomByteData(20000000))), }, }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { type testInfo struct { sm *spanmanager.SpanManager cache *countingCache ztoc *ztoc.Ztoc } var infos []testInfo for _, entries := range tc.entries { ztoc, sr, err := ztoc.BuildZtocReader(t, entries, gzip.DefaultCompression, 1000000) if err != nil { t.Fatalf("error building span manager and 
section reader: %v", err) } cache := &countingCache{} sm := spanmanager.New(ztoc, sr, cache, 0) infos = append(infos, testInfo{sm, cache, ztoc}) } bf, err := NewBackgroundFetcher(WithFetchPeriod(0), WithEmitMetricPeriod(time.Second)) if err != nil { t.Fatalf("unable to construct background fetcher: %v", err) } go bf.Run(context.Background()) defer bf.Close() for _, info := range infos { bf.Add(NewSequentialResolver(digest.FromString("test"), info.sm)) } time.Sleep(tc.waitTime) for _, info := range infos { info.cache.mu.Lock() defer info.cache.mu.Unlock() if info.cache.addCount != int(info.ztoc.MaxSpanID)+1 { t.Fatalf("unexpected number of adds to cache; expected %d, got %d", info.ztoc.MaxSpanID+1, info.cache.addCount) } // The first 10 bytes of a compressed gzip archive is the gzip header. // We don't fetch it when lazy-loading; therefore, subtracting 10 from the total compressed archive size. compressedSize := info.ztoc.CompressedArchiveSize - 10 if info.cache.addBytes != int64(compressedSize) { t.Fatalf("unexpected number of bytes added to cache; expected %d, got %d", compressedSize, info.cache.addBytes) } } }) } } // countingCache is an implementation of cache.BlobCache // which counts the number of times `cache.Add` was invoked // and the number of bytes added to the cache. // All writes to the cache succeed. 
type countingCache struct { addCount int addBytes int64 mu sync.Mutex } var _ cache.BlobCache = &countingCache{} func (c *countingCache) Add(key string, opts ...cache.Option) (cache.Writer, error) { return &countingWriter{c}, nil } func (c *countingCache) Get(key string, opts ...cache.Option) (cache.Reader, error) { return nil, nil } func (c *countingCache) Close() error { return nil } type countingWriter struct { cache *countingCache } var _ cache.Writer = &countingWriter{} func (c *countingWriter) Write(p []byte) (int, error) { c.cache.mu.Lock() defer c.cache.mu.Unlock() c.cache.addBytes += int64(len(p)) c.cache.addCount++ return len(p), nil } func (c *countingWriter) Close() error { return nil } func (c *countingWriter) Commit() error { return nil } func (c *countingWriter) Abort() error { return nil } soci-snapshotter-0.4.1/fs/backgroundfetcher/resolver.go000066400000000000000000000056061454010642300233070ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package backgroundfetcher import ( "context" "errors" "fmt" "sync" "time" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" sm "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/containerd/containerd/log" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) type Resolver interface { // Resolve fetches and caches the next span. 
Returns true if there is still more data to be fetched. // Returns false otherwise. Resolve(context.Context) (bool, error) // Closes the resolver. Close() error // Checks whether the resolver is closed or not. Closed() bool } type base struct { *sm.SpanManager layerDigest digest.Digest closed bool closedMu sync.Mutex // timestamp when background fetch for the layer starts start time.Time } func (b *base) Close() error { b.closedMu.Lock() defer b.closedMu.Unlock() b.closed = true return nil } func (b *base) Closed() bool { b.closedMu.Lock() defer b.closedMu.Unlock() return b.closed } // A sequentialLayerResolver background fetches spans sequentially, starting from span 0. type sequentialLayerResolver struct { *base nextSpanFetchID compression.SpanID } func NewSequentialResolver(layerDigest digest.Digest, spanManager *sm.SpanManager) Resolver { return &sequentialLayerResolver{ base: &base{ SpanManager: spanManager, layerDigest: layerDigest, }, } } func (lr *sequentialLayerResolver) Resolve(ctx context.Context) (bool, error) { log.G(ctx).WithFields(logrus.Fields{ "layer": lr.layerDigest, "spanId": lr.nextSpanFetchID, }).Debug("fetching span") if lr.nextSpanFetchID == 0 { lr.base.start = time.Now() } err := lr.FetchSingleSpan(lr.nextSpanFetchID) if err == nil { commonmetrics.IncOperationCount(commonmetrics.BackgroundSpanFetchCount, lr.layerDigest) lr.nextSpanFetchID++ return true, nil } if errors.Is(err, sm.ErrExceedMaxSpan) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetch, lr.layerDigest, lr.base.start) return false, nil } commonmetrics.IncOperationCount(commonmetrics.BackgroundSpanFetchFailureCount, lr.layerDigest) return false, fmt.Errorf("error trying to fetch span with spanId = %d from layerDigest = %s: %w", lr.nextSpanFetchID, lr.layerDigest.String(), err) } soci-snapshotter-0.4.1/fs/backgroundfetcher/resolver_test.go000066400000000000000000000046011454010642300243400ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package backgroundfetcher import ( "compress/gzip" "context" "testing" "github.com/awslabs/soci-snapshotter/cache" spanmanager "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/opencontainers/go-digest" ) func TestSequentialResolver(t *testing.T) { testCases := []struct { name string entries []testutil.TarEntry }{ { name: "resolver fetches spans sequentially", entries: []testutil.TarEntry{ testutil.File("test", string(testutil.RandomByteData(10000000))), }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ztoc, sr, err := ztoc.BuildZtocReader(t, tc.entries, gzip.DefaultCompression, 1000000) if err != nil { t.Fatalf("error build ztoc and section reader: %v", err) } sm := spanmanager.New(ztoc, sr, cache.NewMemoryCache(), 0) sequentialResolver := NewSequentialResolver(digest.FromString("test"), sm) var resolvedSpans []int for { resolvedSpans = append(resolvedSpans, int(sequentialResolver.(*sequentialLayerResolver).nextSpanFetchID)) more, err := sequentialResolver.Resolve(context.Background()) if !more { break } if err != nil { t.Fatalf("error while resolving span: %v", err) } } lastSpanID := sequentialResolver.(*sequentialLayerResolver).nextSpanFetchID // assert that we've resolved all spans if lastSpanID != ztoc.MaxSpanID+1 { t.Fatalf("unexpected number of spans resolved; expected %d, got %d", ztoc.MaxSpanID+1, lastSpanID) } 
// assert that all spans are resolved sequentially for i := 0; i < len(resolvedSpans); i++ { if i != resolvedSpans[i] { t.Fatalf("unexpected span id; expected %d, got %d", i, resolvedSpans[i]) } } }) } } soci-snapshotter-0.4.1/fs/client.go000066400000000000000000000052041454010642300172360ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fs import ( "context" "errors" "fmt" "github.com/awslabs/soci-snapshotter/soci" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2/content" ) var ( ErrNoReferrers = errors.New("no existing referrers") ) // Determines which index will be selected from a list of index descriptors type IndexSelectionPolicy func([]ocispec.Descriptor) (ocispec.Descriptor, error) func SelectFirstPolicy(descs []ocispec.Descriptor) (ocispec.Descriptor, error) { return descs[0], nil } // Responsible for making Referrers API calls to remote registry to fetch list of referrers. type ReferrersClient interface { /// Takes in an manifest descriptor and IndexSelectionPolicy and returns a single artifact descriptor. /// Returns an error (ErrNoReferrers) if the manifest descriptor has no referrers. 
SelectReferrer(context.Context, ocispec.Descriptor, IndexSelectionPolicy) (ocispec.Descriptor, error) } // Interface for oras-go's Repository.Referrers call, for mocking type ReferrersCaller interface { Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error } type Inner interface { content.Storage ReferrersCaller } type OCIArtifactClient struct { Inner } func NewOCIArtifactClient(inner Inner) *OCIArtifactClient { return &OCIArtifactClient{ Inner: inner, } } func (c *OCIArtifactClient) SelectReferrer(ctx context.Context, desc ocispec.Descriptor, fn IndexSelectionPolicy) (ocispec.Descriptor, error) { descs, err := c.AllReferrers(ctx, desc) if err != nil { return ocispec.Descriptor{}, fmt.Errorf("unable to fetch referrers: %w", err) } if len(descs) == 0 { return ocispec.Descriptor{}, ErrNoReferrers } return fn(descs) } func (c *OCIArtifactClient) AllReferrers(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { descs := []ocispec.Descriptor{} err := c.Referrers(ctx, desc, soci.SociIndexArtifactType, func(referrers []ocispec.Descriptor) error { descs = append(descs, referrers...) return nil }) return descs, err } soci-snapshotter-0.4.1/fs/client_test.go000066400000000000000000000053641454010642300203040ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package fs import ( "context" "errors" "io" "testing" "github.com/google/go-cmp/cmp" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) type fakeInner struct { descs []ocispec.Descriptor } func newFakeInner(descs []ocispec.Descriptor) *fakeInner { return &fakeInner{ descs: descs, } } var _ Inner = &fakeInner{} func (f *fakeInner) Exists(ctx context.Context, desc ocispec.Descriptor) (bool, error) { return false, nil } func (f *fakeInner) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { return nil, nil } func (f *fakeInner) Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error { return nil } func (f *fakeInner) Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error { return fn(f.descs) } func TestOCIArtifactClientSelectReferrer(t *testing.T) { testCases := []struct { name string descs []ocispec.Descriptor expectedErr error expectedDesc ocispec.Descriptor selectionPolicy IndexSelectionPolicy }{ { name: "empty referrers list returns ErrNoReferrers", descs: make([]ocispec.Descriptor, 0), expectedErr: ErrNoReferrers, }, { name: "SelectFirstPolicy returns the first descriptor", descs: []ocispec.Descriptor{ { Digest: digest.FromBytes([]byte("foo")), Size: 3, }, { Digest: digest.FromBytes([]byte("test")), Size: 4, }, }, expectedDesc: ocispec.Descriptor{ Digest: digest.FromBytes([]byte("foo")), Size: 3, }, selectionPolicy: SelectFirstPolicy, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { inner := newFakeInner(tc.descs) client := NewOCIArtifactClient(inner) desc, err := client.SelectReferrer(context.Background(), ocispec.Descriptor{}, tc.selectionPolicy) if err != nil && !errors.Is(err, tc.expectedErr) { t.Fatalf("unexpected error getting descriptor: %v", err) } if diff := cmp.Diff(desc, tc.expectedDesc); diff != "" { t.Fatalf("unexpected descriptor; diff = %v", diff) } }) } 
} soci-snapshotter-0.4.1/fs/fs.go000066400000000000000000000506661454010642300164040ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ // // Implementation of FileSystem of SOCI snapshotter // package fs import ( "context" "fmt" "os/exec" "sync" "syscall" "time" "github.com/awslabs/soci-snapshotter/config" bf "github.com/awslabs/soci-snapshotter/fs/backgroundfetcher" "github.com/awslabs/soci-snapshotter/fs/layer" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" layermetrics "github.com/awslabs/soci-snapshotter/fs/metrics/layer" "github.com/awslabs/soci-snapshotter/fs/remote" "github.com/awslabs/soci-snapshotter/fs/source" "github.com/awslabs/soci-snapshotter/metadata" "github.com/awslabs/soci-snapshotter/snapshot" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" ctdsnapshotters "github.com/containerd/containerd/pkg/snapshotters" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes/docker" metrics "github.com/docker/go-metrics" fusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) var ( defaultIndexSelectionPolicy = SelectFirstPolicy fusermountBin = "fusermount" ) type Option func(*options) type options struct { getSources source.GetSources resolveHandlers map[string]remote.Handler metadataStore metadata.Store overlayOpaqueType layer.OverlayOpaqueType } func WithGetSources(s source.GetSources) Option { return func(opts *options) { opts.getSources = s } } func WithResolveHandler(name string, handler remote.Handler) Option { return func(opts *options) { if opts.resolveHandlers == nil { opts.resolveHandlers = make(map[string]remote.Handler) } opts.resolveHandlers[name] = handler } } func WithMetadataStore(metadataStore metadata.Store) Option { return func(opts *options) { opts.metadataStore = metadataStore } } func WithOverlayOpaqueType(overlayOpaqueType 
layer.OverlayOpaqueType) Option { return func(opts *options) { opts.overlayOpaqueType = overlayOpaqueType } } func NewFilesystem(ctx context.Context, root string, cfg config.FSConfig, opts ...Option) (_ snapshot.FileSystem, err error) { var fsOpts options for _, o := range opts { o(&fsOpts) } var ( mountTimeout = time.Duration(cfg.MountTimeoutSec) * time.Second fuseMetricsEmitWaitDuration = time.Duration(cfg.FuseMetricsEmitWaitDurationSec) * time.Second attrTimeout = time.Duration(cfg.FuseConfig.AttrTimeout) * time.Second entryTimeout = time.Duration(cfg.FuseConfig.EntryTimeout) * time.Second negativeTimeout = time.Duration(cfg.FuseConfig.NegativeTimeout) * time.Second bgFetchPeriod = time.Duration(cfg.BackgroundFetchConfig.FetchPeriodMsec) * time.Millisecond bgSilencePeriod = time.Duration(cfg.BackgroundFetchConfig.SilencePeriodMsec) * time.Millisecond bgEmitMetricPeriod = time.Duration(cfg.BackgroundFetchConfig.EmitMetricPeriodSec) * time.Second bgMaxQueueSize = cfg.BackgroundFetchConfig.MaxQueueSize ) metadataStore := fsOpts.metadataStore getSources := fsOpts.getSources if getSources == nil { getSources = source.FromDefaultLabels(func(refspec reference.Spec) (hosts []docker.RegistryHost, _ error) { return docker.ConfigureDefaultRegistries(docker.WithPlainHTTP(docker.MatchLocalhost))(refspec.Hostname()) }) } ctx, store, err := store.NewContentStore(ctx, store.WithType(store.ContentStoreType(cfg.ContentStoreConfig.Type)), store.WithNamespace(cfg.ContentStoreConfig.Namespace)) if err != nil { return nil, fmt.Errorf("cannot create content store: %w", err) } var bgFetcher *bf.BackgroundFetcher if !cfg.BackgroundFetchConfig.Disable { log.G(context.Background()).WithFields(logrus.Fields{ "fetchPeriod": bgFetchPeriod, "silencePeriod": bgSilencePeriod, "maxQueueSize": bgMaxQueueSize, "emitMetricPeriod": bgEmitMetricPeriod, }).Info("constructing background fetcher") bgFetcher, err = bf.NewBackgroundFetcher(bf.WithFetchPeriod(bgFetchPeriod), 
bf.WithSilencePeriod(bgSilencePeriod), bf.WithMaxQueueSize(bgMaxQueueSize), bf.WithEmitMetricPeriod(bgEmitMetricPeriod)) if err != nil { return nil, fmt.Errorf("cannot create background fetcher: %w", err) } go bgFetcher.Run(context.Background()) } else { log.G(context.Background()).Info("background fetch is disabled") } r, err := layer.NewResolver(root, cfg, fsOpts.resolveHandlers, metadataStore, store, fsOpts.overlayOpaqueType, bgFetcher) if err != nil { return nil, fmt.Errorf("failed to setup resolver: %w", err) } var ns *metrics.Namespace if !cfg.NoPrometheus { ns = metrics.NewNamespace("soci", "fs", nil) commonmetrics.Register() // Register common metrics. This will happen only once. } c := layermetrics.NewLayerMetrics(ns) if ns != nil { metrics.Register(ns) // Register layer metrics. } return &filesystem{ // it's generally considered bad practice to store a context in a struct, // however `filesystem` has it's own lifecycle as well as a per-request lifecycle. // Some operations (e.g. remote calls) exist within a per-request lifecycle and use // the context passed to the specific function, but some operations (e.g. fuse operation counts) // are tied to the lifecycle of the filesystem itself. In order to avoid leaking goroutines, // we store the snapshotter's lifecycle in the struct itself so that we can tie new goroutines // to it later. 
ctx: ctx, resolver: r, getSources: getSources, debug: cfg.Debug, layer: make(map[string]layer.Layer), allowNoVerification: cfg.AllowNoVerification, disableVerification: true, metricsController: c, attrTimeout: attrTimeout, entryTimeout: entryTimeout, negativeTimeout: negativeTimeout, httpConfig: cfg.RetryableHTTPClientConfig, contentStore: store, bgFetcher: bgFetcher, mountTimeout: mountTimeout, fuseMetricsEmitWaitDuration: fuseMetricsEmitWaitDuration, }, nil } type sociContext struct { cachedErr error cachedErrMu sync.RWMutex bgFetchPauseOnce sync.Once fetchOnce sync.Once sociIndex *soci.Index imageLayerToSociDesc map[string]ocispec.Descriptor fuseOperationCounter *layer.FuseOperationCounter } func (c *sociContext) Init(fsCtx context.Context, ctx context.Context, imageRef, indexDigest, imageManifestDigest string, store store.Store, fuseOpEmitWaitDuration time.Duration, httpConfig config.RetryableHTTPClientConfig) error { var retErr error c.fetchOnce.Do(func() { defer func() { if retErr != nil { c.cachedErrMu.Lock() c.cachedErr = retErr c.cachedErrMu.Unlock() } }() refspec, err := reference.Parse(imageRef) if err != nil { retErr = err return } remoteStore, err := newRemoteStore(refspec, httpConfig) if err != nil { retErr = err return } client := NewOCIArtifactClient(remoteStore) indexDesc := ocispec.Descriptor{ Digest: digest.Digest(indexDigest), } if indexDigest == "" { log.G(ctx).Info("index digest not provided, making a Referrers API call to fetch list of indices") imgDigest, err := digest.Parse(imageManifestDigest) if err != nil { retErr = fmt.Errorf("unable to parse image digest: %w", err) } desc, err := client.SelectReferrer(ctx, ocispec.Descriptor{Digest: imgDigest}, defaultIndexSelectionPolicy) if err != nil { retErr = fmt.Errorf("cannot fetch list of referrers: %w", err) return } indexDesc = desc } log.G(ctx).WithField("digest", indexDesc.Digest.String()).Infof("fetching SOCI artifacts using index descriptor") index, err := FetchSociArtifacts(fsCtx, 
refspec, indexDesc, store, remoteStore) if err != nil { retErr = fmt.Errorf("error trying to fetch SOCI artifacts: %w", err) return } c.sociIndex = index c.populateImageLayerToSociMapping(index) // Create the FUSE operation counter. // Metrics are emitted after a wait time of fuseOpEmitWaitDuration. c.fuseOperationCounter = layer.NewFuseOperationCounter(digest.Digest(imageManifestDigest), fuseOpEmitWaitDuration) go c.fuseOperationCounter.Run(fsCtx) }) c.cachedErrMu.RLock() retErr = c.cachedErr c.cachedErrMu.RUnlock() return retErr } func (c *sociContext) populateImageLayerToSociMapping(sociIndex *soci.Index) { c.imageLayerToSociDesc = make(map[string]ocispec.Descriptor, len(sociIndex.Blobs)) for _, desc := range sociIndex.Blobs { ociDigest := desc.Annotations[soci.IndexAnnotationImageLayerDigest] c.imageLayerToSociDesc[ociDigest] = desc } } type filesystem struct { ctx context.Context resolver *layer.Resolver debug bool layer map[string]layer.Layer layerMu sync.Mutex allowNoVerification bool disableVerification bool getSources source.GetSources metricsController *layermetrics.Controller attrTimeout time.Duration entryTimeout time.Duration negativeTimeout time.Duration httpConfig config.RetryableHTTPClientConfig sociContexts sync.Map contentStore store.Store bgFetcher *bf.BackgroundFetcher mountTimeout time.Duration fuseMetricsEmitWaitDuration time.Duration } func (fs *filesystem) MountLocal(ctx context.Context, mountpoint string, labels map[string]string, mounts []mount.Mount) error { imageRef, ok := labels[ctdsnapshotters.TargetRefLabel] if !ok { return fmt.Errorf("unable to get image ref from labels") } // Get source information of this layer. 
src, err := fs.getSources(labels) if err != nil { return err } else if len(src) == 0 { return fmt.Errorf("blob info not found for any labels in %s", fmt.Sprint(labels)) } // download the target layer s := src[0] archive := NewLayerArchive() refspec, err := reference.Parse(imageRef) if err != nil { return fmt.Errorf("cannot parse image ref (%s): %w", imageRef, err) } remoteStore, err := newRemoteStore(refspec, fs.httpConfig) if err != nil { return fmt.Errorf("cannot create remote store: %w", err) } fetcher, err := newArtifactFetcher(refspec, fs.contentStore, remoteStore) if err != nil { return fmt.Errorf("cannot create fetcher: %w", err) } unpacker := NewLayerUnpacker(fetcher, archive) desc := s.Target err = unpacker.Unpack(ctx, desc, mountpoint, mounts) if err != nil { return fmt.Errorf("cannot unpack the layer: %w", err) } return nil } func (fs *filesystem) getSociContext(ctx context.Context, imageRef, indexDigest, imageManifestDigest string) (*sociContext, error) { cAny, _ := fs.sociContexts.LoadOrStore(imageManifestDigest, &sociContext{}) c, ok := cAny.(*sociContext) if !ok { return nil, fmt.Errorf("could not load index: fs soci context is invalid type for %s", indexDigest) } err := c.Init(fs.ctx, ctx, imageRef, indexDigest, imageManifestDigest, fs.contentStore, fs.fuseMetricsEmitWaitDuration, fs.httpConfig) return c, err } func (fs *filesystem) Mount(ctx context.Context, mountpoint string, labels map[string]string) (retErr error) { // Setting the start time to measure the Mount operation duration. start := time.Now() ctx = log.WithLogger(ctx, log.G(ctx).WithField("mountpoint", mountpoint)) // If this is empty or the label doesn't exist, then we will use the referrers API later // to get find an index digest. 
sociIndexDigest := labels[source.TargetSociIndexDigestLabel] imageRef, ok := labels[ctdsnapshotters.TargetRefLabel] if !ok { return fmt.Errorf("unable to get image ref from labels") } imgDigest, ok := labels[ctdsnapshotters.TargetManifestDigestLabel] if !ok { return fmt.Errorf("unable to get image digest from labels") } c, err := fs.getSociContext(ctx, imageRef, sociIndexDigest, imgDigest) if err != nil { return fmt.Errorf("unable to fetch SOCI artifacts: %w", err) } // Get source information of this layer. src, err := fs.getSources(labels) if err != nil { return err } else if len(src) == 0 { return fmt.Errorf("source must be passed") } // Resolve the target layer var ( resultChan = make(chan layer.Layer) errChan = make(chan error) ) go func() { var rErr error for _, s := range src { sociDesc, ok := c.imageLayerToSociDesc[s.Target.Digest.String()] if !ok { log.G(ctx).WithFields(logrus.Fields{ "layerDigest": s.Target.Digest.String(), "image": s.Name.String(), }).Infof("skipping mounting layer as FUSE mount: %v", snapshot.ErrNoZtoc) rErr = fmt.Errorf("skipping mounting layer %s as FUSE mount: %w", s.Target.Digest.String(), snapshot.ErrNoZtoc) break } l, err := fs.resolver.Resolve(ctx, s.Hosts, s.Name, s.Target, sociDesc, c.fuseOperationCounter) if err == nil { resultChan <- l return } rErr = fmt.Errorf("failed to resolve layer %q from %q: %w", s.Target.Digest, s.Name, err) } errChan <- rErr }() // Also resolve and cache other layers in parallel preResolve := src[0] // TODO: should we pre-resolve blobs in other sources as well? for _, desc := range neighboringLayers(preResolve.Manifest, preResolve.Target) { desc := desc go func() { // Avoids to get canceled by client. 
ctx := log.WithLogger(context.Background(), log.G(ctx).WithField("mountpoint", mountpoint)) sociDesc, ok := c.imageLayerToSociDesc[desc.Digest.String()] if !ok { log.G(ctx).WithError(snapshot.ErrNoZtoc).WithField("layerDigest", desc.Digest.String()).Debug("skipping layer pre-resolve") return } l, err := fs.resolver.Resolve(ctx, preResolve.Hosts, preResolve.Name, desc, sociDesc, c.fuseOperationCounter) if err != nil { log.G(ctx).WithError(err).Debug("failed to pre-resolve") return } // Release this layer because this isn't target and we don't use it anymore here. // However, this will remain on the resolver cache until eviction. l.Done() }() } // Wait for resolving completion var l layer.Layer select { case l = <-resultChan: case err := <-errChan: retErr = err return case <-time.After(fs.mountTimeout): log.G(ctx).WithFields(logrus.Fields{ "timeout": fs.mountTimeout.String(), "layerDigest": labels[ctdsnapshotters.TargetLayerDigestLabel], }).Info("timeout waiting for layer to resolve") retErr = fmt.Errorf("timeout waiting for layer %s to resolve", labels[ctdsnapshotters.TargetLayerDigestLabel]) return } defer func() { if retErr != nil { l.Done() // don't use this layer. } }() // Verify layer's content if fs.disableVerification { // Skip if verification is disabled completely l.SkipVerify() log.G(ctx).Infof("Verification forcefully skipped") } node, err := l.RootNode(0) if err != nil { log.G(ctx).WithError(err).Warnf("Failed to get root node") retErr = fmt.Errorf("failed to get root node: %w", err) return } // Measuring duration of Mount operation for resolved layer. 
digest := l.Info().Digest // get layer sha defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.Mount, digest, start) // Register the mountpoint layer fs.layerMu.Lock() fs.layer[mountpoint] = l fs.layerMu.Unlock() fs.metricsController.Add(mountpoint, l) // mount the node to the specified mountpoint // TODO: bind mount the state directory as a read-only fs on snapshotter's side rawFS := fusefs.NewNodeFS(node, &fusefs.Options{ AttrTimeout: &fs.attrTimeout, EntryTimeout: &fs.entryTimeout, NegativeTimeout: &fs.negativeTimeout, NullPermissions: true, }) mountOpts := &fuse.MountOptions{ AllowOther: true, // allow users other than root&mounter to access fs FsName: "soci", // name this filesystem as "soci" Debug: fs.debug, } if _, err := exec.LookPath(fusermountBin); err == nil { mountOpts.Options = []string{"suid"} // option for fusermount; allow setuid inside container } else { log.G(ctx).WithError(err).Infof("%s not installed; trying direct mount", fusermountBin) mountOpts.DirectMount = true } server, err := fuse.NewServer(rawFS, mountpoint, mountOpts) if err != nil { log.G(ctx).WithError(err).Debug("failed to make filesystem server") retErr = err return } go server.Serve() // Send a signal to the background fetcher that a new image is being mounted // and to pause all background fetches. 
c.bgFetchPauseOnce.Do(func() { if fs.bgFetcher != nil { fs.bgFetcher.Pause() } }) return server.WaitMount() } func (fs *filesystem) Check(ctx context.Context, mountpoint string, labels map[string]string) error { ctx = log.WithLogger(ctx, log.G(ctx).WithField("mountpoint", mountpoint)) fs.layerMu.Lock() l := fs.layer[mountpoint] fs.layerMu.Unlock() if l == nil { log.G(ctx).Debug("layer not registered") return fmt.Errorf("layer not registered") } // Check the blob connectivity and try to refresh the connection on failure if err := fs.check(ctx, l, labels); err != nil { log.G(ctx).WithError(err).Warn("check failed") return err } return nil } func (fs *filesystem) check(ctx context.Context, l layer.Layer, labels map[string]string) error { err := l.Check() if err == nil { return nil } log.G(ctx).WithError(err).Warn("failed to connect to blob") // Check failed. Try to refresh the connection with fresh source information src, err := fs.getSources(labels) if err != nil { return err } var ( retrynum = 1 rErr = fmt.Errorf("failed to refresh connection") ) for retry := 0; retry < retrynum; retry++ { log.G(ctx).Warnf("refreshing(%d)...", retry) for _, s := range src { err := l.Refresh(ctx, s.Hosts, s.Name, s.Target) if err == nil { log.G(ctx).Debug("Successfully refreshed connection") return nil } log.G(ctx).WithError(err).Warnf("failed to refresh the layer %q from %q", s.Target.Digest, s.Name) rErr = fmt.Errorf("failed(layer:%q, ref:%q): %v: %w", s.Target.Digest, s.Name, err, rErr) } } return rErr } func (fs *filesystem) Unmount(ctx context.Context, mountpoint string) error { fs.layerMu.Lock() l, ok := fs.layer[mountpoint] if !ok { fs.layerMu.Unlock() return fmt.Errorf("specified path %q isn't a mountpoint", mountpoint) } delete(fs.layer, mountpoint) // unregisters the corresponding layer l.Done() fs.layerMu.Unlock() fs.metricsController.Remove(mountpoint) // The goroutine which serving the mountpoint possibly becomes not responding. 
// In case of such situations, we use MNT_FORCE here and abort the connection. // In the future, we might be able to consider to kill that specific hanging // goroutine using channel, etc. // See also: https://www.kernel.org/doc/html/latest/filesystems/fuse.html#aborting-a-filesystem-connection return syscall.Unmount(mountpoint, syscall.MNT_FORCE) } // neighboringLayers returns layer descriptors except the `target` layer in the specified manifest. func neighboringLayers(manifest ocispec.Manifest, target ocispec.Descriptor) (descs []ocispec.Descriptor) { for _, desc := range manifest.Layers { if desc.Digest.String() != target.Digest.String() { descs = append(descs, desc) } } return } soci-snapshotter-0.4.1/fs/fs_test.go000066400000000000000000000065341454010642300174360ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. 
Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package fs import ( "context" "fmt" "testing" "github.com/awslabs/soci-snapshotter/fs/layer" "github.com/awslabs/soci-snapshotter/fs/remote" "github.com/awslabs/soci-snapshotter/fs/source" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes/docker" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) func TestCheck(t *testing.T) { bl := &breakableLayer{} fs := &filesystem{ layer: map[string]layer.Layer{ "test": bl, }, getSources: source.FromDefaultLabels(func(refspec reference.Spec) (hosts []docker.RegistryHost, _ error) { return docker.ConfigureDefaultRegistries(docker.WithPlainHTTP(docker.MatchLocalhost))(refspec.Hostname()) }), } bl.success = true if err := fs.Check(context.TODO(), "test", nil); err != nil { t.Errorf("connection failed; wanted to succeed: %v", err) } bl.success = false if err := fs.Check(context.TODO(), "test", nil); err == nil { t.Errorf("connection succeeded; wanted to fail") } } type breakableLayer struct { success bool } func (l *breakableLayer) Info() layer.Info { return layer.Info{} } func (l *breakableLayer) RootNode(uint32) (fusefs.InodeEmbedder, error) { return nil, nil } func (l *breakableLayer) Verify(tocDigest digest.Digest) error { return nil } func (l *breakableLayer) SkipVerify() {} func (l *breakableLayer) ReadAt([]byte, int64, ...remote.Option) (int, error) { return 0, nil } func (l *breakableLayer) BackgroundFetch() error { return fmt.Errorf("fail") } func (l *breakableLayer) Check() error { if !l.success { return fmt.Errorf("failed") } return nil } func (l *breakableLayer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if !l.success { return fmt.Errorf("failed") } return nil } func (l *breakableLayer) Done() {} 
soci-snapshotter-0.4.1/fs/layer/000077500000000000000000000000001454010642300165445ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/layer/layer.go000066400000000000000000000402331454010642300202110ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package layer import ( "bytes" "context" "fmt" "io" "os" "path/filepath" "sync" "time" "github.com/awslabs/soci-snapshotter/cache" "github.com/awslabs/soci-snapshotter/config" backgroundfetcher "github.com/awslabs/soci-snapshotter/fs/backgroundfetcher" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" "github.com/awslabs/soci-snapshotter/fs/reader" "github.com/awslabs/soci-snapshotter/fs/remote" "github.com/awslabs/soci-snapshotter/fs/source" spanmanager "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/metadata" "github.com/awslabs/soci-snapshotter/util/lrucache" "github.com/awslabs/soci-snapshotter/util/namedmutex" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "oras.land/oras-go/v2/content" ) const ( defaultResolveResultEntry = 30 defaultMaxLRUCacheEntry = 10 defaultMaxCacheFds = 10 memoryCacheType = "memory" ) // Layer represents a layer. type Layer interface { // Info returns the information of this layer. Info() Info // RootNode returns the root node of this layer. RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) // Check checks if the layer is still connectable. Check() error // Refresh refreshes the layer connection. Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error // Verify verifies this layer using the passed TOC Digest. // Nop if Verify() or SkipVerify() was already called. Verify(tocDigest digest.Digest) (err error) // SkipVerify skips verification for this layer. // Nop if Verify() or SkipVerify() was already called. SkipVerify() // ReadAt reads this layer. ReadAt([]byte, int64, ...remote.Option) (int, error) // Done releases the reference to this layer. 
The resources related to this layer will be // discarded sooner or later. Queries after calling this function won't be serviced. Done() } // Info is the current status of a layer. type Info struct { Digest digest.Digest Size int64 // layer size in bytes FetchedSize int64 // layer fetched size in bytes ReadTime time.Time // last time the layer was read } // Resolver resolves the layer location and provieds the handler of that layer. type Resolver struct { rootDir string resolver *remote.Resolver layerCache *lrucache.Cache layerCacheMu sync.Mutex blobCache *lrucache.Cache blobCacheMu sync.Mutex resolveLock *namedmutex.NamedMutex config config.FSConfig metadataStore metadata.Store artifactStore content.Storage overlayOpaqueType OverlayOpaqueType bgFetcher *backgroundfetcher.BackgroundFetcher } // NewResolver returns a new layer resolver. func NewResolver(root string, cfg config.FSConfig, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, artifactStore content.Storage, overlayOpaqueType OverlayOpaqueType, bgFetcher *backgroundfetcher.BackgroundFetcher) (*Resolver, error) { resolveResultEntry := cfg.ResolveResultEntry if resolveResultEntry == 0 { resolveResultEntry = defaultResolveResultEntry } // layerCache caches resolved layers for future use. This is useful in a use-case where // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. layerCache := lrucache.New(resolveResultEntry) layerCache.OnEvicted = func(key string, value interface{}) { if err := value.(*layer).close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer") return } logrus.WithField("key", key).Debugf("cleaned up layer") } // blobCache caches resolved blobs for future use. 
blobCache := lrucache.New(resolveResultEntry) blobCache.OnEvicted = func(key string, value interface{}) { if err := value.(remote.Blob).Close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob") return } logrus.WithField("key", key).Debugf("cleaned up blob") } if err := os.MkdirAll(root, 0700); err != nil { return nil, err } return &Resolver{ rootDir: root, resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), layerCache: layerCache, blobCache: blobCache, config: cfg, resolveLock: new(namedmutex.NamedMutex), metadataStore: metadataStore, artifactStore: artifactStore, overlayOpaqueType: overlayOpaqueType, bgFetcher: bgFetcher, }, nil } func newCache(root string, cacheType string, cfg config.FSConfig) (cache.BlobCache, error) { if cacheType == memoryCacheType { return cache.NewMemoryCache(), nil } dcc := cfg.DirectoryCacheConfig maxDataEntry := dcc.MaxLRUCacheEntry if maxDataEntry == 0 { maxDataEntry = defaultMaxLRUCacheEntry } maxFdEntry := dcc.MaxCacheFds if maxFdEntry == 0 { maxFdEntry = defaultMaxCacheFds } bufPool := &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } dCache, fCache := lrucache.New(maxDataEntry), lrucache.New(maxFdEntry) dCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) } fCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } // create a cache on an unique directory if err := os.MkdirAll(root, 0700); err != nil { return nil, err } cachePath, err := os.MkdirTemp(root, "") if err != nil { return nil, fmt.Errorf("failed to initialize directory cache: %w", err) } return cache.NewDirectoryCache( cachePath, cache.DirectoryCacheConfig{ SyncAdd: dcc.SyncAdd, DataCache: dCache, FdCache: fCache, BufPool: bufPool, Direct: dcc.Direct, }, ) } // Resolve resolves a layer based on the passed layer blob information. 
func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc, sociDesc ocispec.Descriptor, opCounter *FuseOperationCounter, metadataOpts ...metadata.Option) (_ Layer, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Wait if resolving this layer is already running. The result // can hopefully get from the LRU cache. r.resolveLock.Lock(name) defer r.resolveLock.Unlock(name) ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name)) // First, try to retrieve this layer from the underlying LRU cache. r.layerCacheMu.Lock() c, done, ok := r.layerCache.Get(name) r.layerCacheMu.Unlock() if ok { if l := c.(*layer); l.Check() == nil { log.G(ctx).Debugf("hit layer cache %q", name) return &layerRef{l, done}, nil } // Cached layer is invalid done() r.layerCacheMu.Lock() r.layerCache.Remove(name) r.layerCacheMu.Unlock() } log.G(ctx).Debugf("resolving") // Resolve the blob. blobR, err := r.resolveBlob(ctx, hosts, refspec, desc) if err != nil { return nil, fmt.Errorf("failed to resolve the blob: %w", err) } defer func() { if retErr != nil { blobR.done() } }() spanCache, err := newCache(filepath.Join(r.rootDir, "spancache"), r.config.FSCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create span manager cache: %w", err) } defer func() { if retErr != nil { spanCache.Close() } }() ztocReader, err := r.artifactStore.Fetch(ctx, sociDesc) if err != nil { return nil, err } defer ztocReader.Close() // Check if the ztoc exists (will be passed from fs) // If it exists, we decide if we want to lazily load layer, or // download/decompress the entire layer // If we decide to download/decompress the entire layer, getZtoc will not return the ztoc ztoc, err := ztoc.Unmarshal(ztocReader) if err != nil { // for now error out and let container runtime handle the layer download return nil, fmt.Errorf("cannot get ztoc; download and unpack this layer in container runtime for now: %w", err) } if ztoc == nil { // 
1. download and unpack the layer // 2. return the reference to the layer // for now just error out, so container runtime takes care of this return nil, fmt.Errorf("download and unpack this layer in container runtime for now") } // log ztoc info log.G(context.Background()).WithFields(logrus.Fields{ "layer_sha": desc.Digest, "files_in_layer": len(ztoc.FileMetadata), }).Debugf("[Resolver.Resolve] downloaded layer ZTOC") // continue with resolving the layer presuming we handle ZTOC // ztoc will belong to a layer // Get a reader for the layer files // Each file's read operation is a prioritized task and all background tasks // will be stopped during the execution so this can avoid being disturbed for // NW traffic by background tasks. sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) { return blobR.ReadAt(p, offset) }), 0, blobR.Size()) // define telemetry hooks to measure latency metrics for the metadata store telemetry := metadata.Telemetry{ InitMetadataStoreLatency: func(start time.Time) { commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.InitMetadataStore, desc.Digest, start) }, } meta, err := r.metadataStore(sr, ztoc.TOC, append(metadataOpts, metadata.WithTelemetry(&telemetry))...) if err != nil { return nil, err } log.G(ctx).Debugf("[Resolver.Resolve]Initialized metadata store for layer sha=%v", desc.Digest) spanManager := spanmanager.New(ztoc, sr, spanCache, r.config.BlobConfig.MaxSpanVerificationRetries, cache.Direct()) var bgLayerResolver backgroundfetcher.Resolver if r.bgFetcher != nil { bgLayerResolver = backgroundfetcher.NewSequentialResolver(desc.Digest, spanManager) r.bgFetcher.Add(bgLayerResolver) } vr, err := reader.NewReader(meta, desc.Digest, spanManager) if err != nil { return nil, fmt.Errorf("failed to read layer: %w", err) } // Combine layer information together and cache it. 
l := newLayer(r, desc, blobR, vr, bgLayerResolver, opCounter) r.layerCacheMu.Lock() cachedL, done2, added := r.layerCache.Add(name, l) r.layerCacheMu.Unlock() if !added { l.close() // layer already exists in the cache. discrad this. } log.G(ctx).Debugf("resolved layer") return &layerRef{cachedL.(*layer), done2}, nil } // resolveBlob resolves a blob based on the passed layer blob information. func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) { name := refspec.String() + "/" + desc.Digest.String() // Try to retrieve the blob from the underlying LRU cache. r.blobCacheMu.Lock() c, done, ok := r.blobCache.Get(name) r.blobCacheMu.Unlock() if ok { if blob := c.(remote.Blob); blob.Check() == nil { return &blobRef{blob, done}, nil } // invalid blob. discard this. done() r.blobCacheMu.Lock() r.blobCache.Remove(name) r.blobCacheMu.Unlock() } httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config) if err != nil { return nil, fmt.Errorf("failed to create http cache: %w", err) } defer func() { if retErr != nil { httpCache.Close() } }() // Resolve the blob and cache the result. b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache) if err != nil { return nil, fmt.Errorf("failed to resolve the source: %w", err) } r.blobCacheMu.Lock() cachedB, done, added := r.blobCache.Add(name, b) r.blobCacheMu.Unlock() if !added { b.Close() // blob already exists in the cache. discard this. 
} return &blobRef{cachedB.(remote.Blob), done}, nil } func newLayer( resolver *Resolver, desc ocispec.Descriptor, blob *blobRef, vr *reader.VerifiableReader, bgResolver backgroundfetcher.Resolver, opCounter *FuseOperationCounter, ) *layer { return &layer{ resolver: resolver, desc: desc, blob: blob, verifiableReader: vr, bgResolver: bgResolver, fuseOperationCounter: opCounter, } } type layer struct { resolver *Resolver desc ocispec.Descriptor blob *blobRef verifiableReader *reader.VerifiableReader bgResolver backgroundfetcher.Resolver r reader.Reader fuseOperationCounter *FuseOperationCounter closed bool closedMu sync.Mutex } func (l *layer) Info() Info { var readTime time.Time if l.r != nil { readTime = l.r.LastOnDemandReadTime() } return Info{ Digest: l.desc.Digest, Size: l.blob.Size(), FetchedSize: l.blob.FetchedSize(), ReadTime: readTime, } } func (l *layer) Check() error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Check() } func (l *layer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if l.isClosed() { return fmt.Errorf("layer is already closed") } return l.blob.Refresh(ctx, hosts, refspec, desc) } func (l *layer) Verify(tocDigest digest.Digest) (err error) { if l.isClosed() { return fmt.Errorf("layer is already closed") } if l.r != nil { return nil } l.r, err = l.verifiableReader.VerifyTOC(tocDigest) return } func (l *layer) SkipVerify() { if l.r != nil { return } l.r = l.verifiableReader.SkipVerify() } func (l *layerRef) Done() { l.done() } func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.isClosed() { return nil, fmt.Errorf("layer is already closed") } if l.r == nil { return nil, fmt.Errorf("layer hasn't been verified yet") } return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType, l.resolver.config.LogFuseOperations, l.fuseOperationCounter) } func (l *layer) ReadAt(p []byte, offset int64, opts 
...remote.Option) (int, error) { return l.blob.ReadAt(p, offset, opts...) } func (l *layer) close() error { l.closedMu.Lock() defer l.closedMu.Unlock() if l.closed { return nil } l.closed = true if l.bgResolver != nil { l.bgResolver.Close() } defer l.blob.done() // Close reader first, then close the blob l.verifiableReader.Close() if l.r != nil { return l.r.Close() } return nil } func (l *layer) isClosed() bool { l.closedMu.Lock() closed := l.closed l.closedMu.Unlock() return closed } // blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter // of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound // to this blob will be discarded. type blobRef struct { remote.Blob done func() } // layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the // reference counter of this blob in the underlying cache. When nobody refers to the layer in the // cache, resources bound to this layer will be discarded. type layerRef struct { *layer done func() } type readerAtFunc func([]byte, int64) (int, error) func (f readerAtFunc) ReadAt(p []byte, offset int64) (int, error) { return f(p, offset) } soci-snapshotter-0.4.1/fs/layer/layer_test.go000066400000000000000000000056411454010642300212540ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package layer import ( "fmt" "sync" "testing" "time" "github.com/awslabs/soci-snapshotter/metadata" ) func TestLayer(t *testing.T) { testNodeRead(t, metadata.NewTempDbStore) testExistence(t, metadata.NewTempDbStore) } func TestWaiter(t *testing.T) { var ( w = newWaiter() waitTime = time.Second startTime = time.Now() doneTime time.Time done = make(chan struct{}) ) go func() { defer close(done) if err := w.wait(10 * time.Second); err != nil { t.Errorf("failed to wait: %v", err) return } doneTime = time.Now() }() time.Sleep(waitTime) w.done() <-done if doneTime.Sub(startTime) < waitTime { t.Errorf("wait time is too short: %v; want %v", doneTime.Sub(startTime), waitTime) } } func newWaiter() *waiter { return &waiter{ completionCond: sync.NewCond(&sync.Mutex{}), } } type waiter struct { isDone bool isDoneMu sync.Mutex completionCond *sync.Cond } func (w *waiter) done() { w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() } func (w *waiter) wait(timeout time.Duration) error { wait := func() <-chan struct{} { ch := make(chan struct{}) go func() { w.isDoneMu.Lock() isDone := w.isDone w.isDoneMu.Unlock() w.completionCond.L.Lock() if !isDone { w.completionCond.Wait() } w.completionCond.L.Unlock() ch <- struct{}{} }() return ch } select { case <-time.After(timeout): 
w.isDoneMu.Lock() w.isDone = true w.isDoneMu.Unlock() w.completionCond.Broadcast() return fmt.Errorf("timeout(%v)", timeout) case <-wait(): return nil } } soci-snapshotter-0.4.1/fs/layer/node.go000066400000000000000000000663401454010642300200310ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package layer import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "os" "sort" "strings" "sync" "sync/atomic" "syscall" "time" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" "github.com/awslabs/soci-snapshotter/fs/reader" "github.com/awslabs/soci-snapshotter/fs/remote" "github.com/awslabs/soci-snapshotter/metadata" "github.com/containerd/containerd/log" fusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) const ( blockSize = 4096 // physicalBlockRatio is the ratio of blockSize to 512. It's used as a multiplier // to convert # of blockSize-byte blocks to # of 512 byte blocks. physicalBlockRatio = blockSize / 512 whiteoutPrefix = ".wh." whiteoutOpaqueDir = whiteoutPrefix + whiteoutPrefix + ".opq" opaqueXattrValue = "y" stateDirName = ".soci-snapshotter" statFileMode = syscall.S_IFREG | 0400 // -r-------- stateDirMode = syscall.S_IFDIR | 0500 // dr-x------ ) // OverlayOpaqueType enum possible types. type OverlayOpaqueType int // OverlayOpaqueType enum. const ( OverlayOpaqueAll OverlayOpaqueType = iota OverlayOpaqueTrusted OverlayOpaqueUser ) var opaqueXattrs = map[OverlayOpaqueType][]string{ OverlayOpaqueAll: {"trusted.overlay.opaque", "user.overlay.opaque"}, OverlayOpaqueTrusted: {"trusted.overlay.opaque"}, OverlayOpaqueUser: {"user.overlay.opaque"}, } // fuse operations. 
const (
	fuseOpGetattr         = "node.Getattr"
	fuseOpGetxattr        = "node.Getxattr"
	fuseOpListxattr       = "node.Listxattr"
	fuseOpLookup          = "node.Lookup"
	fuseOpOpen            = "node.Open"
	fuseOpReaddir         = "node.Readdir"
	fuseOpReadLink        = "node.Readlink"
	fuseOpFileRead        = "file.Read"
	fuseOpFileGetattr     = "file.Getattr"
	fuseOpWhiteoutGetattr = "whiteout.Getattr"
)

// fuseOpFailureMetrics maps a FUSE operation name to the failure-count metric
// incremented when that operation fails. Operations that are not present in
// this map (Getxattr, Readlink) fall back to the generic unknown-failure
// metric in incFuseOpFailureMetric.
var fuseOpFailureMetrics = map[string]string{
	fuseOpGetattr:         commonmetrics.FuseNodeGetattrFailureCount,
	fuseOpListxattr:       commonmetrics.FuseNodeListxattrFailureCount,
	fuseOpLookup:          commonmetrics.FuseNodeLookupFailureCount,
	fuseOpOpen:            commonmetrics.FuseNodeOpenFailureCount,
	fuseOpReaddir:         commonmetrics.FuseNodeReaddirFailureCount,
	fuseOpFileRead:        commonmetrics.FuseFileReadFailureCount,
	fuseOpFileGetattr:     commonmetrics.FuseFileGetattrFailureCount,
	fuseOpWhiteoutGetattr: commonmetrics.FuseWhiteoutGetattrFailureCount,
}

// FuseOperationCounter collects number of invocations of the various FUSE implementations and emits them as metrics.
// Setting `waitPeriod` to be > 0 allows delaying the time when the metrics are emitted.
type FuseOperationCounter struct {
	opCounts    map[string]*int32 // per-operation counters; updated atomically via Inc
	waitPeriod  time.Duration     // how long Run waits before emitting the aggregated counts
	imageDigest digest.Digest     // digest of the image these counts are attributed to
}

// FuseOpsList is a list of available FUSE operations.
var FuseOpsList = []string{
	fuseOpGetattr,
	fuseOpGetxattr,
	fuseOpListxattr,
	fuseOpLookup,
	fuseOpOpen,
	fuseOpReaddir,
	fuseOpReadLink,
	fuseOpFileRead,
	fuseOpFileGetattr,
	fuseOpWhiteoutGetattr,
}

// NewFuseOperationCounter constructs a FuseOperationCounter for an image with digest imgDigest.
// waitPeriod specifies how long to wait before emitting the aggregated metrics.
func NewFuseOperationCounter(imgDigest digest.Digest, waitPeriod time.Duration) *FuseOperationCounter {
	f := &FuseOperationCounter{
		imageDigest: imgDigest,
		waitPeriod:  waitPeriod,
		opCounts:    make(map[string]*int32),
	}
	// Pre-allocate one counter per known operation so Inc never needs to
	// mutate the map and can therefore run without a lock.
	for _, m := range FuseOpsList {
		f.opCounts[m] = new(int32)
	}
	return f
}

// Inc atomically increase the count of FUSE operation op.
// Noop if op is not in FuseOpsList.
func (f *FuseOperationCounter) Inc(op string) {
	opCount, ok := f.opCounts[op]
	if !ok {
		// Unknown operation name: silently ignore rather than grow the map.
		return
	}
	atomic.AddInt32(opCount, 1)
}

// Run waits for f.waitPeriod to pass before emitting a log and metric for each
// operation in FuseOpsList. Should be started in different goroutine so that it
// doesn't block the current goroutine.
func (f *FuseOperationCounter) Run(ctx context.Context) {
	select {
	case <-ctx.Done():
		// Context cancelled before the wait period elapsed; emit nothing.
		return
	case <-time.After(f.waitPeriod):
		for op, opCount := range f.opCounts {
			// We want both an aggregated metric (e.g. p90) and an image specific metric so that we can compare
			// how a specific image is behaving to a larger dataset. When the image cardinality is small,
			// we can just include the image digest as a label on the metric itself, however, when the cardinality
			// is large, this can be very expensive. Here we give consumers options by emitting both logs and
			// metrics. A low cardinality use case can rely on metrics. A high cardinality use case can
			// aggregate the metrics across all images, but still get the per-image info via logs.
			count := atomic.LoadInt32(opCount)
			commonmetrics.AddImageOperationCount(op, f.imageDigest, count)
			log.G(ctx).Infof("fuse operation count for image %s: %s = %d", f.imageDigest, op, count)
		}
	}
}

// incFuseOpFailureMetric bumps the failure counter associated with the given
// FUSE operation for the given layer. Operations without a dedicated metric
// are counted under the generic unknown-failure metric.
func incFuseOpFailureMetric(operationName string, layer digest.Digest) {
	metric, ok := fuseOpFailureMetrics[operationName]
	if !ok {
		metric = commonmetrics.FuseUnknownFailureCount
	}
	commonmetrics.IncOperationCount(metric, layer)
}

// newNode builds the root fusefs inode for one image layer.
//
// logFSOperations may cause sensitive information to be emitted to logs
// e.g. filenames and paths within an image
func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseInode uint32, opaque OverlayOpaqueType, logFSOperations bool, opCounter *FuseOperationCounter) (fusefs.InodeEmbedder, error) {
	rootID := r.Metadata().RootID()
	rootAttr, err := r.Metadata().GetAttr(rootID)
	if err != nil {
		return nil, err
	}
	// Resolve which overlayfs opaque xattr names this layer should expose.
	opq, ok := opaqueXattrs[opaque]
	if !ok {
		return nil, fmt.Errorf("unknown overlay opaque type")
	}
	ffs := &fs{
		r:                r,
		layerDigest:      layerDgst,
		baseInode:        baseInode,
		rootID:           rootID,
		opaqueXattrs:     opq,
		logFSOperations:  logFSOperations,
		operationCounter: opCounter,
	}
	// The state dir/file nodes are created eagerly so they get stable inodes.
	ffs.s = ffs.newState(layerDgst, blob)
	return &node{
		id:   rootID,
		attr: rootAttr,
		fs:   ffs,
	}, nil
}

// fs contains global metadata used by nodes
type fs struct {
	r                reader.Reader         // metadata + file content reader for this layer
	s                *state                // state directory used for error reporting
	layerDigest      digest.Digest         // digest of the layer served by this filesystem
	baseInode        uint32                // per-layer prefix placed in the top 32 bits of every inode
	rootID           uint32                // metadata ID of the layer's root directory
	opaqueXattrs     []string              // overlayfs opaque xattr names exposed by this layer
	logFSOperations  bool                  // when true, each FUSE op is logged (may leak paths)
	operationCounter *FuseOperationCounter // optional per-operation counters; may be nil
}

// inodeOfState returns the reserved inode for the state directory.
func (fs *fs) inodeOfState() uint64 {
	return (uint64(fs.baseInode) << 32) | 1 // reserved
}

// inodeOfStatFile returns the reserved inode for the stat file inside the state dir.
func (fs *fs) inodeOfStatFile() uint64 {
	return (uint64(fs.baseInode) << 32) | 2 // reserved
}

// inodeOfID maps a metadata ID to a globally-unique inode number by prefixing
// it with this layer's baseInode and skipping the reserved low values.
func (fs *fs) inodeOfID(id uint32) (uint64, error) {
	// 0 is reserved by go-fuse 1 and 2 are reserved by the state dir
	if id > ^uint32(0)-3 {
		return 0, fmt.Errorf("too many inodes")
	}
	return (uint64(fs.baseInode) << 32) | uint64(3+id), nil
}

// node is a filesystem inode abstraction.
type node struct { fusefs.Inode fs *fs id uint32 attr metadata.Attr ents []fuse.DirEntry entsCached bool } func (n *node) logOperation(ctx context.Context, operationName string) { if n.fs.logFSOperations { log.G(ctx).WithFields(logrus.Fields{ "operation": operationName, "path": n.Path(nil), }).Debug("FUSE operation") } } func (n *node) isRootNode() bool { return n.id == n.fs.rootID } func (n *node) isOpaque() bool { if _, _, err := n.fs.r.Metadata().GetChild(n.id, whiteoutOpaqueDir); err == nil { return true } return false } var _ = (fusefs.InodeEmbedder)((*node)(nil)) var _ = (fusefs.NodeReaddirer)((*node)(nil)) func (n *node) Readdir(ctx context.Context) (fusefs.DirStream, syscall.Errno) { n.logOperation(ctx, fuseOpReaddir) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpReaddir) } ents, errno := n.readdir() if errno != 0 { return nil, errno } return fusefs.NewListDirStream(ents), 0 } func (n *node) readdir() ([]fuse.DirEntry, syscall.Errno) { // Measure how long node_readdir operation takes (in microseconds). start := time.Now() // set start time defer commonmetrics.MeasureLatencyInMicroseconds(commonmetrics.NodeReaddir, n.fs.layerDigest, start) if n.entsCached { return n.ents, 0 } var ents []fuse.DirEntry whiteouts := map[string]uint32{} normalEnts := map[string]bool{} var lastErr error if err := n.fs.r.Metadata().ForeachChild(n.id, func(name string, id uint32, mode os.FileMode) bool { // We don't want to show whiteouts. if strings.HasPrefix(name, whiteoutPrefix) { if name == whiteoutOpaqueDir { return true } // Add the overlayfs-compiant whiteout later. whiteouts[name] = id return true } // This is a normal entry. 
normalEnts[name] = true ino, err := n.fs.inodeOfID(id) if err != nil { lastErr = err return false } ents = append(ents, fuse.DirEntry{ Mode: fileModeToSystemMode(mode), Name: name, Ino: ino, }) return true }); err != nil || lastErr != nil { incFuseOpFailureMetric(fuseOpReaddir, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: err = %v; lastErr = %v", fuseOpReaddir, err, lastErr)) return nil, syscall.EIO } // Append whiteouts if no entry replaces the target entry in the lower layer. for w, id := range whiteouts { if !normalEnts[w[len(whiteoutPrefix):]] { ino, err := n.fs.inodeOfID(id) if err != nil { incFuseOpFailureMetric(fuseOpReaddir, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: err = %v; lastErr = %v", fuseOpReaddir, err, lastErr)) return nil, syscall.EIO } ents = append(ents, fuse.DirEntry{ Mode: syscall.S_IFCHR, Name: w[len(whiteoutPrefix):], Ino: ino, }) } } // Avoid undeterministic order of entries on each call sort.Slice(ents, func(i, j int) bool { return ents[i].Name < ents[j].Name }) n.ents, n.entsCached = ents, true // cache it return ents, 0 } var _ = (fusefs.NodeLookuper)((*node)(nil)) func (n *node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fusefs.Inode, syscall.Errno) { n.logOperation(ctx, fuseOpLookup) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpLookup) } isRoot := n.isRootNode() // We don't want to show whiteouts. 
if strings.HasPrefix(name, whiteoutPrefix) { return nil, syscall.ENOENT } // state directory if isRoot && name == stateDirName { return n.NewInode(ctx, n.fs.s, n.fs.stateToAttr(&out.Attr)), 0 } // lookup on memory nodes if cn := n.GetChild(name); cn != nil { switch tn := cn.Operations().(type) { case *node: ino, err := n.fs.inodeOfID(tn.id) if err != nil { incFuseOpFailureMetric(fuseOpLookup, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: %v", fuseOpLookup, err)) return nil, syscall.EIO } entryToAttr(ino, tn.attr, &out.Attr) case *whiteout: ino, err := n.fs.inodeOfID(tn.id) if err != nil { incFuseOpFailureMetric(fuseOpLookup, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: %v", fuseOpLookup, err)) return nil, syscall.EIO } entryToAttr(ino, tn.attr, &out.Attr) default: incFuseOpFailureMetric(fuseOpLookup, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: uknown node type detected", fuseOpLookup)) return nil, syscall.EIO } return cn, 0 } // early return if this entry doesn't exist if n.entsCached { var found bool for _, e := range n.ents { if e.Name == name { found = true } } if !found { return nil, syscall.ENOENT } } id, ce, err := n.fs.r.Metadata().GetChild(n.id, name) if err != nil { // If the entry exists as a whiteout, show an overlayfs-styled whiteout node. if whID, wh, err := n.fs.r.Metadata().GetChild(n.id, fmt.Sprintf("%s%s", whiteoutPrefix, name)); err == nil { ino, err := n.fs.inodeOfID(whID) if err != nil { incFuseOpFailureMetric(fuseOpLookup, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: %v", fuseOpLookup, err)) return nil, syscall.EIO } return n.NewInode(ctx, &whiteout{ id: whID, fs: n.fs, attr: wh, }, entryToWhAttr(ino, wh, &out.Attr)), 0 } n.readdir() // This code path is very expensive. Cache child entries here so that the next call don't reach here. 
return nil, syscall.ENOENT } ino, err := n.fs.inodeOfID(id) if err != nil { incFuseOpFailureMetric(fuseOpLookup, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: %v", fuseOpLookup, err)) return nil, syscall.EIO } return n.NewInode(ctx, &node{ id: id, fs: n.fs, attr: ce, }, entryToAttr(ino, ce, &out.Attr)), 0 } var _ = (fusefs.NodeOpener)((*node)(nil)) func (n *node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) { n.logOperation(ctx, fuseOpOpen) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpOpen) } ra, err := n.fs.r.OpenFile(n.id) if err != nil { incFuseOpFailureMetric(fuseOpOpen, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: %v", fuseOpOpen, err)) return nil, 0, syscall.EIO } return &file{ n: n, ra: ra, }, fuse.FOPEN_KEEP_CACHE, 0 } var _ = (fusefs.NodeGetattrer)((*node)(nil)) func (n *node) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno { n.logOperation(ctx, fuseOpGetattr) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpGetattr) } ino, err := n.fs.inodeOfID(n.id) if err != nil { incFuseOpFailureMetric(fuseOpGetattr, n.fs.layerDigest) n.fs.s.report(fmt.Errorf("%s: %v", fuseOpGetattr, err)) return syscall.EIO } entryToAttr(ino, n.attr, &out.Attr) return 0 } var _ = (fusefs.NodeGetxattrer)((*node)(nil)) func (n *node) Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno) { n.logOperation(ctx, fuseOpGetxattr) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpGetxattr) } ent := n.attr opq := n.isOpaque() for _, opaqueXattr := range n.fs.opaqueXattrs { if attr == opaqueXattr && opq { // This node is an opaque directory so give overlayfs-compliant indicator. 
if len(dest) < len(opaqueXattrValue) { return uint32(len(opaqueXattrValue)), syscall.ERANGE } return uint32(copy(dest, opaqueXattrValue)), 0 } } if v, ok := ent.Xattrs[attr]; ok { if len(dest) < len(v) { return uint32(len(v)), syscall.ERANGE } return uint32(copy(dest, v)), 0 } return 0, syscall.ENODATA } var _ = (fusefs.NodeListxattrer)((*node)(nil)) func (n *node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) { n.logOperation(ctx, fuseOpListxattr) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpListxattr) } ent := n.attr opq := n.isOpaque() var attrs []byte if opq { // This node is an opaque directory so add overlayfs-compliant indicator. for _, opaqueXattr := range n.fs.opaqueXattrs { attrs = append(attrs, []byte(opaqueXattr+"\x00")...) } } for k := range ent.Xattrs { attrs = append(attrs, []byte(k+"\x00")...) } if len(dest) < len(attrs) { return uint32(len(attrs)), syscall.ERANGE } return uint32(copy(dest, attrs)), 0 } var _ = (fusefs.NodeReadlinker)((*node)(nil)) func (n *node) Readlink(ctx context.Context) ([]byte, syscall.Errno) { n.logOperation(ctx, fuseOpReadLink) if n.fs.operationCounter != nil { n.fs.operationCounter.Inc(fuseOpReadLink) } ent := n.attr return []byte(ent.LinkName), 0 } var _ = (fusefs.NodeStatfser)((*node)(nil)) func (n *node) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno { defaultStatfs(out) return 0 } // file is a file abstraction which implements file handle in go-fuse. 
type file struct { n *node ra io.ReaderAt } var _ = (fusefs.FileReader)((*file)(nil)) func (f *file) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) { f.n.logOperation(ctx, fuseOpFileRead) if f.n.fs.operationCounter != nil { f.n.fs.operationCounter.Inc(fuseOpFileRead) } defer commonmetrics.MeasureLatencyInMicroseconds(commonmetrics.SynchronousRead, f.n.fs.layerDigest, time.Now()) // measure time for synchronous file reads (in microseconds) defer commonmetrics.IncOperationCount(commonmetrics.SynchronousReadCount, f.n.fs.layerDigest) // increment the counter for synchronous file reads n, err := f.ra.ReadAt(dest, off) if err != nil && err != io.EOF { incFuseOpFailureMetric(fuseOpFileRead, f.n.fs.layerDigest) f.n.fs.s.report(fmt.Errorf("%s: %v", fuseOpFileRead, err)) return nil, syscall.EIO } return fuse.ReadResultData(dest[:n]), 0 } var _ = (fusefs.FileGetattrer)((*file)(nil)) func (f *file) Getattr(ctx context.Context, out *fuse.AttrOut) syscall.Errno { f.n.logOperation(ctx, fuseOpFileGetattr) if f.n.fs.operationCounter != nil { f.n.fs.operationCounter.Inc(fuseOpFileGetattr) } ino, err := f.n.fs.inodeOfID(f.n.id) if err != nil { incFuseOpFailureMetric(fuseOpFileGetattr, f.n.fs.layerDigest) f.n.fs.s.report(fmt.Errorf("%s: %v", fuseOpFileGetattr, err)) return syscall.EIO } entryToAttr(ino, f.n.attr, &out.Attr) return 0 } // whiteout is a whiteout abstraction compliant to overlayfs. 
// whiteout represents a deleted entry as an overlayfs-style whiteout
// (a 0/0 character device).
type whiteout struct {
	fusefs.Inode
	id   uint32        // metadata ID of the original ".wh." entry
	fs   *fs           // owning filesystem
	attr metadata.Attr // attributes of the original entry (used for mtime)
}

var _ = (fusefs.NodeGetattrer)((*whiteout)(nil))

// Getattr reports whiteout attributes (char device, rdev 0/0).
func (w *whiteout) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	if w.fs.operationCounter != nil {
		w.fs.operationCounter.Inc(fuseOpWhiteoutGetattr)
	}
	ino, err := w.fs.inodeOfID(w.id)
	if err != nil {
		incFuseOpFailureMetric(fuseOpWhiteoutGetattr, w.fs.layerDigest)
		w.fs.s.report(fmt.Errorf("%s: %v", fuseOpWhiteoutGetattr, err))
		return syscall.EIO
	}
	entryToWhAttr(ino, w.attr, &out.Attr)
	return 0
}

var _ = (fusefs.NodeStatfser)((*whiteout)(nil))

func (w *whiteout) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
	defaultStatfs(out)
	return 0
}

// newState provides new state directory node.
// It creates statFile at the same time to give it stable inode number.
func (fs *fs) newState(layerDigest digest.Digest, blob remote.Blob) *state {
	return &state{
		statFile: &statFile{
			name: layerDigest.String() + ".json",
			statJSON: statJSON{
				Digest: layerDigest.String(),
				Size:   blob.Size(),
			},
			blob: blob,
			fs:   fs,
		},
		fs: fs,
	}
}

// state is a directory which contain a "state file" of this layer aiming to
// observability. This filesystem uses it to report something(e.g. error) to
// the clients(e.g. Kubernetes's livenessProbe).
// This directory has mode "dr-x------ root root".
type state struct {
	fusefs.Inode
	statFile *statFile // the single file exposed by this directory
	fs       *fs       // owning filesystem
}

var _ = (fusefs.NodeReaddirer)((*state)(nil))

// Readdir lists the single stat file contained in the state directory.
func (s *state) Readdir(ctx context.Context) (fusefs.DirStream, syscall.Errno) {
	return fusefs.NewListDirStream([]fuse.DirEntry{
		{
			Mode: statFileMode,
			Name: s.statFile.name,
			Ino:  s.fs.inodeOfStatFile(),
		},
	}), 0
}

var _ = (fusefs.NodeLookuper)((*state)(nil))

// Lookup resolves only the stat file; anything else is ENOENT.
func (s *state) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fusefs.Inode, syscall.Errno) {
	if name != s.statFile.name {
		return nil, syscall.ENOENT
	}
	attr, errno := s.statFile.attr(&out.Attr)
	if errno != 0 {
		return nil, errno
	}
	return s.NewInode(ctx, s.statFile, attr), 0
}

var _ = (fusefs.NodeGetattrer)((*state)(nil))

func (s *state) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	s.fs.stateToAttr(&out.Attr)
	return 0
}

var _ = (fusefs.NodeStatfser)((*state)(nil))

func (s *state) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
	defaultStatfs(out)
	return 0
}

// report records err into the stat file so clients polling it can observe it.
func (s *state) report(err error) {
	s.statFile.report(err)
}

// statJSON is the JSON document exposed through the stat file.
type statJSON struct {
	Error  string `json:"error,omitempty"`
	Digest string `json:"digest"`
	// URL is excluded for potential security reason

	Size           int64   `json:"size"`
	FetchedSize    int64   `json:"fetchedSize"`
	FetchedPercent float64 `json:"fetchedPercent"` // Fetched / Size * 100.0
}

// statFile is a file which contain something to be reported from this layer.
// This filesystem uses statFile.report() to report something(e.g. error) to
// the clients(e.g. Kubernetes's livenessProbe).
// This file has mode "-r-------- root root".
type statFile struct {
	fusefs.Inode
	name     string      // file name: "<layer digest>.json"
	blob     remote.Blob // blob used to refresh the fetched-size statistics
	statJSON statJSON    // current report contents; guarded by mu
	mu       sync.Mutex  // protects statJSON
	fs       *fs         // owning filesystem
}

var _ = (fusefs.NodeOpener)((*statFile)(nil))

// Open succeeds with no handle; reads are served by the node itself.
func (sf *statFile) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
	return nil, 0, 0
}

var _ = (fusefs.NodeReader)((*statFile)(nil))

// Read serves a read of the freshly-marshaled stat JSON at the given offset.
func (sf *statFile) Read(ctx context.Context, f fusefs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
	sf.mu.Lock()
	defer sf.mu.Unlock()
	st, err := sf.updateStatUnlocked()
	if err != nil {
		return nil, syscall.EIO
	}
	n, err := bytes.NewReader(st).ReadAt(dest, off)
	if err != nil && err != io.EOF {
		return nil, syscall.EIO
	}
	return fuse.ReadResultData(dest[:n]), 0
}

var _ = (fusefs.NodeGetattrer)((*statFile)(nil))

func (sf *statFile) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	_, errno := sf.attr(&out.Attr)
	return errno
}

var _ = (fusefs.NodeStatfser)((*statFile)(nil))

func (sf *statFile) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
	defaultStatfs(out)
	return 0
}

// logContents puts the contents of statFile in the log
// to keep that information accessible for troubleshooting.
// The entries naming is kept to be consistent with the field naming in statJSON.
func (sf *statFile) logContents() {
	ctx := context.Background()
	log.G(ctx).WithFields(logrus.Fields{
		"digest": sf.statJSON.Digest, "size": sf.statJSON.Size,
		"fetchedSize": sf.statJSON.FetchedSize, "fetchedPercent": sf.statJSON.FetchedPercent,
	}).WithError(errors.New(sf.statJSON.Error)).Error("statFile error")
}

// report records err in the stat JSON and logs the full stat contents.
func (sf *statFile) report(err error) {
	sf.mu.Lock()
	defer sf.mu.Unlock()
	sf.statJSON.Error = err.Error()
	sf.logContents()
}

// attr refreshes the stat JSON and fills out with this file's attributes
// (size reflects the current marshaled JSON length).
func (sf *statFile) attr(out *fuse.Attr) (fusefs.StableAttr, syscall.Errno) {
	sf.mu.Lock()
	defer sf.mu.Unlock()

	st, err := sf.updateStatUnlocked()
	if err != nil {
		return fusefs.StableAttr{}, syscall.EIO
	}

	return sf.fs.statFileToAttr(uint64(len(st)), out), 0
}

// updateStatUnlocked refreshes fetched-size statistics from the blob and
// returns the marshaled JSON (newline-terminated). Caller must hold sf.mu.
func (sf *statFile) updateStatUnlocked() ([]byte, error) {
	sf.statJSON.FetchedSize = sf.blob.FetchedSize()
	sf.statJSON.FetchedPercent = float64(sf.statJSON.FetchedSize) / float64(sf.statJSON.Size) * 100.0
	j, err := json.Marshal(&sf.statJSON)
	if err != nil {
		return nil, err
	}
	j = append(j, []byte("\n")...)
	return j, nil
}

// entryToAttr converts metadata.Attr to go-fuse's Attr.
func entryToAttr(ino uint64, e metadata.Attr, out *fuse.Attr) fusefs.StableAttr {
	out.Ino = ino
	out.Size = uint64(e.Size)
	if e.Mode&os.ModeSymlink != 0 {
		// Symlink size is the length of the target path.
		out.Size = uint64(len(e.LinkName))
	}
	out.Blksize = blockSize
	// Blocks is reported in 512-byte units, rounded up to a whole blockSize.
	out.Blocks = (out.Size + blockSize - 1) / blockSize * physicalBlockRatio
	mtime := e.ModTime
	out.SetTimes(nil, &mtime, nil)
	out.Mode = fileModeToSystemMode(e.Mode)
	out.Owner = fuse.Owner{Uid: uint32(e.UID), Gid: uint32(e.GID)}
	out.Rdev = uint32(unix.Mkdev(uint32(e.DevMajor), uint32(e.DevMinor)))
	out.Nlink = uint32(e.NumLink)
	if out.Nlink == 0 {
		out.Nlink = 1 // zero "NumLink" means one.
	}
	out.Padding = 0 // TODO
	return fusefs.StableAttr{
		Mode: out.Mode,
		Ino:  out.Ino,
		// NOTE: The inode number is unique throughout the lifetime of
		// this filesystem so we don't consider about generation at this
		// moment.
	}
}

// entryToWhAttr converts metadata.Attr to go-fuse's Attr of whiteouts.
func entryToWhAttr(ino uint64, e metadata.Attr, out *fuse.Attr) fusefs.StableAttr { out.Ino = ino out.Size = 0 out.Blksize = blockSize out.Blocks = 0 mtime := e.ModTime out.SetTimes(nil, &mtime, nil) out.Mode = syscall.S_IFCHR out.Owner = fuse.Owner{Uid: 0, Gid: 0} out.Rdev = uint32(unix.Mkdev(0, 0)) out.Nlink = 1 out.Padding = 0 // TODO return fusefs.StableAttr{ Mode: out.Mode, Ino: out.Ino, // NOTE: The inode number is unique throughout the lifetime of // this filesystem so we don't consider about generation at this // moment. } } // stateToAttr converts state directory to go-fuse's Attr. func (fs *fs) stateToAttr(out *fuse.Attr) fusefs.StableAttr { out.Ino = fs.inodeOfState() out.Size = 0 out.Blksize = blockSize out.Blocks = 0 out.Nlink = 1 // root can read and open it (dr-x------ root root). out.Mode = stateDirMode out.Owner = fuse.Owner{Uid: 0, Gid: 0} // dummy out.Mtime = 0 out.Mtimensec = 0 out.Rdev = 0 out.Padding = 0 return fusefs.StableAttr{ Mode: out.Mode, Ino: out.Ino, // NOTE: The inode number is unique throughout the lifetime of // this filesystem so we don't consider about generation at this // moment. } } // statFileToAttr converts stat file to go-fuse's Attr. // func statFileToAttr(id uint64, sf *statFile, size uint64, out *fuse.Attr) fusefs.StableAttr { func (fs *fs) statFileToAttr(size uint64, out *fuse.Attr) fusefs.StableAttr { out.Ino = fs.inodeOfStatFile() out.Size = size out.Blksize = blockSize out.Blocks = (out.Size + blockSize - 1) / blockSize * physicalBlockRatio out.Nlink = 1 // Root can read it ("-r-------- root root"). out.Mode = statFileMode out.Owner = fuse.Owner{Uid: 0, Gid: 0} // dummy out.Mtime = 0 out.Mtimensec = 0 out.Rdev = 0 out.Padding = 0 return fusefs.StableAttr{ Mode: out.Mode, Ino: out.Ino, // NOTE: The inode number is unique throughout the lifetime of // this filesystem so we don't consider about generation at this // moment. 
} } func fileModeToSystemMode(m os.FileMode) uint32 { // Permission bits res := uint32(m & os.ModePerm) // File type bits switch m & os.ModeType { case os.ModeDevice: res |= syscall.S_IFBLK case os.ModeDevice | os.ModeCharDevice: res |= syscall.S_IFCHR case os.ModeDir: res |= syscall.S_IFDIR case os.ModeNamedPipe: res |= syscall.S_IFIFO case os.ModeSymlink: res |= syscall.S_IFLNK case os.ModeSocket: res |= syscall.S_IFSOCK default: // regular file. res |= syscall.S_IFREG } // suid, sgid, sticky bits if m&os.ModeSetuid != 0 { res |= syscall.S_ISUID } if m&os.ModeSetgid != 0 { res |= syscall.S_ISGID } if m&os.ModeSticky != 0 { res |= syscall.S_ISVTX } return res } func defaultStatfs(stat *fuse.StatfsOut) { // http://man7.org/linux/man-pages/man2/statfs.2.html stat.Blocks = 0 // dummy stat.Bfree = 0 stat.Bavail = 0 stat.Files = 0 // dummy stat.Ffree = 0 stat.Bsize = blockSize stat.NameLen = 1<<32 - 1 stat.Frsize = blockSize stat.Padding = 0 stat.Spare = [6]uint32{} } soci-snapshotter-0.4.1/fs/layer/node_test.go000066400000000000000000000030621454010642300210600ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package layer import ( "testing" "github.com/awslabs/soci-snapshotter/metadata" "github.com/hanwen/go-fuse/v2/fuse" ) func TestEntryToAttr(t *testing.T) { testcases := []struct { name string attr metadata.Attr expected fuse.Attr }{ { name: "fuse.Attr.Blocks is reported as # of 512-byte blocks", attr: metadata.Attr{ Size: 1774757, }, expected: fuse.Attr{ Size: 1774757, // Blocks should be the number of 512-byte blocks aligned to blockSize. // Specifically we want to validate that it's not ceiling(Size/blockSize) Blocks: 3472, Blksize: blockSize, Mode: fileModeToSystemMode(0), Nlink: 1, }, }, } for _, tc := range testcases { tc := tc t.Run(tc.name, func(t *testing.T) { var actual fuse.Attr entryToAttr(0, tc.attr, &actual) tc.expected.Mtime = actual.Mtime if actual != tc.expected { t.Fatalf("unexpected fuse attr. actual %v expected %v", actual, tc.expected) } }) } } soci-snapshotter-0.4.1/fs/layer/util_test.go000066400000000000000000000505731454010642300211210ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package layer import ( "compress/gzip" "context" "crypto/sha256" "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "syscall" "testing" "time" "github.com/awslabs/soci-snapshotter/cache" "github.com/awslabs/soci-snapshotter/fs/reader" "github.com/awslabs/soci-snapshotter/fs/remote" "github.com/awslabs/soci-snapshotter/fs/source" spanmanager "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/metadata" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/containerd/containerd/reference" "github.com/google/go-cmp/cmp" fusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/unix" ) const ( sampleSpanSize = 3 sampleData1 = "0123456789" sampleMiddleOffset = sampleSpanSize / 2 lastSpanOffset1 = sampleSpanSize * (int64(len(sampleData1)) / sampleSpanSize) ) var testStateLayerDigest = digest.FromString("dummy") var spanSizeCond = [3]int64{64, 128, 256} func testNodeRead(t *testing.T, factory metadata.Store) { sizeCond := map[string]int64{ "single_span": sampleSpanSize - sampleMiddleOffset, "multi_spans": sampleSpanSize + sampleMiddleOffset, } innerOffsetCond := map[string]int64{ "at_top": 0, "at_middle": sampleMiddleOffset, } baseOffsetCond := map[string]int64{ "of_1st_span": sampleSpanSize * 0, 
"of_2nd_span": sampleSpanSize * 1, "of_last_span": lastSpanOffset1, } fileSizeCond := map[string]int64{ "in_1_span_file": sampleSpanSize * 1, "in_2_span_file": sampleSpanSize * 2, "in_max_size_file": int64(len(sampleData1)), } for sn, size := range sizeCond { for in, innero := range innerOffsetCond { for bo, baseo := range baseOffsetCond { for fn, filesize := range fileSizeCond { t.Run(fmt.Sprintf("reading_%s_%s_%s_%s", sn, in, bo, fn), func(t *testing.T) { if filesize > int64(len(sampleData1)) { t.Fatal("sample file size is larger than sample data") } wantN := size offset := baseo + innero if remain := filesize - offset; remain < wantN { if wantN = remain; wantN < 0 { wantN = 0 } } // use constant string value as a data source. want := strings.NewReader(sampleData1) // data we want to get. wantData := make([]byte, wantN) _, err := want.ReadAt(wantData, offset) if err != nil && err != io.EOF { t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) } // data we get from the file node. f, closeFn := makeNodeReader(t, []byte(sampleData1)[:filesize], sampleSpanSize, factory) defer closeFn() tmpbuf := make([]byte, size) // fuse library can request bigger than remain rr, errno := f.Read(context.Background(), tmpbuf, offset) if errno != 0 { t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) return } if rsize := rr.Size(); int64(rsize) != wantN { t.Errorf("read size: %d; want: %d; passed %d", rsize, wantN, size) return } tmpbuf = make([]byte, len(tmpbuf)) respData, fs := rr.Bytes(tmpbuf) if fs != fuse.OK { t.Errorf("failed to read result data for off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) } if diff := cmp.Diff(wantData, respData); diff != "" { t.Errorf("off=%d, filesize=%d; read data and want data mismatch. 
diff=%+v", offset, filesize, diff) return } }) } } } } } func makeNodeReader(t *testing.T, contents []byte, spanSize int64, factory metadata.Store) (_ *file, closeFn func() error) { testName := "test" tarEntry := []testutil.TarEntry{testutil.File(testName, string(contents))} ztoc, sr, err := ztoc.BuildZtocReader(t, tarEntry, gzip.DefaultCompression, spanSize) if err != nil { t.Fatalf("failed to build ztoc: %v", err) } mr, err := factory(sr, ztoc.TOC) if err != nil { t.Fatalf("failed to create reader: %v", err) } spanManager := spanmanager.New(ztoc, sr, cache.NewMemoryCache(), 0) vr, err := reader.NewReader(mr, digest.FromString(""), spanManager) if err != nil { mr.Close() t.Fatalf("failed to make new reader: %v", err) } r := vr.GetReader() rootNode := getRootNode(t, r, OverlayOpaqueAll) var eo fuse.EntryOut inode, errno := rootNode.Lookup(context.Background(), testName, &eo) if errno != 0 { vr.Close() t.Fatalf("failed to lookup test node; errno: %v", errno) } f, _, errno := inode.Operations().(fusefs.NodeOpener).Open(context.Background(), 0) if errno != 0 { vr.Close() t.Fatalf("failed to open test file; errno: %v", errno) } return f.(*file), vr.Close } func testExistence(t *testing.T, factory metadata.Store) { for _, o := range []OverlayOpaqueType{OverlayOpaqueAll, OverlayOpaqueTrusted, OverlayOpaqueUser} { testExistenceWithOpaque(t, factory, o) } } func testExistenceWithOpaque(t *testing.T, factory metadata.Store, opaque OverlayOpaqueType) { hasOpaque := func(entry string) check { return func(t *testing.T, root *node) { for _, k := range opaqueXattrs[opaque] { hasNodeXattrs(entry, k, opaqueXattrValue)(t, root) } } } tests := []struct { name string in []testutil.TarEntry want []check }{ { name: "1_whiteout_with_sibling", in: []testutil.TarEntry{ testutil.Dir("foo/"), testutil.File("foo/bar.txt", ""), testutil.File("foo/.wh.foo.txt", ""), }, want: []check{ hasValidWhiteout("foo/foo.txt"), fileNotExist("foo/.wh.foo.txt"), }, }, { name: 
"1_whiteout_with_duplicated_name", in: []testutil.TarEntry{ testutil.Dir("foo/"), testutil.File("foo/bar.txt", "test"), testutil.File("foo/.wh.bar.txt", ""), }, want: []check{ hasFileDigest("foo/bar.txt", digestFor("test")), fileNotExist("foo/.wh.bar.txt"), }, }, { name: "1_opaque", in: []testutil.TarEntry{ testutil.Dir("foo/"), testutil.File("foo/.wh..wh..opq", ""), }, want: []check{ hasOpaque("foo/"), fileNotExist("foo/.wh..wh..opq"), }, }, { name: "1_opaque_with_sibling", in: []testutil.TarEntry{ testutil.Dir("foo/"), testutil.File("foo/.wh..wh..opq", ""), testutil.File("foo/bar.txt", "test"), }, want: []check{ hasOpaque("foo/"), hasFileDigest("foo/bar.txt", digestFor("test")), fileNotExist("foo/.wh..wh..opq"), }, }, { name: "1_opaque_with_xattr", in: []testutil.TarEntry{ testutil.Dir("foo/", testutil.WithDirXattrs(map[string]string{"foo": "bar"})), testutil.File("foo/.wh..wh..opq", ""), }, want: []check{ hasOpaque("foo/"), hasNodeXattrs("foo/", "SCHILY.xattr.foo", "bar"), fileNotExist("foo/.wh..wh..opq"), }, }, { name: "state_file", in: []testutil.TarEntry{ testutil.File("test", "test"), }, want: []check{ hasFileDigest("test", digestFor("test")), hasStateFile(t, testStateLayerDigest.String()+".json"), }, }, { name: "file_suid", in: []testutil.TarEntry{ testutil.File("test", "test", testutil.WithFileMode(0644|os.ModeSetuid)), }, want: []check{ hasExtraMode("test", os.ModeSetuid), }, }, { name: "dir_sgid", in: []testutil.TarEntry{ testutil.Dir("test/", testutil.WithDirMode(0755|os.ModeSetgid)), }, want: []check{ hasExtraMode("test/", os.ModeSetgid), }, }, { name: "file_sticky", in: []testutil.TarEntry{ testutil.File("test", "test", testutil.WithFileMode(0644|os.ModeSticky)), }, want: []check{ hasExtraMode("test", os.ModeSticky), }, }, { name: "symlink_size", in: []testutil.TarEntry{ testutil.Symlink("test", "target"), }, want: []check{ hasSize("test", len("target")), }, }, } for _, tt := range tests { for _, spanSize := range spanSizeCond { 
t.Run(fmt.Sprintf("testExistence_%s_spansize_%d", tt.name, spanSize), func(t *testing.T) { ztoc, sr, err := ztoc.BuildZtocReader(t, tt.in, gzip.DefaultCompression, spanSize) if err != nil { t.Fatalf("failed to build sample ztoc: %v", err) } mr, err := factory(sr, ztoc.TOC) if err != nil { t.Fatalf("failed to create reader: %v", err) } defer mr.Close() spanManager := spanmanager.New(ztoc, sr, cache.NewMemoryCache(), 0) vr, err := reader.NewReader(mr, digest.FromString(""), spanManager) if err != nil { t.Fatalf("failed to make new reader: %v", err) } r := vr.GetReader() defer r.Close() rootNode := getRootNode(t, r, opaque) for _, want := range tt.want { want(t, rootNode) } }) } } } func hasSize(name string, size int) check { return func(t *testing.T, root *node) { _, n, err := getDirentAndNode(t, root, name) if err != nil { t.Fatalf("failed to get node %q: %v", name, err) } var ao fuse.AttrOut if errno := n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao); errno != 0 { t.Fatalf("failed to get attributes of node %q: %v", name, errno) } if ao.Attr.Size != uint64(size) { t.Fatalf("got size = %d, want %d", ao.Attr.Size, size) } } } func getRootNode(t *testing.T, r reader.Reader, opaque OverlayOpaqueType) *node { rootNode, err := newNode(testStateLayerDigest, &testReader{r}, &testBlobState{10, 5}, 100, opaque, false, nil) if err != nil { t.Fatalf("failed to get root node: %v", err) } fusefs.NewNodeFS(rootNode, &fusefs.Options{}) // initializes root node return rootNode.(*node) } type testReader struct { r reader.Reader } func (tr *testReader) OpenFile(id uint32) (io.ReaderAt, error) { return tr.r.OpenFile(id) } func (tr *testReader) Metadata() metadata.Reader { return tr.r.Metadata() } func (tr *testReader) Cache(opts ...reader.CacheOption) error { return nil } func (tr *testReader) Close() error { return nil } func (tr *testReader) LastOnDemandReadTime() time.Time { return time.Now() } type testBlobState struct { size int64 fetchedSize int64 } 
func (tb *testBlobState) Check() error { return nil } func (tb *testBlobState) Size() int64 { return tb.size } func (tb *testBlobState) FetchedSize() int64 { return tb.fetchedSize } func (tb *testBlobState) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { return 0, nil } func (tb *testBlobState) Cache(offset int64, size int64, opts ...remote.Option) error { return nil } func (tb *testBlobState) Refresh(ctx context.Context, host source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { return nil } func (tb *testBlobState) Close() error { return nil } type check func(*testing.T, *node) func fileNotExist(file string) check { return func(t *testing.T, root *node) { if _, _, err := getDirentAndNode(t, root, file); err == nil { t.Errorf("Node %q exists", file) } } } func hasFileDigest(filename string, digest string) check { return func(t *testing.T, root *node) { _, n, err := getDirentAndNode(t, root, filename) if err != nil { t.Fatalf("failed to get node %q: %v", filename, err) } ni := n.Operations().(*node) attr, err := ni.fs.r.Metadata().GetAttr(ni.id) if err != nil { t.Fatalf("failed to get attr %q(%d): %v", filename, ni.id, err) } fh, _, errno := ni.Open(context.Background(), 0) if errno != 0 { t.Fatalf("failed to open node %q: %v", filename, errno) } rr, errno := fh.(*file).Read(context.Background(), make([]byte, attr.Size), 0) if errno != 0 { t.Fatalf("failed to read node %q: %v", filename, errno) } res, status := rr.Bytes(make([]byte, attr.Size)) if status != fuse.OK { t.Fatalf("failed to get read result of node %q: %v", filename, status) } if ndgst := digestFor(string(res)); ndgst != digest { t.Fatalf("Digest(%q) = %q, want %q", filename, ndgst, digest) } } } func hasExtraMode(name string, mode os.FileMode) check { return func(t *testing.T, root *node) { _, n, err := getDirentAndNode(t, root, name) if err != nil { t.Fatalf("failed to get node %q: %v", name, err) } var ao fuse.AttrOut if errno := 
n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao); errno != 0 { t.Fatalf("failed to get attributes of node %q: %v", name, errno) } a := ao.Attr gotMode := a.Mode & (syscall.S_ISUID | syscall.S_ISGID | syscall.S_ISVTX) wantMode := extraModeToTarMode(mode) if gotMode != uint32(wantMode) { t.Fatalf("got mode = %b, want %b", gotMode, wantMode) } } } func hasValidWhiteout(name string) check { return func(t *testing.T, root *node) { ent, n, err := getDirentAndNode(t, root, name) if err != nil { t.Fatalf("failed to get node %q: %v", name, err) } var ao fuse.AttrOut if errno := n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao); errno != 0 { t.Fatalf("failed to get attributes of file %q: %v", name, errno) } a := ao.Attr if a.Ino != ent.Ino { t.Errorf("inconsistent inodes %d(Node) != %d(Dirent)", a.Ino, ent.Ino) return } // validate the direntry if ent.Mode != syscall.S_IFCHR { t.Errorf("whiteout entry %q isn't a char device", name) return } // validate the node if a.Mode != syscall.S_IFCHR { t.Errorf("whiteout %q has an invalid mode %o; want %o", name, a.Mode, syscall.S_IFCHR) return } if a.Rdev != uint32(unix.Mkdev(0, 0)) { t.Errorf("whiteout %q has invalid device numbers (%d, %d); want (0, 0)", name, unix.Major(uint64(a.Rdev)), unix.Minor(uint64(a.Rdev))) return } } } func hasNodeXattrs(entry, name, value string) check { return func(t *testing.T, root *node) { _, n, err := getDirentAndNode(t, root, entry) if err != nil { t.Fatalf("failed to get node %q: %v", entry, err) } // check xattr exists in the xattrs list. 
buf := make([]byte, 1000) nb, errno := n.Operations().(fusefs.NodeListxattrer).Listxattr(context.Background(), buf) if errno != 0 { t.Fatalf("failed to get xattrs list of node %q: %v", entry, err) } attrs := strings.Split(string(buf[:nb]), "\x00") var found bool for _, x := range attrs { if x == name { found = true } } if !found { t.Errorf("node %q doesn't have an opaque xattr %q", entry, value) return } // check the xattr has valid value. v := make([]byte, len(value)) nv, errno := n.Operations().(fusefs.NodeGetxattrer).Getxattr(context.Background(), name, v) if errno != 0 { t.Fatalf("failed to get xattr %q of node %q: %v", name, entry, err) } if int(nv) != len(value) { t.Fatalf("invalid xattr size for file %q, value %q got %d; want %d", name, value, nv, len(value)) } if string(v) != value { t.Errorf("node %q has an invalid xattr %q; want %q", entry, v, value) return } } } func hasEntry(t *testing.T, name string, ents fusefs.DirStream) (fuse.DirEntry, bool) { for ents.HasNext() { de, errno := ents.Next() if errno != 0 { t.Fatalf("faield to read entries for %q", name) } if de.Name == name { return de, true } } return fuse.DirEntry{}, false } func hasStateFile(t *testing.T, id string) check { return func(t *testing.T, root *node) { // Check the state dir is hidden on OpenDir for "/" ents, errno := root.Readdir(context.Background()) if errno != 0 { t.Errorf("failed to open root directory: %v", errno) return } if _, ok := hasEntry(t, stateDirName, ents); ok { t.Errorf("state direntry %q should not be listed", stateDirName) return } // Check existence of state dir var eo fuse.EntryOut sti, errno := root.Lookup(context.Background(), stateDirName, &eo) if errno != 0 { t.Errorf("failed to lookup directory %q: %v", stateDirName, errno) return } st, ok := sti.Operations().(*state) if !ok { t.Errorf("directory %q isn't a state node", stateDirName) return } // Check existence of state file ents, errno = st.Readdir(context.Background()) if errno != 0 { t.Errorf("failed to open 
directory %q: %v", stateDirName, errno) return } if _, ok := hasEntry(t, id, ents); !ok { t.Errorf("direntry %q not found in %q", id, stateDirName) return } inode, errno := st.Lookup(context.Background(), id, &eo) if errno != 0 { t.Errorf("failed to lookup node %q in %q: %v", id, stateDirName, errno) return } n, ok := inode.Operations().(*statFile) if !ok { t.Errorf("entry %q isn't a normal node", id) return } // wanted data r := testutil.NewThreadsafeRandom() wantErr := fmt.Errorf("test-%d", r.Int63()) // report the data root.fs.s.report(wantErr) // obtain file size (check later) var ao fuse.AttrOut errno = n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao) if errno != 0 { t.Errorf("failed to get attr of state file: %v", errno) return } attr := ao.Attr // get data via state file tmp := make([]byte, 4096) res, errno := n.Read(context.Background(), nil, tmp, 0) if errno != 0 { t.Errorf("failed to read state file: %v", errno) return } gotState, status := res.Bytes(nil) if status != fuse.OK { t.Errorf("failed to get result bytes of state file: %v", errno) return } if attr.Size != uint64(len(string(gotState))) { t.Errorf("size %d; want %d", attr.Size, len(string(gotState))) return } var j statJSON if err := json.Unmarshal(gotState, &j); err != nil { t.Errorf("failed to unmarshal %q: %v", string(gotState), err) return } if wantErr.Error() != j.Error { t.Errorf("expected error %q, got %q", wantErr.Error(), j.Error) return } } } // getDirentAndNode gets dirent and node at the specified path at once and makes // sure that the both of them exist. func getDirentAndNode(t *testing.T, root *node, path string) (ent fuse.DirEntry, n *fusefs.Inode, err error) { dir, base := filepath.Split(filepath.Clean(path)) // get the target's parent directory. 
var eo fuse.EntryOut d := root for _, name := range strings.Split(dir, "/") { if len(name) == 0 { continue } di, errno := d.Lookup(context.Background(), name, &eo) if errno != 0 { err = fmt.Errorf("failed to lookup directory %q: %v", name, errno) return } var ok bool if d, ok = di.Operations().(*node); !ok { err = fmt.Errorf("directory %q isn't a normal node", name) return } } // get the target's direntry. ents, errno := d.Readdir(context.Background()) if errno != 0 { err = fmt.Errorf("failed to open directory %q: %v", path, errno) } ent, ok := hasEntry(t, base, ents) if !ok { err = fmt.Errorf("direntry %q not found in the parent directory of %q", base, path) } // get the target's node. n, errno = d.Lookup(context.Background(), base, &eo) if errno != 0 { err = fmt.Errorf("failed to lookup node %q: %v", path, errno) } return } func digestFor(content string) string { sum := sha256.Sum256([]byte(content)) return fmt.Sprintf("sha256:%x", sum) } // suid, guid, sticky bits for archive/tar // https://github.com/golang/go/blob/release-branch.go1.13/src/archive/tar/common.go#L607-L609 const ( cISUID = 04000 // Set uid cISGID = 02000 // Set gid cISVTX = 01000 // Save text (sticky bit) ) func extraModeToTarMode(fm os.FileMode) (tm int64) { if fm&os.ModeSetuid != 0 { tm |= cISUID } if fm&os.ModeSetgid != 0 { tm |= cISGID } if fm&os.ModeSticky != 0 { tm |= cISVTX } return } soci-snapshotter-0.4.1/fs/metrics/000077500000000000000000000000001454010642300170765ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/metrics/common/000077500000000000000000000000001454010642300203665ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/metrics/common/metrics.go000066400000000000000000000232301454010642300223630ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package commonmetrics import ( "sync" "time" digest "github.com/opencontainers/go-digest" "github.com/prometheus/client_golang/prometheus" ) const ( // OperationLatencyKeyMilliseconds is the key for soci operation latency metrics in milliseconds. OperationLatencyKeyMilliseconds = "operation_duration_milliseconds" // OperationLatencyKeyMicroseconds is the key for soci operation latency metrics in microseconds. OperationLatencyKeyMicroseconds = "operation_duration_microseconds" // OperationCountKey is the key for soci operation count metrics. OperationCountKey = "operation_count" // BytesServedKey is the key for any metric related to counting bytes served as the part of specific operation. BytesServedKey = "bytes_served" // ImageOperationCountKey is the key for any metric related to operation count metric at the image level (as opposed to layer). ImageOperationCountKey = "image_operation_count_key" // Keep namespace as soci and subsystem as fs. namespace = "soci" subsystem = "fs" ) // Lists all metric labels. 
const ( // prometheus metrics Mount = "mount" RemoteRegistryGet = "remote_registry_get" NodeReaddir = "node_readdir" InitMetadataStore = "init_metadata_store" SynchronousRead = "synchronous_read" BackgroundFetch = "background_fetch" SynchronousReadCount = "synchronous_read_count" SynchronousReadRegistryFetchCount = "synchronous_read_remote_registry_fetch_count" // TODO revisit (wrong place) SynchronousBytesServed = "synchronous_bytes_served" // fuse operation failure metrics FuseNodeGetattrFailureCount = "fuse_node_getattr_failure_count" FuseNodeListxattrFailureCount = "fuse_node_listxattr_failure_count" FuseNodeLookupFailureCount = "fuse_node_lookup_failure_count" FuseNodeOpenFailureCount = "fuse_node_open_failure_count" FuseNodeReaddirFailureCount = "fuse_node_readdir_failure_count" FuseFileReadFailureCount = "fuse_file_read_failure_count" FuseFileGetattrFailureCount = "fuse_file_getattr_failure_count" FuseWhiteoutGetattrFailureCount = "fuse_whiteout_getattr_failure_count" FuseUnknownFailureCount = "fuse_unknown_operation_failure_count" // TODO this metric is not available now. This needs to go down to BlobReader where the actuall http call is issued SynchronousBytesFetched = "synchronous_bytes_fetched" // Number of times the snapshotter falls back to use a normal overlay mount instead of mounting the layer as a FUSE mount. // Note that a layer not having a ztoc is NOT classified as an error, even though `fs.Mount` returns an error in that case. FuseMountFailureCount = "fuse_mount_failure_count" // Number of errors of span fetch by background fetcher BackgroundSpanFetchFailureCount = "background_span_fetch_failure_count" // Number of spans fetched by background fetcher BackgroundSpanFetchCount = "background_span_fetch_count" // Number of items in the work queue of background fetcher BackgroundFetchWorkQueueSize = "background_fetch_work_queue_size" ) var ( // Buckets for OperationLatency metrics. 
latencyBucketsMilliseconds = []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384} // in milliseconds latencyBucketsMicroseconds = []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024} // in microseconds // operationLatencyMilliseconds collects operation latency numbers in milliseconds grouped by // operation, type and layer digest. operationLatencyMilliseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: OperationLatencyKeyMilliseconds, Help: "Latency in milliseconds of soci snapshotter operations. Broken down by operation type and layer sha.", Buckets: latencyBucketsMilliseconds, }, []string{"operation_type", "layer"}, ) // operationLatencyMicroseconds collects operation latency numbers in microseconds grouped by // operation, type and layer digest. operationLatencyMicroseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: OperationLatencyKeyMicroseconds, Help: "Latency in microseconds of soci snapshotter operations. Broken down by operation type and layer sha.", Buckets: latencyBucketsMicroseconds, }, []string{"operation_type", "layer"}, ) // operationCount collects operation count numbers by operation // type and layer sha. operationCount = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: OperationCountKey, Help: "The count of soci snapshotter operations. Broken down by operation type and layer sha.", }, []string{"operation_type", "layer"}, ) // bytesCount reflects the number of bytes served as the part of specific operation type per layer sha. bytesCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: BytesServedKey, Help: "The number of bytes served per soci snapshotter operations. 
Broken down by operation type and layer sha.", }, []string{"operation_type", "layer"}, ) imageOperationCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: ImageOperationCountKey, Help: "The count of soci snapshotter operations. Broken down by operation type and image digest.", }, []string{"operation_type", "image"}) ) var register sync.Once // sinceInMilliseconds gets the time since the specified start in milliseconds. // The division is made to have the milliseconds value as floating point number, since the native method // .Milliseconds() returns an integer value and you can lose precision for sub-millisecond values. func sinceInMilliseconds(start time.Time) float64 { return float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond/time.Nanosecond) } // sinceInMicroseconds gets the time since the specified start in microseconds. // The division is made to have the microseconds value as floating point number, since the native method // .Microseconds() returns an integer value and you can lose precision for sub-microsecond values. func sinceInMicroseconds(start time.Time) float64 { return float64(time.Since(start).Nanoseconds()) / float64(time.Microsecond/time.Nanosecond) } // Register registers metrics. This is always called only once. func Register() { register.Do(func() { prometheus.MustRegister(operationLatencyMilliseconds) prometheus.MustRegister(operationLatencyMicroseconds) prometheus.MustRegister(operationCount) prometheus.MustRegister(bytesCount) prometheus.MustRegister(imageOperationCount) }) } // MeasureLatencyInMilliseconds wraps the labels attachment as well as calling Observe into a single method. // Right now we attach the operation and layer digest, so it's possible to see the breakdown for latency // by operation and individual layers. // If you want this to be layer agnostic, just pass the digest from empty string, e.g. 
// layerDigest := digest.FromString("") func MeasureLatencyInMilliseconds(operation string, layer digest.Digest, start time.Time) { operationLatencyMilliseconds.WithLabelValues(operation, layer.String()).Observe(sinceInMilliseconds(start)) } // MeasureLatencyInMicroseconds wraps the labels attachment as well as calling Observe into a single method. // Right now we attach the operation and layer digest, so it's possible to see the breakdown for latency // by operation and individual layers. // If you want this to be layer agnostic, just pass the digest from empty string, e.g. // layerDigest := digest.FromString("") func MeasureLatencyInMicroseconds(operation string, layer digest.Digest, start time.Time) { operationLatencyMicroseconds.WithLabelValues(operation, layer.String()).Observe(sinceInMicroseconds(start)) } // IncOperationCount wraps the labels attachment as well as calling Inc into a single method. func IncOperationCount(operation string, layer digest.Digest) { operationCount.WithLabelValues(operation, layer.String()).Inc() } // AddBytesCount wraps the labels attachment as well as calling Add into a single method. func AddBytesCount(operation string, layer digest.Digest, bytes int64) { bytesCount.WithLabelValues(operation, layer.String()).Add(float64(bytes)) } // AddImageOperationCount wraps the labels attachment as well as calling Add into a single method. func AddImageOperationCount(operation string, image digest.Digest, count int32) { imageOperationCount.WithLabelValues(operation, image.String()).Add(float64(count)) } soci-snapshotter-0.4.1/fs/metrics/layer/000077500000000000000000000000001454010642300202125ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/metrics/layer/layer.go000066400000000000000000000035531454010642300216630ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package layermetrics import ( "github.com/awslabs/soci-snapshotter/fs/layer" metrics "github.com/docker/go-metrics" "github.com/prometheus/client_golang/prometheus" ) var layerMetrics = []*metric{ { name: "layer_fetched_size", help: "Total fetched size of the layer", unit: metrics.Bytes, vt: prometheus.CounterValue, getValues: func(l layer.Layer) []value { return []value{ { v: float64(l.Info().FetchedSize), }, } }, }, { name: "layer_size", help: "Total size of the layer", unit: metrics.Bytes, vt: prometheus.CounterValue, getValues: func(l layer.Layer) []value { return []value{ { v: float64(l.Info().Size), }, } }, }, } soci-snapshotter-0.4.1/fs/metrics/layer/metrics.go000066400000000000000000000061661454010642300222200ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package layermetrics import ( "sync" "github.com/awslabs/soci-snapshotter/fs/layer" metrics "github.com/docker/go-metrics" "github.com/prometheus/client_golang/prometheus" ) func NewLayerMetrics(ns *metrics.Namespace) *Controller { if ns == nil { return &Controller{} } c := &Controller{ ns: ns, layer: make(map[string]layer.Layer), } c.metrics = append(c.metrics, layerMetrics...) 
ns.Add(c) return c } type Controller struct { ns *metrics.Namespace metrics []*metric layer map[string]layer.Layer layerMu sync.RWMutex } func (c *Controller) Describe(ch chan<- *prometheus.Desc) { for _, e := range c.metrics { ch <- e.desc(c.ns) } } func (c *Controller) Collect(ch chan<- prometheus.Metric) { c.layerMu.RLock() wg := &sync.WaitGroup{} for mp, l := range c.layer { mp, l := mp, l wg.Add(1) go func() { defer wg.Done() for _, e := range c.metrics { e.collect(mp, l, c.ns, ch) } }() } c.layerMu.RUnlock() wg.Wait() } func (c *Controller) Add(key string, l layer.Layer) { if c.ns == nil { return } c.layerMu.Lock() c.layer[key] = l c.layerMu.Unlock() } func (c *Controller) Remove(key string) { if c.ns == nil { return } c.layerMu.Lock() delete(c.layer, key) c.layerMu.Unlock() } type value struct { v float64 l []string } type metric struct { name string help string unit metrics.Unit vt prometheus.ValueType labels []string // getValues returns the value and labels for the data getValues func(l layer.Layer) []value } func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc { return ns.NewDesc(m.name, m.help, m.unit, append([]string{"digest", "mountpoint"}, m.labels...)...) } func (m *metric) collect(mountpoint string, l layer.Layer, ns *metrics.Namespace, ch chan<- prometheus.Metric) { values := m.getValues(l) for _, v := range values { ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{l.Info().Digest.String(), mountpoint}, v.l...)...) } } soci-snapshotter-0.4.1/fs/reader/000077500000000000000000000000001454010642300166725ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/reader/reader.go000066400000000000000000000160521454010642300204670ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package reader import ( "errors" "fmt" "io" "sync" "sync/atomic" "time" "github.com/awslabs/soci-snapshotter/cache" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" spanmanager "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/metadata" "github.com/awslabs/soci-snapshotter/ztoc/compression" digest "github.com/opencontainers/go-digest" ) type Reader interface { OpenFile(id uint32) (io.ReaderAt, error) Metadata() metadata.Reader Close() error LastOnDemandReadTime() time.Time } // VerifiableReader produces a Reader with a given verifier. 
type VerifiableReader struct { r *reader lastVerifyErr atomic.Value prohibitVerifyFailure bool prohibitVerifyFailureMu sync.RWMutex closed bool closedMu sync.Mutex verifier func(uint32, string) (digest.Verifier, error) } func (vr *VerifiableReader) SkipVerify() Reader { return vr.r } func (vr *VerifiableReader) VerifyTOC(tocDigest digest.Digest) (Reader, error) { if vr.isClosed() { return nil, fmt.Errorf("reader is already closed") } vr.prohibitVerifyFailureMu.Lock() vr.prohibitVerifyFailure = true lastVerifyErr := vr.lastVerifyErr.Load() vr.prohibitVerifyFailureMu.Unlock() if err := lastVerifyErr; err != nil { return nil, fmt.Errorf("content error occures during caching contents: %w", err.(error)) } vr.r.verify = true return vr.r, nil } // nolint:revive func (vr *VerifiableReader) GetReader() *reader { return vr.r } func (vr *VerifiableReader) Metadata() metadata.Reader { // TODO: this shouldn't be called before verified return vr.r.r } func (vr *VerifiableReader) Close() error { vr.closedMu.Lock() defer vr.closedMu.Unlock() if vr.closed { return nil } vr.closed = true return vr.r.Close() } func (vr *VerifiableReader) isClosed() bool { vr.closedMu.Lock() closed := vr.closed vr.closedMu.Unlock() return closed } // NewReader creates a Reader based on the given soci blob and Span Manager. 
func NewReader(r metadata.Reader, layerSha digest.Digest, spanManager *spanmanager.SpanManager) (*VerifiableReader, error) { vr := &reader{ spanManager: spanManager, r: r, layerSha: layerSha, verifier: digestVerifier, } return &VerifiableReader{r: vr, verifier: digestVerifier}, nil } type reader struct { spanManager *spanmanager.SpanManager r metadata.Reader layerSha digest.Digest lastReadTime time.Time lastReadTimeMu sync.Mutex closed bool closedMu sync.Mutex verify bool verifier func(uint32, string) (digest.Verifier, error) } func (gr *reader) Metadata() metadata.Reader { return gr.r } func (gr *reader) setLastReadTime(lastReadTime time.Time) { gr.lastReadTimeMu.Lock() gr.lastReadTime = lastReadTime gr.lastReadTimeMu.Unlock() } func (gr *reader) LastOnDemandReadTime() time.Time { gr.lastReadTimeMu.Lock() t := gr.lastReadTime gr.lastReadTimeMu.Unlock() return t } func (gr *reader) OpenFile(id uint32) (io.ReaderAt, error) { if gr.isClosed() { return nil, fmt.Errorf("reader is already closed") } var fr metadata.File fr, err := gr.r.OpenFile(id) if err != nil { return nil, fmt.Errorf("failed to open file %d: %w", id, err) } return &file{ id: id, fr: fr, gr: gr, }, nil } func (gr *reader) Close() (retErr error) { gr.closedMu.Lock() defer gr.closedMu.Unlock() if gr.closed { return nil } gr.closed = true if err := gr.r.Close(); err != nil { retErr = errors.Join(retErr, err) } return } func (gr *reader) isClosed() bool { gr.closedMu.Lock() closed := gr.closed gr.closedMu.Unlock() return closed } type file struct { id uint32 fr metadata.File gr *reader } // ReadAt reads the file when the file is requested by the container func (sf *file) ReadAt(p []byte, offset int64) (int, error) { if len(p) == 0 { return 0, nil } uncompFileSize := sf.fr.GetUncompressedFileSize() if compression.Offset(offset) >= uncompFileSize { return 0, io.EOF } expectedSize := uncompFileSize - compression.Offset(offset) if expectedSize > compression.Offset(len(p)) { expectedSize = 
compression.Offset(len(p)) } fileOffsetStart := sf.fr.GetUncompressedOffset() + compression.Offset(offset) fileOffsetEnd := fileOffsetStart + expectedSize r, err := sf.gr.spanManager.GetContents(fileOffsetStart, fileOffsetEnd) if err != nil { return 0, fmt.Errorf("failed to read the file: %w", err) } defer r.Close() // TODO this is not the right place for this metric to be. It needs to go down the BlobReader, when the HTTP request is issued commonmetrics.IncOperationCount(commonmetrics.SynchronousReadRegistryFetchCount, sf.gr.layerSha) // increment the number of on demand file fetches from remote registry sf.gr.setLastReadTime(time.Now()) n, err := io.ReadFull(r, p[0:expectedSize]) if err != nil { return 0, fmt.Errorf("unexpected copied data size for on-demand fetch. read = %d, expected = %d", n, expectedSize) } commonmetrics.AddBytesCount(commonmetrics.SynchronousBytesServed, sf.gr.layerSha, int64(n)) // measure the number of bytes served synchronously return n, nil } type CacheOption func(*cacheOptions) type cacheOptions struct { cacheOpts []cache.Option filter func(int64) bool reader *io.SectionReader } func WithCacheOpts(cacheOpts ...cache.Option) CacheOption { return func(opts *cacheOptions) { opts.cacheOpts = cacheOpts } } func WithFilter(filter func(int64) bool) CacheOption { return func(opts *cacheOptions) { opts.filter = filter } } func WithReader(sr *io.SectionReader) CacheOption { return func(opts *cacheOptions) { opts.reader = sr } } func digestVerifier(id uint32, digestStr string) (digest.Verifier, error) { digest, err := digest.Parse(digestStr) if err != nil { return nil, fmt.Errorf("no digset is recorded: %w", err) } return digest.Verifier(), nil } soci-snapshotter-0.4.1/fs/reader/reader_test.go000066400000000000000000000163001454010642300215220ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package reader import ( "bytes" "compress/gzip" "fmt" "io" "strings" "testing" "github.com/awslabs/soci-snapshotter/cache" spanmanager "github.com/awslabs/soci-snapshotter/fs/span-manager" "github.com/awslabs/soci-snapshotter/metadata" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" digest "github.com/opencontainers/go-digest" ) const ( sampleSpanSize = 3 sampleMiddleOffset = sampleSpanSize / 2 sampleData1 = "0123456789" lastSpanOffset1 = sampleSpanSize * (int64(len(sampleData1)) / sampleSpanSize) ) var spanSizeCond = [3]int64{64, 128, 256} func TestFsReader(t *testing.T) { testFileReadAt(t, metadata.NewTempDbStore) testFailReader(t, metadata.NewTempDbStore) } func testFileReadAt(t *testing.T, factory metadata.Store) { sizeCond := map[string]int64{ "single_span": sampleSpanSize - sampleMiddleOffset, "multi_spans": sampleSpanSize + sampleMiddleOffset, } innerOffsetCond := map[string]int64{ "at_top": 0, "at_middle": sampleMiddleOffset, } baseOffsetCond := map[string]int64{ "of_1st_span": sampleSpanSize * 0, "of_2nd_span": sampleSpanSize * 1, "of_last_span": lastSpanOffset1, } fileSizeCond := map[string]int64{ "in_1_span_file": sampleSpanSize * 1, "in_2_spans_file": sampleSpanSize * 2, "in_max_size_file": int64(len(sampleData1)), } for sn, size := range sizeCond { for in, innero := range innerOffsetCond { for bo, baseo := range baseOffsetCond { for fn, filesize := range fileSizeCond { for _, spanSize := range spanSizeCond { t.Run(fmt.Sprintf("reading_%s_%s_%s_%s_spansize_%d", sn, in, bo, fn, spanSize), func(t *testing.T) { if filesize > int64(len(sampleData1)) { t.Fatal("sample file size is larger than sample data") } wantN := size offset := baseo + innero if offset >= filesize { return } if remain := filesize - offset; remain < wantN { if wantN = remain; wantN < 0 { wantN = 0 } } // use constant string value as a data source. want := strings.NewReader(sampleData1) // data we want to get. 
wantData := make([]byte, wantN) _, err := want.ReadAt(wantData, offset) if err != nil && err != io.EOF { t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) } // data we get through a file. f, closeFn := makeFile(t, []byte(sampleData1)[:filesize], factory, spanSize) defer closeFn() // read the file respData := make([]byte, size) n, err := f.ReadAt(respData, offset) if err != nil && err != io.EOF { t.Fatalf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) } respData = respData[:n] if !bytes.Equal(wantData, respData) { t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)", offset, filesize, len(respData), string(respData), wantN, string(wantData)) } }) } } } } } } func makeFile(t *testing.T, contents []byte, factory metadata.Store, spanSize int64) (*file, func() error) { testName := "test" tarEntry := []testutil.TarEntry{ testutil.File(testName, string(contents)), } ztoc, sr, err := ztoc.BuildZtocReader(t, tarEntry, gzip.DefaultCompression, spanSize) if err != nil { t.Fatalf("failed to build sample ztoc: %v", err) } mr, err := factory(sr, ztoc.TOC) if err != nil { t.Fatalf("failed to create reader: %v", err) } spanManager := spanmanager.New(ztoc, sr, cache.NewMemoryCache(), 0) vr, err := NewReader(mr, digest.FromString(""), spanManager) if err != nil { mr.Close() t.Fatalf("failed to make new reader: %v", err) } r := vr.GetReader() tid, _, err := mr.GetChild(mr.RootID(), testName) if err != nil { vr.Close() t.Fatalf("failed to get %q: %v", testName, err) } ra, err := r.OpenFile(tid) if err != nil { vr.Close() t.Fatalf("Failed to open testing file: %v", err) } f, ok := ra.(*file) if !ok { vr.Close() t.Fatalf("invalid type of file %q", tid) } return f, vr.Close } func testFailReader(t *testing.T, factory metadata.Store) { testFileName := "test" tarEntry := []testutil.TarEntry{ testutil.File(testFileName, sampleData1), } for _, spanSize := range spanSizeCond { 
t.Run(fmt.Sprintf("reading_spansize_%d", spanSize), func(t *testing.T) { ztoc, sr, err := ztoc.BuildZtocReader(t, tarEntry, gzip.DefaultCompression, spanSize) if err != nil { t.Fatalf("failed to build sample ztoc: %v", err) } // build a metadata reader mr, err := factory(sr, ztoc.TOC) if err != nil { t.Fatalf("failed to prepare metadata reader") } defer mr.Close() // tests for opening non-existing file notexist := uint32(0) found := false for i := uint32(0); i < 1000000; i++ { if _, err := mr.GetAttr(i); err != nil { notexist, found = i, true break } } if !found { t.Fatalf("free ID not found") } spanManager := spanmanager.New(ztoc, sr, cache.NewMemoryCache(), 0) vr, err := NewReader(mr, digest.FromString(""), spanManager) if err != nil { mr.Close() t.Fatalf("failed to make new reader: %v", err) } r := vr.GetReader() _, err = r.OpenFile(notexist) if err == nil { t.Errorf("succeeded to open file but wanted to fail") } // tests failure behaviour of a file read tid, _, err := mr.GetChild(mr.RootID(), testFileName) if err != nil { t.Fatalf("failed to get %q: %v", testFileName, err) } fr, err := r.OpenFile(tid) if err != nil { t.Fatalf("failed to open file but wanted to succeed: %v", err) } // tests for reading file p := make([]byte, len(sampleData1)) n, err := fr.ReadAt(p, 0) if (err != nil && err != io.EOF) || n != len(sampleData1) || !bytes.Equal([]byte(sampleData1), p) { t.Errorf("failed to read data but wanted to succeed: %v", err) } }) } } soci-snapshotter-0.4.1/fs/remote/000077500000000000000000000000001454010642300167235ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/remote/blob.go000066400000000000000000000161241454010642300201740ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package remote import ( "context" "fmt" "io" "regexp" "sync" "time" "github.com/awslabs/soci-snapshotter/fs/source" "github.com/containerd/containerd/reference" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) type Blob interface { Check() error Size() int64 FetchedSize() int64 ReadAt(p []byte, offset int64, opts ...Option) (int, error) Refresh(ctx context.Context, host source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error Close() error } type blob struct { fetcher fetcher fetcherMu sync.Mutex size int64 lastCheck time.Time lastCheckMu sync.Mutex checkInterval time.Duration fetchTimeout time.Duration fetchedRegionSet regionSet fetchedRegionSetMu sync.Mutex resolver *Resolver closed bool closedMu sync.Mutex } func makeBlob(fetcher fetcher, size int64, lastCheck time.Time, checkInterval time.Duration, r *Resolver, fetchTimeout time.Duration) *blob { return &blob{ fetcher: fetcher, size: size, lastCheck: lastCheck, checkInterval: checkInterval, resolver: r, fetchTimeout: fetchTimeout, } } func (b *blob) Close() error { b.closedMu.Lock() defer b.closedMu.Unlock() if !b.closed { b.closed = true } return nil } func (b *blob) isClosed() bool { b.closedMu.Lock() closed := b.closed b.closedMu.Unlock() return closed } func (b *blob) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error { if b.isClosed() { return fmt.Errorf("blob is already closed") } // refresh the fetcher f, newSize, err := b.resolver.resolveFetcher(ctx, hosts, refspec, desc) if err != nil { return err } if newSize != b.size { return fmt.Errorf("invalid size of new blob %d; want %d", newSize, b.size) } // update the blob's fetcher with new one b.fetcherMu.Lock() b.fetcher = f b.fetcherMu.Unlock() b.lastCheckMu.Lock() b.lastCheck = time.Now() b.lastCheckMu.Unlock() return nil } func (b *blob) Check() error { if b.isClosed() { 
return fmt.Errorf("blob is already closed") } now := time.Now() b.lastCheckMu.Lock() lastCheck := b.lastCheck b.lastCheckMu.Unlock() if now.Sub(lastCheck) < b.checkInterval { // do nothing if not expired return nil } b.fetcherMu.Lock() fr := b.fetcher b.fetcherMu.Unlock() err := fr.check() if err == nil { // update lastCheck only if check succeeded. // on failure, we should check this layer next time again. b.lastCheckMu.Lock() b.lastCheck = now b.lastCheckMu.Unlock() } return err } func (b *blob) Size() int64 { return b.size } func (b *blob) FetchedSize() int64 { b.fetchedRegionSetMu.Lock() sz := b.fetchedRegionSet.totalSize() b.fetchedRegionSetMu.Unlock() return sz } // ReadAt reads remote blob from specified offset for the buffer size. // We can configure this function with options. func (b *blob) ReadAt(p []byte, offset int64, opts ...Option) (int, error) { if b.isClosed() { return 0, fmt.Errorf("blob is already closed") } if len(p) == 0 || offset > b.size { return 0, nil } reg := region{offset, offset + int64(len(p)) - 1} var readAtOpts options for _, o := range opts { o(&readAtOpts) } // Take it from remote registry. w := newBytesWriter(p, 0) // Read required data if err := b.fetchRange(reg, w, &readAtOpts); err != nil { return 0, err } // Adjust the buffer size according to the blob size if remain := b.size - offset; int64(len(p)) >= remain { if remain < 0 { remain = 0 } p = p[:remain] } return len(p), nil } // fetchRegion fetches content from remote blob. // It must be called from within fetchRange and need to ensure that it is inside the singleflight `Do` operation. func (b *blob) fetchRegion(reg region, w io.Writer, fetched bool, opts *options) error { // Fetcher can be suddenly updated so we take and use the snapshot of it for // consistency. 
b.fetcherMu.Lock() fr := b.fetcher b.fetcherMu.Unlock() fetchCtx, cancel := context.WithTimeout(context.Background(), b.fetchTimeout) defer cancel() if opts.ctx != nil { fetchCtx = opts.ctx } var req []region req = append(req, reg) mr, err := fr.fetch(fetchCtx, req, true) if err != nil { return err } defer mr.Close() // Update the check timer because we succeeded to access the blob b.lastCheckMu.Lock() b.lastCheck = time.Now() b.lastCheckMu.Unlock() for { _, p, err := mr.Next() if err == io.EOF { break } else if err != nil { return fmt.Errorf("failed to read multipart resp: %w", err) } if _, err := io.CopyN(w, p, reg.size()); err != nil { return err } b.fetchedRegionSetMu.Lock() b.fetchedRegionSet.add(reg) b.fetchedRegionSetMu.Unlock() fetched = true } if !fetched { return fmt.Errorf("failed to fetch region %v", reg) } return nil } // fetchRange fetches content from remote blob. func (b *blob) fetchRange(reg region, w io.Writer, opts *options) error { return b.fetchRegion(reg, w, false, opts) } func newBytesWriter(dest []byte, destOff int64) io.Writer { return &bytesWriter{ dest: dest, destOff: destOff, current: 0, } } type bytesWriter struct { dest []byte destOff int64 current int64 } func (bw *bytesWriter) Write(p []byte) (int, error) { defer func() { bw.current = bw.current + int64(len(p)) }() var ( destBase = positive(bw.current - bw.destOff) pBegin = positive(bw.destOff - bw.current) pEnd = positive(bw.destOff + int64(len(bw.dest)) - bw.current) ) if destBase > int64(len(bw.dest)) { return len(p), nil } if pBegin >= int64(len(p)) { return len(p), nil } if pEnd > int64(len(p)) { pEnd = int64(len(p)) } copy(bw.dest[destBase:], p[pBegin:pEnd]) return len(p), nil } func positive(n int64) int64 { if n < 0 { return 0 } return n } soci-snapshotter-0.4.1/fs/remote/blob_test.go000066400000000000000000000412421454010642300212320ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package remote import ( "bytes" "fmt" "io" "mime" "mime/multipart" "net/http" "net/textproto" "sort" "strconv" "strings" "sync" "sync/atomic" "testing" "time" ) const ( testURL = "http://testdummy.com/v2/library/test/blobs/sha256:deadbeaf" rangeHeaderPrefix = "bytes=" sampleChunkSize = 3 sampleMiddleOffset = sampleChunkSize / 2 sampleData1 = "0123456789" ) // Tests ReadAt method of each file. 
func TestReadAt(t *testing.T) { sizeCond := map[string]int64{ "single_chunk": sampleChunkSize - sampleMiddleOffset, "multi_chunks": 2*sampleChunkSize + sampleMiddleOffset, } innerOffsetCond := map[string]int64{ "at_top": 0, "at_middle": sampleMiddleOffset, } baseOffsetCond := map[string]int64{ "of_1st_chunk": sampleChunkSize * 0, "of_2nd_chunk": sampleChunkSize * 1, "of_last_chunk": sampleChunkSize * (int64(len(sampleData1)) / sampleChunkSize), } blobSizeCond := map[string]int64{ "in_1_chunk_blob": sampleChunkSize * 1, "in_3_chunks_blob": sampleChunkSize * 3, "in_max_size_chunk": int64(len(sampleData1)), } transportCond := map[string]struct { allowMultiRange bool }{ "with_multi_reg": { allowMultiRange: true, }, "with_single_reg": { allowMultiRange: false, }, } for sn, size := range sizeCond { for in, innero := range innerOffsetCond { for bo, baseo := range baseOffsetCond { for bs, blobsize := range blobSizeCond { for tc, trCond := range transportCond { t.Run(fmt.Sprintf("reading_%s_%s_%s_%s_%s", sn, in, bo, bs, tc), func(t *testing.T) { if blobsize > int64(len(sampleData1)) { t.Fatal("sample file size is larger than sample data") } wantN := size //7 offset := baseo + innero if remain := blobsize - offset; remain < wantN { if wantN = remain; wantN < 0 { wantN = 0 } } // use constant string value as a data source. want := strings.NewReader(sampleData1) // data we want to get. wantData := make([]byte, wantN) _, err := want.ReadAt(wantData, offset) if err != nil && err != io.EOF { t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) } // data we get through a remote blob. 
blob := []byte(sampleData1)[:blobsize] // Check with allowing multi range requests tr := multiRoundTripper(t, blob, allowMultiRange(trCond.allowMultiRange)) // Check ReadAt method bb1 := makeTestBlob(t, blobsize, tr) checkRead(t, wantData, bb1, offset, wantN) }) } } } } } } func checkRead(t *testing.T, wantData []byte, r *blob, offset int64, wantSize int64) { respData := make([]byte, wantSize) t.Logf("reading offset:%d, size:%d", offset, wantSize) n, err := r.ReadAt(respData, offset) if err != nil { t.Errorf("failed to read off=%d, size=%d, blobsize=%d: %v", offset, wantSize, r.Size(), err) return } respData = respData[:n] if !bytes.Equal(wantData, respData) { t.Errorf("off=%d, blobsize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)", offset, r.Size(), len(respData), string(respData), len(wantData), string(wantData)) return } } // Tests ReadAt method for failure cases. func TestFailReadAt(t *testing.T) { // test failed http respose. r := makeTestBlob(t, int64(len(sampleData1)), failRoundTripper()) respData := make([]byte, len(sampleData1)) _, err := r.ReadAt(respData, 0) if err == nil || err == io.EOF { t.Errorf("must be fail for http failure but err=%v", err) return } // test broken body with allowing multi range checkBrokenBody(t, true) // with allowing multi range checkBrokenBody(t, false) // with prohibiting multi range // test broken header checkBrokenHeader(t, true) // with allowing multi range checkBrokenHeader(t, false) // with prohibiting multi range } func checkBrokenBody(t *testing.T, allowMultiRange bool) { respData := make([]byte, len(sampleData1)) r := makeTestBlob(t, int64(len(sampleData1)), brokenBodyRoundTripper(t, []byte(sampleData1), allowMultiRange)) if _, err := r.ReadAt(respData, 0); err == nil { t.Errorf("must be fail for broken full body but err=%v (allowMultiRange=%v)", err, allowMultiRange) return } r = makeTestBlob(t, int64(len(sampleData1)), brokenBodyRoundTripper(t, []byte(sampleData1), allowMultiRange)) if _, err := 
r.ReadAt(respData[0:len(sampleData1)/2], 0); err == nil { t.Errorf("must be fail for broken multipart body but err=%v (allowMultiRange=%v)", err, allowMultiRange) return } } func checkBrokenHeader(t *testing.T, allowMultiRange bool) { r := makeTestBlob(t, int64(len(sampleData1)), brokenHeaderRoundTripper(t, []byte(sampleData1), allowMultiRange)) respData := make([]byte, len(sampleData1)) if _, err := r.ReadAt(respData[0:len(sampleData1)/2], 0); err == nil || err == io.EOF { t.Errorf("must be fail for broken multipart header but err=%v (allowMultiRange=%v)", err, allowMultiRange) return } } func TestParallelDownloadingBehavior(t *testing.T) { type testData struct { name string regions [3]region roundtripCount int64 content string } tests := []testData{ { name: "same_regions", regions: [3]region{ { b: 0, e: 3, }, { b: 0, e: 3, }, { b: 0, e: 3, }, }, roundtripCount: 3, content: "test", }, { name: "different_regions", regions: [3]region{ { b: 0, e: 3, }, { b: 4, e: 7, }, { b: 0, e: 9, }, }, roundtripCount: 3, content: "test123456", }, } var wg sync.WaitGroup // we always run 3 routines routines := 3 for _, tst := range tests { var ( tr = &callsCountRoundTripper{ content: tst.content, } b = &blob{ fetcher: &httpFetcher{ url: "test", tr: tr, }, size: int64(len(tst.content)), } ) start := make(chan struct{}) wg.Add(routines) var contentBytes [3][]byte for i := 0; i < routines; i++ { reg := tst.regions[i] contentBytes[i] = make([]byte, reg.size()) w := newBytesWriter(contentBytes[i], 0) go func() { <-start // by blocking on channel start we can ensure that the goroutines will run at approximately the same time defer wg.Done() b.fetchRange(reg, w, &options{}) }() } close(start) // starting wg.Wait() // We expect the number of round trip calls to be 1, since we are making 5 calls to fetchRange with // overlapping intervals. 
if tr.count != tst.roundtripCount { t.Errorf("%v test failed: the round trip count should be %v, but was %v", tst.name, tst.roundtripCount, tr.count) } // Check for contents for j := range contentBytes { for i := 0; i < int(tst.regions[j].size()); i++ { if contentBytes[j][i] != []byte(tst.content)[i] { t.Errorf("%v test failed: the output sequence is wrong, wanted %v, got %v", tst.name, []byte(tst.content)[i], contentBytes[j]) break } } } } } func makeTestBlob(t *testing.T, size int64, fn RoundTripFunc) *blob { var ( lastCheck time.Time checkInterval time.Duration ) return makeBlob( &httpFetcher{ url: testURL, tr: fn, }, size, lastCheck, checkInterval, &Resolver{}, time.Duration(defaultFetchTimeoutSec)*time.Second) } func TestCheckInterval(t *testing.T) { var ( tr = &calledRoundTripper{} firstTime = time.Now() b = &blob{ fetcher: &httpFetcher{ url: "test", tr: tr, }, lastCheck: firstTime, } ) check := func(name string, checkInterval time.Duration) (time.Time, bool) { beforeUpdate := time.Now() time.Sleep(time.Millisecond) tr.called = false b.checkInterval = checkInterval if err := b.Check(); err != nil { t.Fatalf("%q: check mustn't be failed", name) } time.Sleep(time.Millisecond) afterUpdate := time.Now() if !tr.called { return b.lastCheck, false } if !(b.lastCheck.After(beforeUpdate) && b.lastCheck.Before(afterUpdate)) { t.Errorf("%q: updated time must be after %q and before %q but %q", name, beforeUpdate, afterUpdate, b.lastCheck) } return b.lastCheck, true } // second time(not expired yet) secondTime, called := check("second time", time.Hour) if called { t.Error("mustn't be checked if not expired") } if !secondTime.Equal(firstTime) { t.Errorf("lastCheck time must be same as first time(%q) but %q", firstTime, secondTime) } // third time(expired, must be checked) if _, called := check("third time", 0); !called { t.Error("must be called for the third time") } } type callsCountRoundTripper struct { count int64 content string } func (c *callsCountRoundTripper) 
RoundTrip(req *http.Request) (res *http.Response, err error) { atomic.AddInt64(&c.count, 1) time.Sleep(50 * time.Millisecond) // sleep for 50 milliseconds to emulate the http call and to make sure that we can run tests on parallel goroutines convertBody := func(r io.ReadCloser) io.ReadCloser { return r } header := make(http.Header) header.Add("Content-Length", fmt.Sprintf("%d", len(c.content))) return &http.Response{ StatusCode: http.StatusOK, Header: header, Body: convertBody(io.NopCloser(bytes.NewReader([]byte(c.content)))), }, nil } type calledRoundTripper struct { called bool } func (c *calledRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) { c.called = true res = &http.Response{ StatusCode: http.StatusOK, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte("test"))), } return } type RoundTripFunc func(req *http.Request) *http.Response func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req), nil } type bodyConverter func(r io.ReadCloser) io.ReadCloser type exceptRegions []region type allowMultiRange bool func multiRoundTripper(t *testing.T, contents []byte, opts ...interface{}) RoundTripFunc { multiRangeEnable := true doNotFetch := []region{} convertBody := func(r io.ReadCloser) io.ReadCloser { return r } for _, opt := range opts { if v, ok := opt.(allowMultiRange); ok { multiRangeEnable = bool(v) } else if v, ok := opt.(exceptRegions); ok { doNotFetch = []region(v) } else if v, ok := opt.(bodyConverter); ok { convertBody = (func(r io.ReadCloser) io.ReadCloser)(v) } } emptyResponse := func(statusCode int) *http.Response { return &http.Response{ StatusCode: statusCode, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), } } return func(req *http.Request) *http.Response { // Validate request if req.Method != "GET" || req.URL.String() != testURL { return emptyResponse(http.StatusBadRequest) } ranges := req.Header.Get("Range") if ranges == "" { return 
emptyResponse(http.StatusBadRequest) } if !strings.HasPrefix(ranges, rangeHeaderPrefix) { return emptyResponse(http.StatusBadRequest) } rlist := strings.Split(ranges[len(rangeHeaderPrefix):], ",") if len(rlist) == 0 { return emptyResponse(http.StatusBadRequest) } // check this request can be served as one whole blob. var sorted []region for _, part := range rlist { begin, end := parseRangeString(t, part) sorted = append(sorted, region{begin, end}) } sort.Slice(sorted, func(i, j int) bool { return sorted[i].b < sorted[j].b }) var sparse bool if sorted[0].b == 0 { var max int64 for _, reg := range sorted { if reg.e > max { if max < reg.b-1 { sparse = true break } max = reg.e } } if max >= int64(len(contents)-1) && !sparse { t.Logf("serving whole range %q = %d", ranges, len(contents)) header := make(http.Header) header.Add("Content-Length", fmt.Sprintf("%d", len(contents))) return &http.Response{ StatusCode: http.StatusOK, Header: header, Body: convertBody(io.NopCloser(bytes.NewReader(contents))), } } } if !multiRangeEnable { if len(rlist) > 1 { return emptyResponse(http.StatusBadRequest) // prohibiting multi range } // serve as single part response begin, end := parseRangeString(t, rlist[0]) target := region{begin, end} for _, reg := range doNotFetch { if target.b <= reg.b && reg.e <= target.e { t.Fatalf("Requested prohibited region (singlepart): (%d, %d) contained in fetching region (%d, %d)", reg.b, reg.e, target.b, target.e) } } header := make(http.Header) header.Add("Content-Length", fmt.Sprintf("%d", target.size())) header.Add("Content-Range", fmt.Sprintf("bytes %d-%d/%d", target.b, target.e, len(contents))) header.Add("Content-Type", "application/octet-stream") part := contents[target.b : target.e+1] return &http.Response{ StatusCode: http.StatusPartialContent, Header: header, Body: convertBody(io.NopCloser(bytes.NewReader(part))), } } // Write multipart response. 
var buf bytes.Buffer mw := multipart.NewWriter(&buf) for _, part := range rlist { mh := make(textproto.MIMEHeader) mh.Set("Content-Range", fmt.Sprintf("bytes %s/%d", part, len(contents))) w, err := mw.CreatePart(mh) if err != nil { t.Fatalf("failed to create part: %v", err) } begin, end := parseRangeString(t, part) if begin >= int64(len(contents)) { // skip if out of range. continue } if end > int64(len(contents)-1) { end = int64(len(contents) - 1) } for _, reg := range doNotFetch { if begin <= reg.b && reg.e <= end { t.Fatalf("Requested prohibited region (multipart): (%d, %d) contained in fetching region (%d, %d)", reg.b, reg.e, begin, end) } } if n, err := w.Write(contents[begin : end+1]); err != nil || int64(n) != end+1-begin { t.Fatalf("failed to write to part(%d-%d): %v", begin, end, err) } } mw.Close() param := map[string]string{ "boundary": mw.Boundary(), } header := make(http.Header) header.Add("Content-Type", mime.FormatMediaType("multipart/text", param)) return &http.Response{ StatusCode: http.StatusPartialContent, Header: header, Body: convertBody(io.NopCloser(&buf)), } } } func failRoundTripper() RoundTripFunc { return func(req *http.Request) *http.Response { return &http.Response{ StatusCode: http.StatusInternalServerError, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), } } } func brokenBodyRoundTripper(t *testing.T, contents []byte, multiRange bool) RoundTripFunc { breakReadCloser := func(r io.ReadCloser) io.ReadCloser { defer r.Close() data, err := io.ReadAll(r) if err != nil { t.Fatalf("failed to break read closer faild to read original: %v", err) } return io.NopCloser(bytes.NewReader(data[:len(data)/2])) } tr := multiRoundTripper(t, contents, allowMultiRange(multiRange), bodyConverter(breakReadCloser)) return func(req *http.Request) *http.Response { return tr(req) } } func brokenHeaderRoundTripper(t *testing.T, contents []byte, multiRange bool) RoundTripFunc { tr := multiRoundTripper(t, contents, 
allowMultiRange(multiRange)) return func(req *http.Request) *http.Response { res := tr(req) res.Header = make(http.Header) return res } } func parseRangeString(t *testing.T, rangeString string) (int64, int64) { rng := strings.Split(rangeString, "-") if len(rng) != 2 { t.Fatalf("falied to parse range %q", rng) } begin, err := strconv.ParseInt(rng[0], 10, 64) if err != nil { t.Fatalf("failed to parse beginning offset: %v", err) } end, err := strconv.ParseInt(rng[1], 10, 64) if err != nil { t.Fatalf("failed to parse ending offset: %v", err) } return begin, end } soci-snapshotter-0.4.1/fs/remote/resolver.go000066400000000000000000000472421454010642300211240ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package remote import ( "context" "crypto/sha256" "errors" "fmt" "io" "mime" "mime/multipart" "net/http" "net/url" "path" "strconv" "strings" "sync" "time" "github.com/awslabs/soci-snapshotter/cache" "github.com/awslabs/soci-snapshotter/config" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" "github.com/awslabs/soci-snapshotter/fs/source" socihttp "github.com/awslabs/soci-snapshotter/util/http" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes/docker" rhttp "github.com/hashicorp/go-retryablehttp" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) const ( defaultValidIntervalSec int64 = 60 defaultFetchTimeoutSec int64 = 300 ) func NewResolver(cfg config.BlobConfig, handlers map[string]Handler) *Resolver { return &Resolver{ blobConfig: cfg, handlers: handlers, } } type Resolver struct { blobConfig config.BlobConfig handlers map[string]Handler } type fetcher interface { fetch(ctx context.Context, rs []region, retry bool) (multipartReadCloser, error) check() error genID(reg region) string } func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, blobCache cache.BlobCache) (Blob, error) { f, size, err := r.resolveFetcher(ctx, hosts, refspec, desc) if err != nil { return nil, err } blobConfig := &r.blobConfig return makeBlob(f, size, time.Now(), time.Duration(blobConfig.ValidInterval)*time.Second, r, time.Duration(blobConfig.FetchTimeoutSec)*time.Second), nil } func (r *Resolver) resolveFetcher(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (f fetcher, size int64, err error) { blobConfig := &r.blobConfig fc := &fetcherConfig{ hosts: hosts, refspec: refspec, desc: desc, maxRetries: blobConfig.MaxRetries, minWait: time.Duration(blobConfig.MinWaitMsec) * 
time.Millisecond, maxWait: time.Duration(blobConfig.MaxWaitMsec) * time.Millisecond, } var handlersErr error for name, p := range r.handlers { // TODO: allow to configure the selection of readers based on the hostname in refspec r, size, err := p.Handle(ctx, desc) if err != nil { handlersErr = errors.Join(handlersErr, err) continue } log.G(ctx).WithField("handler name", name).WithField("ref", refspec.String()).WithField("digest", desc.Digest). Debugf("contents is provided by a handler") return &remoteFetcher{r}, size, nil } logger := log.G(ctx) if handlersErr != nil { logger = logger.WithError(handlersErr) } logger.WithField("ref", refspec.String()).WithField("digest", desc.Digest).Debugf("using default handler") hf, err := newHTTPFetcher(ctx, fc) if err != nil { return nil, 0, err } if desc.Size == 0 { desc.Size, err = getLayerSize(ctx, hf) if err != nil { return nil, 0, fmt.Errorf("failed to retrieve layer size from %s after it was not found in labels: %w", hf.url, err) } } if blobConfig.ForceSingleRangeMode { hf.singleRangeMode() } return hf, desc.Size, err } type fetcherConfig struct { hosts source.RegistryHosts refspec reference.Spec desc ocispec.Descriptor maxRetries int minWait time.Duration maxWait time.Duration } func newHTTPFetcher(ctx context.Context, fc *fetcherConfig) (*httpFetcher, error) { reghosts, err := fc.hosts(fc.refspec) if err != nil { return nil, err } desc := fc.desc if desc.Digest.String() == "" { return nil, fmt.Errorf("digest is mandatory in layer descriptor") } digest := desc.Digest pullScope, err := repositoryScope(fc.refspec, false) if err != nil { return nil, err } // Try to create fetcher until succeeded rErr := fmt.Errorf("failed to resolve") for _, host := range reghosts { if host.Host == "" || strings.Contains(host.Host, "/") { rErr = fmt.Errorf("invalid destination (host %q, ref:%q, digest:%q): %w", host.Host, fc.refspec, digest, rErr) continue // Try another } // Prepare transport with authorization functionality tr := 
host.Client.Transport timeout := host.Client.Timeout if rt, ok := tr.(*rhttp.RoundTripper); ok { rt.Client.RetryMax = fc.maxRetries rt.Client.RetryWaitMin = fc.minWait rt.Client.RetryWaitMax = fc.maxWait rt.Client.Backoff = socihttp.BackoffStrategy rt.Client.CheckRetry = socihttp.RetryStrategy timeout = rt.Client.HTTPClient.Timeout } if host.Authorizer != nil { tr = &transport{ inner: tr, auth: host.Authorizer, scope: pullScope, } } // Resolve redirection and get blob URL blobURL := fmt.Sprintf("%s://%s/%s/blobs/%s", host.Scheme, path.Join(host.Host, host.Path), strings.TrimPrefix(fc.refspec.Locator, fc.refspec.Hostname()+"/"), digest) url, err := redirect(ctx, blobURL, tr, timeout) if err != nil { rErr = fmt.Errorf("failed to redirect (host %q, ref:%q, digest:%q): %v: %w", host.Host, fc.refspec, digest, err, rErr) continue // Try another } // Hit one destination return &httpFetcher{ url: url, tr: tr, blobURL: blobURL, digest: digest, timeout: timeout, }, nil } return nil, fmt.Errorf("cannot resolve layer: %w", rErr) } type transport struct { inner http.RoundTripper auth docker.Authorizer scope string } func (tr *transport) RoundTrip(req *http.Request) (*http.Response, error) { ctx := docker.WithScope(req.Context(), tr.scope) roundTrip := func(req *http.Request) (*http.Response, error) { // authorize the request using docker.Authorizer if err := tr.auth.Authorize(ctx, req); err != nil { return nil, err } // send the request return tr.inner.RoundTrip(req) } resp, err := roundTrip(req) if err != nil { return nil, err } // TODO: support more status codes and retries if resp.StatusCode == http.StatusUnauthorized { log.G(ctx).Infof("Received status code: %v. Refreshing creds...", resp.Status) // Prepare authorization for the target host using docker.Authorizer. // The docker authorizer only refreshes OAuth tokens after two // successive 401 errors for the same URL. 
Rather than issue the same // request multiple times to tickle the token-refreshing logic, just // provide the same response twice to trick it into refreshing the // cached OAuth token. Call AddResponses() twice, first to invalidate // the existing token (with two responses), second to fetch a new one // (with one response). // TODO: fix after one of these two PRs are merged and available: // https://github.com/containerd/containerd/pull/8735 // https://github.com/containerd/containerd/pull/8388 if err := tr.auth.AddResponses(ctx, []*http.Response{resp, resp}); err != nil { if errdefs.IsNotImplemented(err) { return resp, nil } return nil, err } if err := tr.auth.AddResponses(ctx, []*http.Response{resp}); err != nil { if errdefs.IsNotImplemented(err) { return resp, nil } return nil, err } // re-authorize and send the request return roundTrip(req.Clone(ctx)) } return resp, nil } func redirect(ctx context.Context, blobURL string, tr http.RoundTripper, timeout time.Duration) (url string, err error) { if timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, timeout) defer cancel() } // We use GET request for redirect. // gcr.io returns 200 on HEAD without Location header (2020). // ghcr.io returns 200 on HEAD without Location header (2020). 
req, err := http.NewRequestWithContext(ctx, "GET", blobURL, nil) if err != nil { return "", fmt.Errorf("failed to make request to the registry: %w", err) } req.Close = false req.Header.Set("Range", "bytes=0-1") res, err := tr.RoundTrip(req) if err != nil { return "", fmt.Errorf("failed to request: %w", err) } defer func() { io.Copy(io.Discard, res.Body) res.Body.Close() }() if res.StatusCode/100 == 2 { url = blobURL } else if redir := res.Header.Get("Location"); redir != "" && res.StatusCode/100 == 3 { // TODO: Support nested redirection url = redir } else { return "", fmt.Errorf("failed to access to the registry with code %v", res.StatusCode) } return } func getLayerSize(ctx context.Context, hf *httpFetcher) (int64, error) { if hf.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, hf.timeout) defer cancel() } req, err := http.NewRequestWithContext(ctx, "HEAD", hf.url, nil) if err != nil { return 0, err } req.Close = false res, err := hf.tr.RoundTrip(req) if err != nil { return 0, err } defer res.Body.Close() if res.StatusCode == http.StatusOK { return strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64) } headStatusCode := res.StatusCode // Failed to do HEAD request. Fall back to GET. // ghcr.io (https://github-production-container-registry.s3.amazonaws.com) doesn't allow // HEAD request (2020). 
req, err = http.NewRequestWithContext(ctx, "GET", hf.url, nil) if err != nil { return 0, fmt.Errorf("failed to make request to the registry: %w", err) } req.Close = false req.Header.Set("Range", "bytes=0-1") res, err = hf.tr.RoundTrip(req) if err != nil { return 0, fmt.Errorf("failed to request: %w", err) } defer func() { io.Copy(io.Discard, res.Body) res.Body.Close() }() if res.StatusCode == http.StatusOK { return strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64) } else if res.StatusCode == http.StatusPartialContent { _, size, err := parseRange(res.Header.Get("Content-Range")) return size, err } return 0, fmt.Errorf("failed to get size with code (HEAD=%v, GET=%v)", headStatusCode, res.StatusCode) } type httpFetcher struct { url string urlMu sync.Mutex tr http.RoundTripper blobURL string digest digest.Digest singleRange bool singleRangeMu sync.Mutex timeout time.Duration } type multipartReadCloser interface { Next() (region, io.Reader, error) Close() error } func (f *httpFetcher) fetch(ctx context.Context, rs []region, retry bool) (multipartReadCloser, error) { if len(rs) == 0 { return nil, fmt.Errorf("no request queried") } var ( tr = f.tr singleRangeMode = f.isSingleRangeMode() ) // squash requesting regions for reducing the total size of request header // (servers generally have limits for the size of headers) // TODO: when our request has too many ranges, we need to divide it into // multiple requests to avoid huge header. var s regionSet for _, reg := range rs { s.add(reg) } requests := s.rs if singleRangeMode { // Squash requests if the layer doesn't support multi range. 
requests = []region{superRegion(requests)} } // Request to the registry f.urlMu.Lock() url := f.url f.urlMu.Unlock() req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return nil, err } var ranges string for _, reg := range requests { ranges += fmt.Sprintf("%d-%d,", reg.b, reg.e) } req.Header.Add("Range", fmt.Sprintf("bytes=%s", ranges[:len(ranges)-1])) req.Header.Add("Accept-Encoding", "identity") req.Header.Add("User-Agent", socihttp.UserAgent) req.Close = false // Recording the roundtrip latency for remote registry GET operation. start := time.Now() res, err := tr.RoundTrip(req) // NOT DefaultClient; don't want redirects commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.RemoteRegistryGet, f.digest, start) if err != nil { return nil, err } if res.StatusCode == http.StatusOK { // We are getting the whole blob in one part (= status 200) size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64) if err != nil { return nil, fmt.Errorf("failed to parse Content-Length: %w", err) } return newSinglePartReader(region{0, size - 1}, res.Body), nil } else if res.StatusCode == http.StatusPartialContent { mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type")) if err != nil { return nil, fmt.Errorf("invalid media type %q: %w", mediaType, err) } if strings.HasPrefix(mediaType, "multipart/") { // We are getting a set of regions as a multipart body. return newMultiPartReader(res.Body, params["boundary"]), nil } // We are getting single range reg, _, err := parseRange(res.Header.Get("Content-Range")) if err != nil { return nil, fmt.Errorf("failed to parse Content-Range: %w", err) } return newSinglePartReader(reg, res.Body), nil } else if retry && res.StatusCode == http.StatusForbidden { log.G(ctx).Infof("Received status code: %v. Refreshing URL and retrying...", res.Status) // re-redirect and retry this once. 
if err := f.refreshURL(ctx); err != nil { return nil, fmt.Errorf("failed to refresh URL on %v: %w", res.Status, err) } return f.fetch(ctx, rs, false) } else if retry && res.StatusCode == http.StatusBadRequest && !singleRangeMode { log.G(ctx).Infof("Received status code: %v. Setting single range mode and retrying...", res.Status) // gcr.io (https://storage.googleapis.com) returns 400 on multi-range request (2020 #81) f.singleRangeMode() // fallbacks to singe range request mode return f.fetch(ctx, rs, false) // retries with the single range mode } return nil, fmt.Errorf("unexpected status code: %v", res.Status) } func (f *httpFetcher) check() error { ctx := context.Background() if f.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, f.timeout) defer cancel() } f.urlMu.Lock() url := f.url f.urlMu.Unlock() req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return fmt.Errorf("check failed: failed to make request: %w", err) } req.Close = false req.Header.Set("Range", "bytes=0-1") res, err := f.tr.RoundTrip(req) if err != nil { return fmt.Errorf("check failed: failed to request to registry: %w", err) } defer func() { io.Copy(io.Discard, res.Body) res.Body.Close() }() if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusPartialContent { return nil } else if res.StatusCode == http.StatusForbidden { // Try to re-redirect this blob rCtx := context.Background() if f.timeout > 0 { var rCancel context.CancelFunc rCtx, rCancel = context.WithTimeout(rCtx, f.timeout) defer rCancel() } if err := f.refreshURL(rCtx); err == nil { return nil } return fmt.Errorf("failed to refresh URL on status %v", res.Status) } return fmt.Errorf("unexpected status code %v", res.StatusCode) } func (f *httpFetcher) refreshURL(ctx context.Context) error { newURL, err := redirect(ctx, f.blobURL, f.tr, f.timeout) if err != nil { return err } f.urlMu.Lock() f.url = newURL f.urlMu.Unlock() return nil } func (f *httpFetcher) genID(reg 
region) string { sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%d", f.blobURL, reg.b, reg.e))) return fmt.Sprintf("%x", sum) } func (f *httpFetcher) singleRangeMode() { f.singleRangeMu.Lock() f.singleRange = true f.singleRangeMu.Unlock() } func (f *httpFetcher) isSingleRangeMode() bool { f.singleRangeMu.Lock() r := f.singleRange f.singleRangeMu.Unlock() return r } func newSinglePartReader(reg region, rc io.ReadCloser) multipartReadCloser { return &singlepartReader{ r: rc, Closer: rc, reg: reg, } } type singlepartReader struct { io.Closer r io.Reader reg region called bool } func (sr *singlepartReader) Next() (region, io.Reader, error) { if !sr.called { sr.called = true return sr.reg, sr.r, nil } return region{}, nil, io.EOF } func newMultiPartReader(rc io.ReadCloser, boundary string) multipartReadCloser { return &multipartReader{ m: multipart.NewReader(rc, boundary), Closer: rc, } } type multipartReader struct { io.Closer m *multipart.Reader } func (sr *multipartReader) Next() (region, io.Reader, error) { p, err := sr.m.NextPart() if err != nil { return region{}, nil, err } reg, _, err := parseRange(p.Header.Get("Content-Range")) if err != nil { return region{}, nil, fmt.Errorf("failed to parse Content-Range: %w", err) } return reg, p, nil } func parseRange(header string) (region, int64, error) { submatches := contentRangeRegexp.FindStringSubmatch(header) if len(submatches) < 4 { return region{}, 0, fmt.Errorf("Content-Range %q doesn't have enough information", header) } begin, err := strconv.ParseInt(submatches[1], 10, 64) if err != nil { return region{}, 0, fmt.Errorf("failed to parse beginning offset %q: %w", submatches[1], err) } end, err := strconv.ParseInt(submatches[2], 10, 64) if err != nil { return region{}, 0, fmt.Errorf("failed to parse end offset %q: %w", submatches[2], err) } blobSize, err := strconv.ParseInt(submatches[3], 10, 64) if err != nil { return region{}, 0, fmt.Errorf("failed to parse blob size %q: %w", submatches[3], err) } return 
region{begin, end}, blobSize, nil } type Option func(*options) type options struct { ctx context.Context cacheOpts []cache.Option } func WithContext(ctx context.Context) Option { return func(opts *options) { opts.ctx = ctx } } func WithCacheOpts(cacheOpts ...cache.Option) Option { return func(opts *options) { opts.cacheOpts = cacheOpts } } // NOTE: ported from https://github.com/containerd/containerd/blob/v1.5.2/remotes/docker/scope.go#L29-L42 // TODO: import this from containerd package once we drop support to continerd v1.4.x // // repositoryScope returns a repository scope string such as "repository:foo/bar:pull" // for "host/foo/bar:baz". // When push is true, both pull and push are added to the scope. func repositoryScope(refspec reference.Spec, push bool) (string, error) { u, err := url.Parse("dummy://" + refspec.Locator) if err != nil { return "", err } s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" if push { s += ",push" } return s, nil } type remoteFetcher struct { r Fetcher } func (r *remoteFetcher) fetch(ctx context.Context, rs []region, retry bool) (multipartReadCloser, error) { var s regionSet for _, reg := range rs { s.add(reg) } reg := superRegion(s.rs) rc, err := r.r.Fetch(ctx, reg.b, reg.size()) if err != nil { return nil, err } return newSinglePartReader(reg, rc), nil } func (r *remoteFetcher) check() error { return r.r.Check() } func (r *remoteFetcher) genID(reg region) string { return r.r.GenID(reg.b, reg.size()) } type Handler interface { Handle(ctx context.Context, desc ocispec.Descriptor) (fetcher Fetcher, size int64, err error) } type Fetcher interface { Fetch(ctx context.Context, off int64, size int64) (io.ReadCloser, error) Check() error GenID(off int64, size int64) string } soci-snapshotter-0.4.1/fs/remote/resolver_test.go000066400000000000000000000230751454010642300221610ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. 
*/ package remote import ( "bytes" "context" "fmt" "io" "net/http" "net/url" "path/filepath" "regexp" "strings" "testing" socihttp "github.com/awslabs/soci-snapshotter/util/http" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes/docker" rhttp "github.com/hashicorp/go-retryablehttp" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) func TestMirror(t *testing.T) { ref := "dummyexample.com/library/test" refspec, err := reference.Parse(ref) if err != nil { t.Fatalf("failed to prepare dummy reference: %v", err) } var ( blobDigest = digest.FromString("dummy") blobPath = filepath.Join("/v2", strings.TrimPrefix(refspec.Locator, refspec.Hostname()+"/"), "blobs", blobDigest.String()) refHost = refspec.Hostname() ) tests := []struct { name string tr http.RoundTripper mirrors []string wantHost string error bool }{ { name: "no-mirror", tr: &sampleRoundTripper{okURLs: []string{refHost}}, mirrors: nil, wantHost: refHost, }, { name: "valid-mirror", tr: &sampleRoundTripper{okURLs: []string{"mirrorexample.com"}}, mirrors: []string{"mirrorexample.com"}, wantHost: "mirrorexample.com", }, { name: "invalid-mirror", tr: &sampleRoundTripper{ withCode: map[string]int{ "mirrorexample1.com": http.StatusInternalServerError, "mirrorexample2.com": http.StatusUnauthorized, "mirrorexample3.com": http.StatusNotFound, }, okURLs: []string{"mirrorexample4.com", refHost}, }, mirrors: []string{ "mirrorexample1.com", "mirrorexample2.com", "mirrorexample3.com", "mirrorexample4.com", }, wantHost: "mirrorexample4.com", }, { name: "invalid-all-mirror", tr: &sampleRoundTripper{ withCode: map[string]int{ "mirrorexample1.com": http.StatusInternalServerError, "mirrorexample2.com": http.StatusUnauthorized, "mirrorexample3.com": http.StatusNotFound, }, okURLs: []string{refHost}, }, mirrors: []string{ "mirrorexample1.com", "mirrorexample2.com", "mirrorexample3.com", }, wantHost: refHost, }, { name: 
"invalid-hostname-of-mirror", tr: &sampleRoundTripper{ okURLs: []string{`.*`}, }, mirrors: []string{"mirrorexample.com/somepath/"}, wantHost: refHost, }, { name: "redirected-mirror", tr: &sampleRoundTripper{ redirectURL: map[string]string{ regexp.QuoteMeta(fmt.Sprintf("mirrorexample.com%s", blobPath)): "https://backendexample.com/blobs/" + blobDigest.String(), }, okURLs: []string{`.*`}, }, mirrors: []string{"mirrorexample.com"}, wantHost: "backendexample.com", }, { name: "fail-all", tr: &sampleRoundTripper{}, mirrors: []string{"mirrorexample.com"}, wantHost: "", error: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { hosts := func(refspec reference.Spec) (reghosts []docker.RegistryHost, _ error) { host := refspec.Hostname() for _, m := range append(tt.mirrors, host) { reghosts = append(reghosts, docker.RegistryHost{ Client: &http.Client{Transport: tt.tr}, Host: m, Scheme: "https", Path: "/v2", Capabilities: docker.HostCapabilityPull, }) } return } fetcher, err := newHTTPFetcher(context.Background(), &fetcherConfig{ hosts: hosts, refspec: refspec, desc: ocispec.Descriptor{Digest: blobDigest}, }) if err != nil { if tt.error { return } t.Fatalf("failed to resolve reference: %v", err) } nurl, err := url.Parse(fetcher.url) if err != nil { t.Fatalf("failed to parse url %q: %v", fetcher.url, err) } if nurl.Hostname() != tt.wantHost { t.Errorf("invalid hostname %q(%q); want %q", nurl.Hostname(), nurl.String(), tt.wantHost) } }) } } type sampleRoundTripper struct { withCode map[string]int redirectURL map[string]string okURLs []string } func (tr *sampleRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { for host, code := range tr.withCode { if ok, _ := regexp.Match(host, []byte(req.URL.String())); ok { return &http.Response{ StatusCode: code, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), Request: req, }, nil } } for host, rurl := range tr.redirectURL { if ok, _ := regexp.Match(host, 
[]byte(req.URL.String())); ok { header := make(http.Header) header.Add("Location", rurl) return &http.Response{ StatusCode: http.StatusMovedPermanently, Header: header, Body: io.NopCloser(bytes.NewReader([]byte{})), Request: req, }, nil } } for _, host := range tr.okURLs { if ok, _ := regexp.Match(host, []byte(req.URL.String())); ok { header := make(http.Header) header.Add("Content-Length", "1") return &http.Response{ StatusCode: http.StatusOK, Header: header, Body: io.NopCloser(bytes.NewReader([]byte{0})), Request: req, }, nil } } return &http.Response{ StatusCode: http.StatusNotFound, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), Request: req, }, nil } func TestCheck(t *testing.T) { tr := &breakRoundTripper{} f := &httpFetcher{ url: "test", tr: tr, } tr.success = true if err := f.check(); err != nil { t.Errorf("connection failed; wanted to succeed") } tr.success = false if err := f.check(); err == nil { t.Errorf("connection succeeded; wanted to fail") } } type breakRoundTripper struct { success bool } func (b *breakRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) { if b.success { res = &http.Response{ StatusCode: http.StatusPartialContent, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte("test"))), } } else { res = &http.Response{ StatusCode: http.StatusInternalServerError, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), } } return } func TestRetry(t *testing.T) { tr := &retryRoundTripper{} rclient := rhttp.NewClient() rclient.HTTPClient.Transport = tr rclient.Backoff = socihttp.BackoffStrategy f := &httpFetcher{ url: "test", tr: &rhttp.RoundTripper{Client: rclient}, } regions := []region{{b: 0, e: 1}} _, err := f.fetch(context.Background(), regions, true) if err != nil { t.Fatalf("unexpected error = %v", err) } if tr.retryCount != 4 { t.Fatalf("unexpected retryCount; expected=4 got=%d", tr.retryCount) } } type retryRoundTripper struct { retryCount int } func (r 
*retryRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) { defer func() { r.retryCount++ }() switch r.retryCount { case 0: err = fmt.Errorf("dummy error") case 1: res = &http.Response{ StatusCode: http.StatusTooManyRequests, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), } case 2: res = &http.Response{ StatusCode: http.StatusServiceUnavailable, Header: make(http.Header), Body: io.NopCloser(bytes.NewReader([]byte{})), } default: header := make(http.Header) header.Add("Content-Length", "4") res = &http.Response{ StatusCode: http.StatusOK, Header: header, Body: io.NopCloser(bytes.NewReader([]byte("test"))), } } return } func TestCustomUserAgent(t *testing.T) { rt := &userAgentRoundTripper{expectedUserAgent: socihttp.UserAgent} f := &httpFetcher{ url: "dummyregistry", tr: rt, } regions := []region{{b: 0, e: 1}} _, err := f.fetch(context.Background(), regions, true) if err != nil { t.Fatalf("unexpected error = %v", err) } if rt.roundTripUserAgent != rt.expectedUserAgent { t.Fatalf("unexpected User-Agent; expected %s; got %s", rt.expectedUserAgent, rt.roundTripUserAgent) } } type userAgentRoundTripper struct { expectedUserAgent string roundTripUserAgent string } func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { u.roundTripUserAgent = req.UserAgent() header := make(http.Header) header.Add("Content-Length", "4") return &http.Response{ StatusCode: http.StatusOK, Request: req, Header: header, Body: io.NopCloser(bytes.NewReader([]byte("test"))), }, nil } soci-snapshotter-0.4.1/fs/remote/util.go000066400000000000000000000071471454010642300202400ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package remote // region is HTTP-range-request-compliant range. // "b" is beginning byte of the range and "e" is the end. // "e" is must be inclusive along with HTTP's range expression. type region struct{ b, e int64 } func (c region) size() int64 { return c.e - c.b + 1 } func superRegion(regs []region) region { s := regs[0] for _, reg := range regs { if reg.b < s.b { s.b = reg.b } if reg.e > s.e { s.e = reg.e } } return s } // regionSet is a set of regions type regionSet struct { rs []region // must be kept sorted } // add attempts to merge r to rs.rs with squashing the regions as // small as possible. This operation takes O(n). // TODO: more efficient way to do it. func (rs *regionSet) add(r region) { // Iterate over the sorted region slice from the tail. // a) When an overwrap occurs, adjust `r` to fully contain the looking region // `l` and remove `l` from region slice. 
// b) Once l.e become less than r.b, no overwrap will occur again. So immediately // insert `r` which fully contains all overwrapped regions, to the region slice. // Here, `r` is inserted to the region slice with keeping it sorted, without // overwrapping to any regions. // *) If any `l` contains `r`, we don't need to do anything so return immediately. for i := len(rs.rs) - 1; i >= 0; i-- { l := &rs.rs[i] // *) l contains r if l.b <= r.b && r.e <= l.e { return } // a) r overwraps to l so adjust r to fully contain l and reomve l // from region slice. if l.b <= r.b && r.b <= l.e+1 && l.e <= r.e { r.b = l.b rs.rs = append(rs.rs[:i], rs.rs[i+1:]...) continue } if r.b <= l.b && l.b <= r.e+1 && r.e <= l.e { r.e = l.e rs.rs = append(rs.rs[:i], rs.rs[i+1:]...) continue } if r.b <= l.b && l.e <= r.e { rs.rs = append(rs.rs[:i], rs.rs[i+1:]...) continue } // b) No overwrap will occur after this iteration. Instert r to the // region slice immediately. if l.e < r.b { rs.rs = append(rs.rs[:i+1], append([]region{r}, rs.rs[i+1:]...)...) return } // No overwrap occurs yet. See the next region. } // r is the topmost region among regions in the slice. rs.rs = append([]region{r}, rs.rs...) } func (rs *regionSet) totalSize() int64 { var sz int64 for _, f := range rs.rs { sz += f.size() } return sz } soci-snapshotter-0.4.1/fs/remote/util_test.go000066400000000000000000000054611454010642300212740ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright 2019 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the NOTICE.md file. */ package remote import ( "reflect" "testing" ) func TestRegionSet(t *testing.T) { tests := []struct { input []region expected []region }{ { input: []region{{1, 3}, {2, 4}}, expected: []region{{1, 4}}, }, { input: []region{{1, 5}, {2, 4}}, expected: []region{{1, 5}}, }, { input: []region{{2, 4}, {1, 5}}, expected: []region{{1, 5}}, }, { input: []region{{2, 4}, {6, 8}, {1, 5}}, expected: []region{{1, 8}}, }, { input: []region{{1, 2}, {1, 2}}, expected: []region{{1, 2}}, }, { input: []region{{1, 3}, {1, 2}}, expected: []region{{1, 3}}, }, { input: []region{{1, 3}, {2, 3}}, expected: []region{{1, 3}}, }, { input: []region{{1, 3}, {3, 6}}, expected: []region{{1, 6}}, }, { input: []region{{1, 3}, {4, 6}}, // region.e is inclusive expected: []region{{1, 6}}, }, { input: []region{{4, 6}, {1, 3}}, // region.e is inclusive expected: []region{{1, 6}}, }, { input: []region{{4, 6}, {1, 3}, {7, 9}, {2, 8}}, expected: []region{{1, 9}}, }, { input: []region{{4, 6}, {1, 5}, {7, 9}, {4, 8}}, expected: []region{{1, 9}}, }, { input: []region{{7, 8}, {1, 2}, {5, 6}}, expected: []region{{1, 2}, {5, 8}}, }, } for i, tt := range tests { var rs regionSet for _, f := range tt.input { rs.add(f) } if !reflect.DeepEqual(tt.expected, rs.rs) { t.Errorf("#%d: expected %v, got %v", i, 
tt.expected, rs.rs) } } } soci-snapshotter-0.4.1/fs/source/000077500000000000000000000000001454010642300167305ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/source/source.go000066400000000000000000000163671454010642300205740ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package source import ( "context" "fmt" "strconv" "strings" "github.com/containerd/containerd/images" "github.com/containerd/containerd/labels" ctdsnapshotters "github.com/containerd/containerd/pkg/snapshotters" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes/docker" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // GetSources is a function for converting snapshot labels into typed blob sources // information. 
This package defines a default converter which provides source // information based on some labels but implementations aren't required to use labels. // Implementations are allowed to return several sources (registry config + image refs) // about the blob. type GetSources func(labels map[string]string) (source []Source, err error) // RegistryHosts returns a list of registries that provides the specified image. type RegistryHosts func(reference.Spec) ([]docker.RegistryHost, error) // Source is a typed blob source information. This contains information about // a blob stored in registries and some contexts of the blob. type Source struct { // Hosts is a registry configuration where this blob is stored. Hosts RegistryHosts // Name is an image reference which contains this blob. Name reference.Spec // Target is a descriptor of this blob. Target ocispec.Descriptor // Manifest is an image manifest which contains the blob. This will // be used by the filesystem to pre-resolve some layers contained in // the manifest. // Currently, layer digest (Manifest.Layers.Digest) and size will be used. Manifest ocispec.Manifest } const ( // TargetSizeLabel is a label which contains layer size. TargetSizeLabel = "containerd.io/snapshot/remote/soci.size" // targetImageLayersSizeLabel is a label which contains layer sizes contained in // the target image. targetImageLayersSizeLabel = "containerd.io/snapshot/remote/image.layers.size" // TargetSociIndexDigestLabel is a label which contains the digest of the soci index. TargetSociIndexDigestLabel = "containerd.io/snapshot/remote/soci.index.digest" ) // FromDefaultLabels returns a function for converting snapshot labels to // source information based on labels. 
func FromDefaultLabels(hosts RegistryHosts) GetSources { return func(labels map[string]string) ([]Source, error) { refStr, ok := labels[ctdsnapshotters.TargetRefLabel] if !ok { return nil, fmt.Errorf("reference hasn't been passed") } refspec, err := reference.Parse(refStr) if err != nil { return nil, err } digestStr, ok := labels[ctdsnapshotters.TargetLayerDigestLabel] if !ok { return nil, fmt.Errorf("digest hasn't been passed") } target, err := digest.Parse(digestStr) if err != nil { return nil, err } var targetSize int64 targetSizeStr, ok := labels[TargetSizeLabel] if ok { targetSize, err = strconv.ParseInt(targetSizeStr, 10, 64) if err != nil { return nil, err } } var neighboringLayers []ocispec.Descriptor if l, ok := labels[ctdsnapshotters.TargetImageLayersLabel]; ok { layerDigestsStr := strings.Split(l, ",") if s, ok := labels[targetImageLayersSizeLabel]; ok { layerSizes := strings.Split(s, ",") if len(layerDigestsStr) != len(layerSizes) { return nil, fmt.Errorf("the lengths of layer digests and layer sizes don't match") } for i := 0; i < len(layerDigestsStr); i++ { l := layerDigestsStr[i] d, err := digest.Parse(l) if err != nil { return nil, err } if d.String() != target.String() { size, err := strconv.ParseInt(layerSizes[i], 10, 64) if err != nil { return nil, err } desc := ocispec.Descriptor{Digest: d, Size: size} neighboringLayers = append(neighboringLayers, desc) } } } } targetDesc := ocispec.Descriptor{ Digest: target, Size: targetSize, Annotations: labels, } return []Source{ { Hosts: hosts, Name: refspec, Target: targetDesc, Manifest: ocispec.Manifest{Layers: append([]ocispec.Descriptor{targetDesc}, neighboringLayers...)}, }, }, nil } } // AppendDefaultLabelsHandlerWrapper makes a handler which appends image's basic // information to each layer descriptor as annotations during unpack. These // annotations will be passed to this remote snapshotter as labels and used to // construct source information. 
func AppendDefaultLabelsHandlerWrapper(indexDigest string, wrapper func(images.Handler) images.Handler) func(f images.Handler) images.Handler { return func(f images.Handler) images.Handler { return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { children, err := wrapper(f).Handle(ctx, desc) if err != nil { return nil, err } switch desc.MediaType { case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest: for i := range children { c := &children[i] if images.IsLayerType(c.MediaType) { if c.Annotations == nil { c.Annotations = make(map[string]string) } c.Annotations[TargetSizeLabel] = fmt.Sprintf("%d", c.Size) c.Annotations[TargetSociIndexDigestLabel] = indexDigest remainingLayerDigestsCount := len(strings.Split(c.Annotations[ctdsnapshotters.TargetImageLayersLabel], ",")) var layerSizes string /* We must ensure that the counts of layer sizes and layer digests are equal. We will limit the # of neighboring label sizes to equal the # of neighboring ayer digests for any given layer. */ for _, l := range children[i : i+remainingLayerDigestsCount] { if images.IsLayerType(l.MediaType) { ls := fmt.Sprintf("%d,", l.Size) // This avoids the label hits the size limitation. // Skipping layers is allowed here and only affects performance. if err := labels.Validate(targetImageLayersSizeLabel, layerSizes+ls); err != nil { break } layerSizes += ls } } c.Annotations[targetImageLayersSizeLabel] = strings.TrimSuffix(layerSizes, ",") } } } return children, nil }) } } soci-snapshotter-0.4.1/fs/span-manager/000077500000000000000000000000001454010642300200015ustar00rootroot00000000000000soci-snapshotter-0.4.1/fs/span-manager/span.go000066400000000000000000000057521454010642300213020ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanmanager import ( "errors" "fmt" "sync" "sync/atomic" "github.com/awslabs/soci-snapshotter/ztoc/compression" ) type spanState int var errInvalidSpanStateTransition = errors.New("invalid span state transition") const ( // A span is in Unrequested state when it's not requested from remote. unrequested spanState = iota // A span is in Requested state when it's requested from remote but its content hasn't been returned. requested // A span is in Fetched state when its content is fetched from remote and compressed data is cached. fetched // A span is in Uncompressed state when it's uncompressed and its uncompressed content is cached. uncompressed ) const ( // Default number of tries fetching data from remote and verifying the digest. defaultSpanVerificationFailureRetries = 3 ) // map of valid span transtions: current state -> valid new states. // stateTransitionMap is kept minimum so we won't change state by accident. // We should keep it documented when each transition will happen. 
var stateTransitionMap = map[spanState][]spanState{ unrequested: { // when span starts being fetched; it makes other goroutines aware of this requested, }, requested: { // when a span fetch fails; change back to unrequested so other goroutines can request again unrequested, // when bg-fetcher fetches and caches compressed span fetched, // when span data request comes; span is fetched, uncompressed and cached uncompressed, }, fetched: { // when span data request comes and span is fetched by bg-fetcher; compressed span is available in cache uncompressed, }, } type span struct { id compression.SpanID startCompOffset compression.Offset endCompOffset compression.Offset startUncompOffset compression.Offset endUncompOffset compression.Offset state atomic.Value mu sync.Mutex } func (s *span) checkState(expected spanState) bool { state := s.state.Load().(spanState) return state == expected } func (s *span) setState(state spanState) error { err := s.validateStateTransition(state) if err != nil { return err } s.state.Store(state) return nil } func (s *span) validateStateTransition(newState spanState) error { state := s.state.Load().(spanState) for _, s := range stateTransitionMap[state] { if newState == s { return nil } } return fmt.Errorf("%w: %v -> %v", errInvalidSpanStateTransition, state, newState) } soci-snapshotter-0.4.1/fs/span-manager/span_manager.go000066400000000000000000000331161454010642300227670ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package spanmanager import ( "bytes" "context" "errors" "fmt" "io" "runtime" "github.com/awslabs/soci-snapshotter/cache" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/opencontainers/go-digest" "golang.org/x/sync/errgroup" ) // Specific error types raised by SpanManager. var ( ErrSpanNotAvailable = errors.New("span not available in cache") ErrIncorrectSpanDigest = errors.New("span digests do not match") ErrExceedMaxSpan = errors.New("span id larger than max span id") ) type MultiReaderCloser struct { c []io.Closer io.Reader } func (mrc *MultiReaderCloser) Close() error { errs := []error{} for _, c := range mrc.c { if err := c.Close(); err != nil { errs = append(errs, err) } } return errors.Join(errs...) } type SectionReaderCloser struct { c io.Closer *io.SectionReader } func (src *SectionReaderCloser) Close() error { return src.c.Close() } // SpanManager fetches and caches spans of a given layer. type SpanManager struct { cache cache.BlobCache cacheOpt []cache.Option zinfo compression.Zinfo r *io.SectionReader // reader for contents of the spans managed by SpanManager spans []*span ztoc *ztoc.Ztoc maxSpanVerificationFailureRetries int } type spanInfo struct { // starting span id of the requested contents spanStart compression.SpanID // ending span id of the requested contents spanEnd compression.SpanID // start offsets of the requested contents within the spans startOffInSpan []compression.Offset // end offsets the requested contents within the spans endOffInSpan []compression.Offset // indexes of the spans in the buffer spanIndexInBuf []compression.Offset } // New creates a SpanManager with given ztoc and content reader, and builds all // spans based on the ztoc. 
func New(ztoc *ztoc.Ztoc, r *io.SectionReader, cache cache.BlobCache, retries int, cacheOpt ...cache.Option) *SpanManager { index, err := ztoc.Zinfo() if err != nil { return nil } spans := make([]*span, ztoc.MaxSpanID+1) m := &SpanManager{ cache: cache, cacheOpt: cacheOpt, zinfo: index, r: r, spans: spans, ztoc: ztoc, maxSpanVerificationFailureRetries: retries, } if m.maxSpanVerificationFailureRetries < 0 { m.maxSpanVerificationFailureRetries = defaultSpanVerificationFailureRetries } m.buildAllSpans() runtime.SetFinalizer(m, func(m *SpanManager) { m.Close() }) return m } func (m *SpanManager) buildAllSpans() { var i compression.SpanID for i = 0; i <= m.ztoc.MaxSpanID; i++ { s := span{ id: i, startCompOffset: m.zinfo.StartCompressedOffset(i), endCompOffset: m.zinfo.EndCompressedOffset(i, m.ztoc.CompressedArchiveSize), startUncompOffset: m.zinfo.StartUncompressedOffset(i), endUncompOffset: m.zinfo.EndUncompressedOffset(i, m.ztoc.UncompressedArchiveSize), } m.spans[i] = &s m.spans[i].state.Store(unrequested) } } // FetchSingleSpan invokes the reader to fetch the span in the background and cache // the span without uncompressing. It is invoked by the BackgroundFetcher. // span state change: unrequested -> requested -> fetched. func (m *SpanManager) FetchSingleSpan(spanID compression.SpanID) error { if spanID > m.ztoc.MaxSpanID { return ErrExceedMaxSpan } // return directly if span is not in `unrequested` s := m.spans[spanID] if !s.checkState(unrequested) { return nil } s.mu.Lock() defer s.mu.Unlock() // check again after acquiring Lock if !s.checkState(unrequested) { return nil } _, err := m.fetchAndCacheSpan(spanID, false) return err } // resolveSpan ensures the span exists in cache and is uncompressed by calling // `getSpanContent`. Only for testing. 
func (m *SpanManager) resolveSpan(spanID compression.SpanID) error { if spanID > m.ztoc.MaxSpanID { return ErrExceedMaxSpan } // this func itself doesn't use the returned span data _, err := m.getSpanContent(spanID, 0, m.spans[spanID].endUncompOffset) return err } // GetContents returns a reader for the requested contents. The contents may be // across multiple spans. func (m *SpanManager) GetContents(startUncompOffset, endUncompOffset compression.Offset) (io.ReadCloser, error) { si := m.getSpanInfo(startUncompOffset, endUncompOffset) numSpans := si.spanEnd - si.spanStart + 1 spanReaders := make([]io.Reader, numSpans) spanClosers := make([]io.Closer, numSpans) eg, _ := errgroup.WithContext(context.Background()) var i compression.SpanID for i = 0; i < numSpans; i++ { j := i eg.Go(func() error { spanID := j + si.spanStart r, err := m.getSpanContent(spanID, si.startOffInSpan[j], si.endOffInSpan[j]) if err != nil { return err } spanReaders[j] = r spanClosers[j] = r return nil }) } if err := eg.Wait(); err != nil { return nil, err } return &MultiReaderCloser{spanClosers, io.MultiReader(spanReaders...)}, nil } // getSpanInfo returns spanInfo from the offsets of the requested file func (m *SpanManager) getSpanInfo(offsetStart, offsetEnd compression.Offset) *spanInfo { spanStart := m.zinfo.UncompressedOffsetToSpanID(offsetStart) spanEnd := m.zinfo.UncompressedOffsetToSpanID(offsetEnd) numSpans := spanEnd - spanStart + 1 start := make([]compression.Offset, numSpans) end := make([]compression.Offset, numSpans) index := make([]compression.Offset, numSpans) var bufSize compression.Offset for i := spanStart; i <= spanEnd; i++ { j := i - spanStart index[j] = bufSize s := m.spans[i] uncompSpanSize := s.endUncompOffset - s.startUncompOffset if offsetStart > s.startUncompOffset { start[j] = offsetStart - s.startUncompOffset } if offsetEnd < s.endUncompOffset { end[j] = offsetEnd - s.startUncompOffset } else { end[j] = uncompSpanSize } bufSize += end[j] - start[j] } spanInfo := 
spanInfo{ spanStart: spanStart, spanEnd: spanEnd, startOffInSpan: start, endOffInSpan: end, spanIndexInBuf: index, } return &spanInfo } // getSpanContent gets uncompressed span content (specified by [offsetStart:offsetEnd]), // which is returned as an `io.Reader`. // // It resolves the span to ensure it exists and is uncompressed in cache: // 1. For `uncompressed` span, directly return the reader from the cache. // 2. For `fetched` span, read and uncompress the compressed span from cache, cache and // return the reader from the uncompressed span. // 3. For `unrequested` span, fetch-uncompress-cache the span data, return the reader // from the uncompressed span // 4. No span state lock will be acquired in `requested` state. func (m *SpanManager) getSpanContent(spanID compression.SpanID, offsetStart, offsetEnd compression.Offset) (io.ReadCloser, error) { s := m.spans[spanID] size := offsetEnd - offsetStart // return from cache directly if cached and uncompressed if s.checkState(uncompressed) { return m.getSpanFromCache(s.id, offsetStart, size) } s.mu.Lock() defer s.mu.Unlock() // check again after acquiring lock if s.checkState(uncompressed) { return m.getSpanFromCache(s.id, offsetStart, size) } // if cached but not uncompressed, uncompress and cache the span content if s.checkState(fetched) { // get compressed span from the cache compressedSize := s.endCompOffset - s.startCompOffset r, err := m.getSpanFromCache(s.id, 0, compressedSize) if err != nil { return nil, err } defer r.Close() // read compressed span compressedBuf, err := io.ReadAll(r) if err != nil { return nil, err } // uncompress span uncompSpanBuf, err := m.uncompressSpan(s, compressedBuf) if err != nil { return nil, err } // cache uncompressed span if err := m.addSpanToCache(s.id, uncompSpanBuf, m.cacheOpt...); err != nil { return nil, err } if err := s.setState(uncompressed); err != nil { return nil, err } return io.NopCloser(bytes.NewReader(uncompSpanBuf[offsetStart : offsetStart+size])), nil } // 
fetch-uncompress-cache span: span state can only be `unrequested` since // no goroutine will release span state lock in `requested` state uncompBuf, err := m.fetchAndCacheSpan(s.id, true) if err != nil { return nil, err } buf := bytes.NewBuffer(uncompBuf[offsetStart : offsetStart+size]) return io.NopCloser(buf), nil } // fetchAndCacheSpan fetches a span, uncompresses the span if `uncompress == true`, // caches and returns the span content. The span state is set to `fetched/uncompressed`, // depending on if `uncompress` is enabled. // The caller needs to check the span state (e.g. `unrequested`) and acquires the // span's state lock before calling. func (m *SpanManager) fetchAndCacheSpan(spanID compression.SpanID, uncompress bool) (buf []byte, err error) { s := m.spans[spanID] // change to `requested`; if fetch/cache fails, change back to `unrequested` // so other goroutines can request again. if err := s.setState(requested); err != nil { return nil, err } defer func() { if err != nil && s.checkState(requested) { s.setState(unrequested) } }() // fetch compressed span compressedBuf, err := m.fetchSpanWithRetries(spanID) if err != nil { return nil, err } buf = compressedBuf var state = fetched if uncompress { // uncompress span uncompSpanBuf, err := m.uncompressSpan(s, compressedBuf) if err != nil { return nil, err } buf = uncompSpanBuf state = uncompressed } // cache span data if err := m.addSpanToCache(spanID, buf, m.cacheOpt...); err != nil { return nil, err } if err := s.setState(state); err != nil { return nil, err } return buf, nil } // fetchSpanWithRetries fetches the requested data and verifies that the span digest matches the one in the ztoc. // It will retry the fetch and verification m.maxSpanVerificationFailureRetries times. // It does not retry when there is an error fetching the data, because retries already happen lower in the stack in httpFetcher. // If there is an error fetching data from remote, it is not an transient error. 
func (m *SpanManager) fetchSpanWithRetries(spanID compression.SpanID) ([]byte, error) { s := m.spans[spanID] offset := s.startCompOffset compressedSize := s.endCompOffset - s.startCompOffset compressedBuf := make([]byte, compressedSize) var ( err error n int ) for i := 0; i < m.maxSpanVerificationFailureRetries+1; i++ { n, err = m.r.ReadAt(compressedBuf, int64(offset)) // if the n = len(p) bytes returned by ReadAt are at the end of the input source, // ReadAt may return either err == EOF or err == nil: https://pkg.go.dev/io#ReaderAt if err != nil && err != io.EOF { return []byte{}, err } if n != len(compressedBuf) { return []byte{}, fmt.Errorf("unexpected data size for reading compressed span. read = %d, expected = %d", n, len(compressedBuf)) } if err = m.verifySpanContents(compressedBuf, spanID); err == nil { return compressedBuf, nil } } return []byte{}, err } // uncompressSpan uses zinfo to extract uncompressed span data from compressed // span data. func (m *SpanManager) uncompressSpan(s *span, compressedBuf []byte) ([]byte, error) { uncompSize := s.endUncompOffset - s.startUncompOffset // Theoretically, a span can be empty. If that happens, just return an empty buffer. if uncompSize == 0 { return []byte{}, nil } bytes, err := m.zinfo.ExtractDataFromBuffer(compressedBuf, uncompSize, s.startUncompOffset, s.id) if err != nil { return nil, err } return bytes, nil } // addSpanToCache adds contents of the span to the cache. // A non-nil error is returned if the data is not written to the cache. func (m *SpanManager) addSpanToCache(spanID compression.SpanID, contents []byte, opts ...cache.Option) error { w, err := m.cache.Add(fmt.Sprintf("%d", spanID), opts...) if err != nil { return err } defer w.Close() _, err = w.Write(contents) if err != nil { w.Abort() return err } w.Commit() return nil } // getSpanFromCache returns the cached span content as an `io.Reader`. // `offset` is the offset of the requested contents within the span. 
// `size` is the size of the requested contents. func (m *SpanManager) getSpanFromCache(spanID compression.SpanID, offset, size compression.Offset) (io.ReadCloser, error) { r, err := m.cache.Get(fmt.Sprintf("%d", spanID)) if err != nil { return nil, fmt.Errorf("%w: %w", ErrSpanNotAvailable, err) } return &SectionReaderCloser{r, io.NewSectionReader(r, int64(offset), int64(size))}, nil } // verifySpanContents calculates span digest from its compressed bytes, and compare // with the digest stored in ztoc. func (m *SpanManager) verifySpanContents(compressedData []byte, spanID compression.SpanID) error { actual := digest.FromBytes(compressedData) expected := m.ztoc.SpanDigests[spanID] if actual != expected { return fmt.Errorf("expected %v but got %v: %w", expected, actual, ErrIncorrectSpanDigest) } return nil } // Close closes both the underlying zinfo data and blob cache. func (m *SpanManager) Close() { m.zinfo.Close() m.cache.Close() } soci-snapshotter-0.4.1/fs/span-manager/span_manager_test.go000066400000000000000000000256761454010642300240420ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package spanmanager import ( "bytes" "compress/gzip" "errors" "fmt" "io" "testing" "github.com/awslabs/soci-snapshotter/cache" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" ) func TestSpanManager(t *testing.T) { var spanSize compression.Offset = 65536 // 64 KiB fileName := "span-manager-test" testCases := []struct { name string maxSpans compression.SpanID sectionReader *io.SectionReader expectedError error }{ { name: "a file from 1 span", maxSpans: 1, }, { name: "a file from 100 spans", maxSpans: 100, }, { name: "span digest verification fails", maxSpans: 100, sectionReader: io.NewSectionReader(readerFn(func(b []byte, _ int64) (int, error) { var sz compression.Offset = compression.Offset(len(b)) copy(b, testutil.RandomByteData(int64(sz))) return len(b), nil }), 0, 10000000), expectedError: ErrIncorrectSpanDigest, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { var err error defer func() { if err != nil && !errors.Is(err, tc.expectedError) { t.Fatal(err) } }() fileContent := []byte{} for i := 0; i < int(tc.maxSpans); i++ { fileContent = append(fileContent, testutil.RandomByteData(int64(spanSize))...) 
} tarEntries := []testutil.TarEntry{ testutil.File(fileName, string(fileContent)), } toc, r, err := ztoc.BuildZtocReader(t, tarEntries, gzip.BestCompression, int64(spanSize)) if err != nil { err = fmt.Errorf("failed to create ztoc: %w", err) return } if tc.sectionReader != nil { r = tc.sectionReader } cache := cache.NewMemoryCache() defer cache.Close() m := New(toc, r, cache, 0) // Test GetContent fileContentFromSpans, err := getFileContentFromSpans(m, toc, fileName) if err != nil { return } if !bytes.Equal(fileContent, fileContentFromSpans) { err = fmt.Errorf("file contents are not the same as span contents") return } // Test resolving all spans var i compression.SpanID for i = 0; i <= toc.MaxSpanID; i++ { err := m.resolveSpan(i) if err != nil { t.Fatalf("error resolving span %d. error: %v", i, err) } } // Test resolveSpan returning ErrExceedMaxSpan for span id larger than max span id resolveSpanErr := m.resolveSpan(toc.MaxSpanID + 1) if !errors.Is(resolveSpanErr, ErrExceedMaxSpan) { t.Fatalf("failed returning ErrExceedMaxSpan for span id larger than max span id") } }) } } func TestSpanManagerCache(t *testing.T) { var spanSize compression.Offset = 65536 // 64 KiB content := testutil.RandomByteData(int64(spanSize)) tarEntries := []testutil.TarEntry{ testutil.File("span-manager-cache-test", string(content)), } toc, r, err := ztoc.BuildZtocReader(t, tarEntries, gzip.BestCompression, int64(spanSize)) if err != nil { t.Fatalf("failed to create ztoc: %v", err) } cache := cache.NewMemoryCache() defer cache.Close() m := New(toc, r, cache, 0) spanID := 0 err = m.resolveSpan(compression.SpanID(spanID)) if err != nil { t.Fatalf("failed to resolve span 0: %v", err) } testCases := []struct { name string offset compression.Offset size compression.Offset }{ { name: "offset 0", offset: 0, size: 100, }, { name: "offset 20000", offset: 20000, size: 500, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Test resolveSpanFromCache spanR, err := 
m.getSpanContent(compression.SpanID(spanID), tc.offset, tc.offset+tc.size) if err != nil { t.Fatalf("error resolving span from cache") } spanContent, err := io.ReadAll(spanR) if err != nil && err != io.EOF { t.Fatalf("error reading span content") } if tc.size != compression.Offset(len(spanContent)) { t.Fatalf("size of span content from cache is not expected") } }) } } func TestStateTransition(t *testing.T) { var spanSize compression.Offset = 65536 // 64 KiB content := testutil.RandomByteData(int64(spanSize)) tarEntries := []testutil.TarEntry{ testutil.File("set-span-test", string(content)), } toc, r, err := ztoc.BuildZtocReader(t, tarEntries, gzip.BestCompression, int64(spanSize)) if err != nil { t.Fatalf("failed to create ztoc: %v", err) } cache := cache.NewMemoryCache() defer cache.Close() m := New(toc, r, cache, 0) // check initial span states for i := uint32(0); i <= uint32(toc.MaxSpanID); i++ { state := m.spans[i].state.Load().(spanState) if state != unrequested { t.Fatalf("failed initializing span states to Unrequested") } } testCases := []struct { name string spanID compression.SpanID isBgFetch bool }{ { name: "span 0 - bgfetch", spanID: 0, isBgFetch: true, }, { name: "span 0 - on demand fetch", spanID: 0, }, { name: "max span - bgfetch", spanID: m.ztoc.MaxSpanID, isBgFetch: true, }, { name: "max span - on demand fetch", spanID: m.ztoc.MaxSpanID, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { s := m.spans[tc.spanID] if tc.isBgFetch { err := m.FetchSingleSpan(tc.spanID) if err != nil { t.Fatalf("failed resolving the span for prefetch: %v", err) } state := s.state.Load().(spanState) if state != fetched { t.Fatalf("failed transitioning to Fetched state") } } else { _, err := m.getSpanContent(tc.spanID, 0, s.endUncompOffset-s.startUncompOffset) if err != nil { t.Fatalf("failed getting the span for on-demand fetch: %v", err) } state := s.state.Load().(spanState) if state != uncompressed { t.Fatalf("failed transitioning to Uncompressed 
state") } } }) } } func TestValidateState(t *testing.T) { testCases := []struct { name string currentState spanState newState []spanState expectedErr error }{ { name: "span in Unrequested state with valid new state", currentState: unrequested, newState: []spanState{requested}, expectedErr: nil, }, { name: "span in Unrequested state with invalid new state", currentState: unrequested, newState: []spanState{unrequested, fetched, uncompressed}, expectedErr: errInvalidSpanStateTransition, }, { name: "span in Requested state with valid new state", currentState: requested, newState: []spanState{unrequested, fetched, uncompressed}, expectedErr: nil, }, { name: "span in Requested state with invalid new state", currentState: requested, newState: []spanState{requested}, expectedErr: errInvalidSpanStateTransition, }, { name: "span in Fetched state with valid new state", currentState: fetched, newState: []spanState{uncompressed}, expectedErr: nil, }, { name: "span in Fetched state with invalid new state", currentState: fetched, newState: []spanState{unrequested, requested, fetched}, expectedErr: errInvalidSpanStateTransition, }, { name: "span in Uncompressed state with valid new state", currentState: uncompressed, newState: []spanState{}, expectedErr: nil, }, { name: "span in Uncompressed state with invalid new state", currentState: uncompressed, newState: []spanState{unrequested, requested, fetched, uncompressed}, expectedErr: errInvalidSpanStateTransition, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { for _, ns := range tc.newState { s := span{} s.state.Store(tc.currentState) err := s.validateStateTransition(ns) if !errors.Is(err, tc.expectedErr) { t.Fatalf("failed validateState. 
current state: %v, new state: %v", tc.currentState, ns) } } }) } } func TestSpanManagerRetries(t *testing.T) { testCases := []struct { name string spanManagerRetries int readerErrors int expectedErr error }{ { name: "reader returns correct data first time", spanManagerRetries: 3, readerErrors: 0, }, { name: "reader returns correct data the last time", spanManagerRetries: 3, readerErrors: 2, }, { name: "reader returns ErrIncorrectSpanDigest", spanManagerRetries: 3, readerErrors: 5, expectedErr: ErrIncorrectSpanDigest, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { entries := []testutil.TarEntry{ testutil.File("test", string(testutil.RandomByteData(10000000))), } ztoc, sr, err := ztoc.BuildZtocReader(t, entries, gzip.DefaultCompression, 100000) if err != nil { t.Fatal(err) } rdr := &retryableReaderAt{inner: sr, maxErrors: tc.readerErrors} sr = io.NewSectionReader(rdr, 0, 10000000) sm := New(ztoc, sr, cache.NewMemoryCache(), tc.spanManagerRetries) for i := 0; i < int(ztoc.MaxSpanID); i++ { rdr.errCount = 0 _, err := sm.fetchAndCacheSpan(compression.SpanID(i), true) if !errors.Is(err, tc.expectedErr) { t.Fatalf("unexpected err; expected %v, got %v", tc.expectedErr, err) } min := func(x, y int) int { if x < y { return x } return y } if rdr.errCount != min(tc.spanManagerRetries+1, tc.readerErrors) { t.Fatalf("retry count is unexpected; expected %d, got %d", min(tc.spanManagerRetries+1, tc.readerErrors), rdr.errCount) } } }) } } // A retryableReaderAt returns incorrect data to the caller maxErrors - 1 times. 
type retryableReaderAt struct { inner *io.SectionReader errCount int maxErrors int } func (r *retryableReaderAt) ReadAt(buf []byte, off int64) (int, error) { n, err := r.inner.ReadAt(buf, off) if (err != nil && err != io.EOF) || n != len(buf) { return n, err } if r.errCount < r.maxErrors { r.errCount++ buf[0] = buf[0] ^ 0xff } return n, err } func getFileContentFromSpans(m *SpanManager, toc *ztoc.Ztoc, fileName string) ([]byte, error) { metadata, err := toc.GetMetadataEntry(fileName) if err != nil { return nil, err } offsetStart := metadata.UncompressedOffset offsetEnd := offsetStart + metadata.UncompressedSize r, err := m.GetContents(offsetStart, offsetEnd) if err != nil { return nil, err } content, err := io.ReadAll(r) if err != nil { return nil, err } return content, nil } type readerFn func([]byte, int64) (int, error) func (f readerFn) ReadAt(b []byte, n int64) (int, error) { return f(b, n) } soci-snapshotter-0.4.1/fs/unpacker.go000066400000000000000000000066271454010642300176020ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package fs import ( "context" "fmt" "io" "strings" "github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/mount" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) type Unpacker interface { // Unpack takes care of getting the layer specified by descriptor `desc`, // decompressing it, putting it in the directory with the path `mountpoint` // and applying the difference to the parent layers if there is any. // After that the layer can be mounted as non-remote snapshot. Unpack(ctx context.Context, desc ocispec.Descriptor, mountpoint string, mounts []mount.Mount) error } type Archive interface { // Apply decompresses the compressed stream represented by reader `r` and // applies it to the directory `root`. Apply(ctx context.Context, root string, r io.Reader, opts ...archive.ApplyOpt) (int64, error) } type layerArchive struct { } func NewLayerArchive() Archive { return &layerArchive{} } func (la *layerArchive) Apply(ctx context.Context, root string, r io.Reader, opts ...archive.ApplyOpt) (int64, error) { // we use containerd implementation here // decompress first and then apply decompressReader, err := compression.DecompressStream(r) if err != nil { return 0, fmt.Errorf("cannot decompress the stream: %w", err) } defer decompressReader.Close() return archive.Apply(ctx, root, decompressReader, opts...) 
} type layerUnpacker struct { fetcher Fetcher archive Archive } func NewLayerUnpacker(fetcher Fetcher, archive Archive) Unpacker { return &layerUnpacker{ fetcher: fetcher, archive: archive, } } func (lu *layerUnpacker) Unpack(ctx context.Context, desc ocispec.Descriptor, mountpoint string, mounts []mount.Mount) error { rc, local, err := lu.fetcher.Fetch(ctx, desc) if err != nil { return fmt.Errorf("cannot fetch layer: %w", err) } defer rc.Close() if !local { if err = lu.fetcher.Store(ctx, desc, rc); err != nil { return fmt.Errorf("cannot store layer: %w", err) } rc.Close() rc, _, err = lu.fetcher.Fetch(ctx, desc) if err != nil { return fmt.Errorf("cannot fetch layer: %w", err) } } parents, err := getLayerParents(mounts[0].Options) if err != nil { return fmt.Errorf("cannot get layer parents: %w", err) } opts := []archive.ApplyOpt{ archive.WithConvertWhiteout(archive.OverlayConvertWhiteout), } if len(parents) > 0 { opts = append(opts, archive.WithParents(parents)) } _, err = lu.archive.Apply(ctx, mountpoint, rc, opts...) if err != nil { return fmt.Errorf("cannot apply layer: %w", err) } return nil } func getLayerParents(options []string) (lower []string, err error) { const lowerdirPrefix = "lowerdir=" for _, o := range options { if strings.HasPrefix(o, lowerdirPrefix) { lower = strings.Split(strings.TrimPrefix(o, lowerdirPrefix), ":") } } return } soci-snapshotter-0.4.1/fs/unpacker_test.go000066400000000000000000000132741454010642300206350ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package fs import ( "bytes" "context" "fmt" "io" "testing" "github.com/containerd/containerd/archive" "github.com/containerd/containerd/mount" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) func TestFailureModes(t *testing.T) { testCases := []struct { name string mountpoint string unpackedSize int64 desc ocispec.Descriptor applyFails bool fetchFails bool storeFails bool }{ { name: "first fetch fails", mountpoint: "/some/path/filename", unpackedSize: 65535, applyFails: false, fetchFails: true, storeFails: false, }, { name: "store fails", mountpoint: "/some/path/filename", unpackedSize: 65535, applyFails: false, fetchFails: false, storeFails: true, }, { name: "apply fails", mountpoint: "/some/path/filename", unpackedSize: 65535, applyFails: true, fetchFails: false, storeFails: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fetcher := newFakeFetcher(false, tc.storeFails, tc.fetchFails) archive := newFakeArchive(tc.unpackedSize, tc.applyFails) unpacker := NewLayerUnpacker(fetcher, archive) mounts := getFakeMounts() err := unpacker.Unpack(context.Background(), tc.desc, tc.mountpoint, mounts) if err == nil { t.Fatalf("%v: there should've been an error due to the following cases: fetch=%v, store=%v, apply=%v", tc.name, tc.fetchFails, tc.storeFails, tc.applyFails) } if tc.fetchFails && fetcher.fetchCount != 1 { t.Fatalf("%v: fetch must have been called once, but was called %d times", tc.name, fetcher.fetchCount) } if tc.storeFails && fetcher.storeCount != 1 { t.Fatalf("%v: store must have been called once, but was called %d times", tc.name, fetcher.storeCount) } if tc.applyFails && archive.applyCount != 1 { t.Fatalf("%v: apply must have been called once, but was called %d times", tc.name, archive.applyCount) } }) } } func TestUnpackHappyPath(t *testing.T) { testCases := []struct { name string mountpoint string unpackedSize 
int64 hasLocal bool desc ocispec.Descriptor }{ { name: "happy path layer exists locally", mountpoint: "/some/path/filename", unpackedSize: 65535, hasLocal: true, }, { name: "happy path layer does not exist locally", mountpoint: "/some/path/filename", unpackedSize: 10000, hasLocal: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fetcher := newFakeFetcher(tc.hasLocal, false, false) archive := newFakeArchive(tc.unpackedSize, false) unpacker := NewLayerUnpacker(fetcher, archive) mounts := getFakeMounts() err := unpacker.Unpack(context.Background(), tc.desc, tc.mountpoint, mounts) if err != nil { t.Fatalf("%v: failed to unpack layer", tc.name) } if tc.hasLocal { if fetcher.storeCount != 0 { t.Fatalf("%v: Store was called on fetcher", tc.name) } if fetcher.fetchCount != 1 { t.Fatalf("%v: Fetch must be called only once if the layer exists locally, but was called %d times", tc.name, fetcher.fetchCount) } } else { if fetcher.storeCount != 1 { t.Fatalf("%v: Store must be called only once, but was called %d times", tc.name, fetcher.storeCount) } if fetcher.fetchCount != 2 { t.Fatalf("%v: Fetch must be called twice, but was called %d times", tc.name, fetcher.fetchCount) } } if archive.applyCount != 1 { t.Fatalf("%v: Apply() must be called only once, but was called %d times", tc.name, archive.applyCount) } }) } } type fakeArtifactFetcher struct { storeFails bool fetchFails bool storeCount int64 fetchCount int64 hasLocal bool } func newFakeFetcher(hasLocal, storeFails, fetchFails bool) *fakeArtifactFetcher { return &fakeArtifactFetcher{ storeFails: storeFails, fetchFails: fetchFails, hasLocal: hasLocal, } } func (f *fakeArtifactFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, bool, error) { f.fetchCount++ if f.fetchFails { return nil, false, fmt.Errorf("dummy error on Fetch()") } return io.NopCloser(bytes.NewBuffer([]byte("test"))), f.hasLocal, nil } func (f *fakeArtifactFetcher) Store(ctx context.Context, desc 
ocispec.Descriptor, reader io.Reader) error { f.storeCount++ if f.storeFails { return fmt.Errorf("dummy error on Store()") } f.hasLocal = true return nil } type fakeArchive struct { applyFails bool unpackedSize int64 applyCount int64 } func newFakeArchive(unpackedSize int64, applyFails bool) *fakeArchive { return &fakeArchive{ applyFails: applyFails, unpackedSize: unpackedSize, } } func (a *fakeArchive) Apply(ctx context.Context, root string, r io.Reader, opts ...archive.ApplyOpt) (int64, error) { a.applyCount++ if a.applyFails { return 0, fmt.Errorf("dummy error on Apply()") } return a.unpackedSize, nil } func getFakeMounts() []mount.Mount { return []mount.Mount{ { Type: "overlay", Source: "overlay", Options: []string{ "workdir=somedir1", "upperdir=somedir2", "lowerdir=somedir3:somedir4", }, }, } } soci-snapshotter-0.4.1/go.mod000066400000000000000000000121001454010642300161200ustar00rootroot00000000000000module github.com/awslabs/soci-snapshotter go 1.20 require ( github.com/containerd/containerd v1.7.2 github.com/containerd/continuity v0.4.1 github.com/docker/cli v24.0.4+incompatible github.com/docker/go-metrics v0.0.1 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/google/flatbuffers v23.5.26+incompatible github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 github.com/hanwen/go-fuse/v2 v2.4.1 github.com/hashicorp/go-retryablehttp v0.7.4 github.com/klauspost/compress v1.16.7 github.com/moby/sys/mountinfo v0.6.2 github.com/montanaflynn/stats v0.7.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc4 github.com/pelletier/go-toml v1.9.5 github.com/prometheus/client_golang v1.16.0 github.com/rs/xid v1.5.0 github.com/sirupsen/logrus v1.9.3 go.etcd.io/bbolt v1.3.7 golang.org/x/crypto v0.11.0 golang.org/x/sync v0.3.0 golang.org/x/sys v0.10.0 golang.org/x/time v0.3.0 google.golang.org/grpc v1.56.2 k8s.io/api v0.26.3 k8s.io/apimachinery v0.26.3 k8s.io/client-go v0.26.3 oras.land/oras-go/v2 v2.2.1 
) require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.10.0-rc.9 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/ttrpc v1.2.2 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/docker v23.0.5+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.12.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/term v0.10.0 // indirect golang.org/x/text v0.11.0 // indirect golang.org/x/tools v0.11.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.0.3 // indirect k8s.io/klog/v2 v2.90.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) // Temporary fork for avoiding importing patent-protected code: https://github.com/hashicorp/golang-lru/issues/73 replace github.com/hashicorp/golang-lru => github.com/ktock/golang-lru 
v0.5.5-0.20211029085301-ec551be6f75c soci-snapshotter-0.4.1/go.sum000066400000000000000000001164741454010642300161700ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.10.0-rc.9 h1:B4mguSolFL2yOHl0JjQxo0Si2Vwipj/Cbib4pyJ4pKA= github.com/Microsoft/hcsshim v0.10.0-rc.9/go.mod h1:1g6+xpige+npSTrEkdm8JOZxOjJ9McQiT0JkEpzyZqA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo= github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI= github.com/containerd/continuity v0.4.1 h1:wQnVrjIyQ8vhU2sgOiL5T07jo+ouqc2bnKsv5/EqGhU= github.com/containerd/continuity v0.4.1/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/cli v24.0.4+incompatible h1:Y3bYF9ekNTm2VFz5U/0BlMdJy73D+Y1iAAZ8l63Ydzw= 
github.com/docker/cli v24.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod 
h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hanwen/go-fuse/v2 v2.4.1 h1:/iB2ENOCPamqovm4AaXhEkXaX5fGAG0At1F6b7Ve0sE= github.com/hanwen/go-fuse/v2 v2.4.1/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod 
h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod 
h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec 
v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod 
h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= golang.org/x/oauth2 v0.8.0/go.mod 
h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= k8s.io/utils 
v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.2.1 h1:3VJTYqy5KfelEF9c2jo1MLSpr+TM3mX8K42wzZcd6qE= oras.land/oras-go/v2 v2.2.1/go.mod h1:GeAwLuC4G/JpNwkd+bSZ6SkDMGaaYglt6YK2WvZP7uQ= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= soci-snapshotter-0.4.1/go.work.example000066400000000000000000000000331454010642300177570ustar00rootroot00000000000000go 1.20 use ( . ./cmd ) soci-snapshotter-0.4.1/integration/000077500000000000000000000000001454010642300173435ustar00rootroot00000000000000soci-snapshotter-0.4.1/integration/config/000077500000000000000000000000001454010642300206105ustar00rootroot00000000000000soci-snapshotter-0.4.1/integration/config/etc/000077500000000000000000000000001454010642300213635ustar00rootroot00000000000000soci-snapshotter-0.4.1/integration/config/etc/containerd/000077500000000000000000000000001454010642300235115ustar00rootroot00000000000000soci-snapshotter-0.4.1/integration/config/etc/containerd/config.toml000066400000000000000000000007561454010642300256630ustar00rootroot00000000000000# explicitly use v2 config format version = 2 disabled_plugins = [ "io.containerd.snapshotter.v1.aufs", "io.containerd.snapshotter.v1.btrfs", "io.containerd.snapshotter.v1.devmapper", "io.containerd.snapshotter.v1.zfs", "io.containerd.tracing.processor.v1.otlp", "io.containerd.internal.v1.tracing", "io.containerd.grpc.v1.cri", ] # Use soci snapshotter [proxy_plugins] [proxy_plugins.soci] type = "snapshot" 
address = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" soci-snapshotter-0.4.1/integration/config/etc/soci-snapshotter-grpc/000077500000000000000000000000001454010642300256215ustar00rootroot00000000000000soci-snapshotter-0.4.1/integration/config/etc/soci-snapshotter-grpc/config.toml000066400000000000000000000000751454010642300277650ustar00rootroot00000000000000# Append configurations for Soci Snapshotter in TOML format. soci-snapshotter-0.4.1/integration/create_test.go000066400000000000000000000145211454010642300221770ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package integration import ( "bytes" "encoding/json" "path/filepath" "testing" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/containerd/containerd/platforms" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) func TestSociCreateEmptyIndex(t *testing.T) { sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", "") imgInfo := dockerhub(alpineImage) indexDigest := buildIndex(sh, imgInfo, withMinLayerSize(1000000000), withAllowErrors) if indexDigest != "" { t.Fatal("index was created despite all layers being smaller than min layer size") } } func TestSociCreateSparseIndex(t *testing.T) { tests := []struct { name string minLayerSize int64 }{ { name: "test create for rethinkdb:latest with min-layer-size 0 bytes", minLayerSize: 0, }, { name: "test create for rethinkdb:latest with min-layer-size 1000000 bytes", minLayerSize: 1000000, }, { name: "test create for rethinkdb:latest with min-layer-size 10000000 bytes", minLayerSize: 10000000, }, } const manifestDigest = "sha256:4452aadba3e99771ff3559735dab16279c5a352359d79f38737c6fdca941c6e5" const containerImage = "rethinkdb@" + manifestDigest for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", "") imgInfo := dockerhub(containerImage) indexDigest := buildIndex(sh, imgInfo, withMinLayerSize(tt.minLayerSize)) var index soci.Index contentStoreBlobPath, _ := testutil.GetContentStoreBlobPath(config.DefaultContentStoreType) if indexDigest != "" { dgst, err := digest.Parse(indexDigest) if err != nil { t.Fatalf("cannot parse digest: %v", err) } checkpoints := fetchContentFromPath(sh, filepath.Join(contentStoreBlobPath, dgst.Encoded())) err = soci.DecodeIndex(bytes.NewReader(checkpoints), &index) if err != nil { 
t.Fatalf("cannot get index data: %v", err) } } imageManifestJSON, err := FetchContentByDigest(sh, store.ContainerdContentStoreType, manifestDigest) if err != nil { t.Fatalf("cannot fetch content %s: %v", manifestDigest, err) } imageManifest := new(ocispec.Manifest) if err := json.Unmarshal(imageManifestJSON, imageManifest); err != nil { t.Fatalf("cannot unmarshal index manifest: %v", err) } includedLayers := make(map[string]struct{}) for _, layer := range imageManifest.Layers { if layer.Size >= tt.minLayerSize { includedLayers[layer.Digest.String()] = struct{}{} } } if indexDigest == "" { if len(includedLayers) > 0 { t.Fatalf("failed to validate soci index: unexpected layer count; expected=%v, got=0", len(includedLayers)) } } else { if err := validateSociIndex(sh, config.DefaultContentStoreType, index, manifestDigest, includedLayers); err != nil { t.Fatalf("failed to validate soci index: %v", err) } } }) } } func TestSociCreate(t *testing.T) { sh, done := newSnapshotterBaseShell(t) defer done() tests := []struct { name string containerImage string platform string contentStoreType store.ContentStoreType }{ { name: "test create for nginx", containerImage: nginxImage, }, { name: "test create for alpine", containerImage: alpineImage, }, // The following two tests guarantee that we have tested both content // stores { name: "test create for drupal on soci content store", containerImage: drupalImage, contentStoreType: store.SociContentStoreType, }, { name: "test create for drupal on containerd content store", containerImage: drupalImage, contentStoreType: store.ContainerdContentStoreType, }, // The following two tests guarantee that we have tested at least // 2 different platforms. 
Depending on what host they run on, one // might be a duplicate of the earlier test using the default platform { name: "test create for ubuntu amd64", containerImage: ubuntuImage, platform: "linux/amd64", }, { name: "test create for ubuntu arm64", containerImage: ubuntuImage, platform: "linux/arm64", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rebootContainerd(t, sh, "", getSnapshotterConfigToml(t, false, GetContentStoreConfigToml(store.WithType(tt.contentStoreType)))) platform := platforms.DefaultSpec() if tt.platform != "" { var err error platform, err = platforms.Parse(tt.platform) if err != nil { t.Fatalf("could not parse platform: %v", err) } } imgInfo := dockerhub(tt.containerImage, withPlatform(platform)) indexDigest := buildIndex(sh, imgInfo, withMinLayerSize(0), withContentStoreType(tt.contentStoreType)) contentStoreBlobPath, err := testutil.GetContentStoreBlobPath(tt.contentStoreType) if err != nil { t.Fatalf("cannot get content store path: %v", err) } dgst, err := digest.Parse(indexDigest) if err != nil { t.Fatalf("cannot parse digest: %v", err) } checkpoints := fetchContentFromPath(sh, filepath.Join(contentStoreBlobPath, dgst.Encoded())) var sociIndex soci.Index err = soci.DecodeIndex(bytes.NewReader(checkpoints), &sociIndex) if err != nil { t.Fatalf("cannot get soci index: %v", err) } m, err := getManifestDigest(sh, imgInfo.ref, platform) if err != nil { t.Fatalf("failed to get manifest digest: %v", err) } if err := validateSociIndex(sh, tt.contentStoreType, sociIndex, m, nil); err != nil { t.Fatalf("failed to validate soci index: %v", err) } }) } } soci-snapshotter-0.4.1/integration/index_test.go000066400000000000000000000265371454010642300220550ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "bufio" "bytes" "strings" "testing" "time" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci/store" shell "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/containerd/containerd/platforms" ) type testImageIndex struct { imgName string platform string imgInfo imageInfo sociIndexDigest string ztocDigests []string } func prepareSociIndices(t *testing.T, sh *shell.Shell, opt ...indexBuildOption) map[string]testImageIndex { imageIndexes := []testImageIndex{ { imgName: ubuntuImage, platform: "linux/arm64", }, { imgName: alpineImage, platform: "linux/amd64", }, { imgName: nginxImage, platform: "linux/arm64", }, { imgName: drupalImage, platform: "linux/amd64", }, } return prepareCustomSociIndices(t, sh, imageIndexes, opt...) 
} func prepareCustomSociIndices(t *testing.T, sh *shell.Shell, images []testImageIndex, opt ...indexBuildOption) map[string]testImageIndex { indexBuildConfig := defaultIndexBuildConfig() for _, o := range opt { o(&indexBuildConfig) } testImages := make(map[string]testImageIndex) for _, tii := range images { testImages[tii.imgName] = tii } for imgName, img := range testImages { platform := platforms.DefaultSpec() if img.platform != "" { var err error platform, err = platforms.Parse(img.platform) if err != nil { t.Fatalf("could not parse platform: %v", err) } } img.imgInfo = dockerhub(imgName, withPlatform(platform)) img.sociIndexDigest = buildIndex(sh, img.imgInfo, withIndexBuildConfig(indexBuildConfig), withMinLayerSize(0)) ztocDigests, err := getZtocDigestsForImage(sh, img.imgInfo) if err != nil { t.Fatalf("could not get ztoc digests: %v", err) } img.ztocDigests = ztocDigests testImages[imgName] = img } return testImages } func getZtocDigestsForImage(sh *shell.Shell, img imageInfo) ([]string, error) { ztocInfoBytes := sh.O("soci", "ztoc", "list", "--image-ref", img.ref) scanner := bufio.NewScanner(bytes.NewReader(ztocInfoBytes)) scanner.Split(bufio.ScanLines) var lines []string for scanner.Scan() { lines = append(lines, scanner.Text()) } var ztocDigests []string for i := 1; i < len(lines); i++ { entries := strings.Fields(lines[i]) ztocDigests = append(ztocDigests, entries[0]) } return ztocDigests, nil } func TestSociIndexInfo(t *testing.T) { t.Parallel() sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", "") testImages := prepareSociIndices(t, sh) for imgName, img := range testImages { tests := []struct { name string digest string expectErr bool }{ { name: imgName + " with index digest", digest: img.sociIndexDigest, expectErr: false, }, { name: imgName + " with ztoc digest", digest: img.ztocDigests[0], expectErr: true, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { sociIndex, err := sociIndexFromDigest(sh, 
tt.digest) if !tt.expectErr { if err != nil { t.Fatal(err) } m, err := getManifestDigest(sh, img.imgInfo.ref, img.imgInfo.platform) if err != nil { t.Fatalf("failed to get manifest digest: %v", err) } if err := validateSociIndex(sh, config.DefaultContentStoreType, sociIndex, m, nil); err != nil { t.Fatalf("failed to validate soci index: %v", err) } } else if err == nil { t.Fatalf("failed to return err") } }) } } } func TestSociIndexList(t *testing.T) { t.Parallel() sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", "") testImages := prepareSociIndices(t, sh) existHandlerFull := func(output string, img testImageIndex) bool { // full output should have both img ref and soci index digest return strings.Contains(output, img.imgInfo.ref) && strings.Contains(output, img.sociIndexDigest) } existHandlerQuiet := func(output string, img testImageIndex) bool { // a given soci index should match exactly one line in the quiet output // for the first index, it should have prefix of digest+\n // for the rest, it should have `\n` before and after its digest return strings.HasPrefix(output, img.sociIndexDigest+"\n") || strings.Contains(output, "\n"+img.sociIndexDigest+"\n") } existHandlerExact := func(output string, img testImageIndex) bool { // when quiet output has only one index, it should be the exact soci_index_digest string return strings.Trim(output, "\n") == img.sociIndexDigest } // each test runs a soci command, filter to get expected images, and check // (only) expected images exist in command output tests := []struct { name string command []string filter func(img testImageIndex) bool // return true if `img` is expected in command output existHandler func(output string, img testImageIndex) bool // return true if `img` appears in `output` }{ { name: "`soci index ls` should list all soci indices", command: []string{"soci", "index", "list"}, filter: func(img testImageIndex) bool { return true }, existHandler: existHandlerFull, }, { name: "`soci 
index ls -q` should list digests of all soci indices", command: []string{"soci", "index", "list", "-q"}, filter: func(img testImageIndex) bool { return true }, existHandler: existHandlerQuiet, }, { name: "`soci index ls --ref imgRef` should only list soci indices for the image", command: []string{"soci", "index", "list", "--ref", testImages[ubuntuImage].imgInfo.ref}, filter: func(img testImageIndex) bool { return img.imgInfo.ref == testImages[ubuntuImage].imgInfo.ref }, existHandler: existHandlerFull, }, { name: "`soci index ls --platform linux/arm64` should only list soci indices for arm64 platform", command: []string{"soci", "index", "list", "--platform", "linux/arm64"}, filter: func(img testImageIndex) bool { return img.platform == "linux/arm64" }, existHandler: existHandlerFull, }, { // make sure the image only generates one soci index (the test expects a single digest output) name: "`soci index ls --ref imgRef -q` should print the exact soci index digest", command: []string{"soci", "index", "list", "-q", "--ref", testImages[ubuntuImage].imgInfo.ref}, filter: func(img testImageIndex) bool { return img.imgInfo.ref == testImages[ubuntuImage].imgInfo.ref }, existHandler: existHandlerExact, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { output := string(sh.O(tt.command...)) for _, img := range testImages { expected := tt.filter(img) if expected && !tt.existHandler(output, img) { t.Fatalf("output doesn't have expected soci index: image: %s, soci index: %s", img.imgInfo.ref, img.sociIndexDigest) } if !expected && tt.existHandler(output, img) { t.Fatalf("output has unexpected soci index: image: %s, soci index: %s", img.imgInfo.ref, img.sociIndexDigest) } } }) } } func TestSociIndexRemove(t *testing.T) { sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, getContainerdConfigToml(t, false, ` [plugins."io.containerd.gc.v1.scheduler"] deletion_threshold = 1 startup_delay = "10ms" `), "") t.Run("soci index rm 
indexDigest removes an index", func(t *testing.T) { testImages := prepareSociIndices(t, sh) target := testImages[ubuntuImage] indicesRaw := sh. X("soci", "index", "rm", target.sociIndexDigest). O("soci", "index", "list", "-q") if strings.Contains(string(indicesRaw), target.sociIndexDigest) { t.Fatalf("\"soci index rm indexDigest\" doesn't remove the given index: %s", target.sociIndexDigest) } }) t.Run("soci index rm --ref imgRef removes all indices for imgRef", func(t *testing.T) { testImages := prepareSociIndices(t, sh) target := testImages[ubuntuImage] indicesRaw := sh. X("soci", "index", "rm", "--ref", target.imgInfo.ref). O("soci", "index", "list", "-q", "--ref", target.imgInfo.ref) indices := strings.Trim(string(indicesRaw), "\n") if indices != "" { t.Fatalf("\"soci index rm --ref\" doesn't remove all soci indices for the given image %s, remaining indices: %s", target.imgInfo.ref, indices) } }) t.Run("soci index rm on containerd content store removes orphaned zTOCs and not unorphaned zTOCs", func(t *testing.T) { testImages := prepareCustomSociIndices(t, sh, []testImageIndex{{imgName: nginxAlpineImage}, {imgName: nginxAlpineImage2}}, withContentStoreType(store.ContainerdContentStoreType)) remove := testImages[nginxAlpineImage] keep := testImages[nginxAlpineImage2] commonZtocs := make(map[string]struct{}) removeZtocs := make(map[string]struct{}) for _, dgst := range remove.ztocDigests { removeZtocs[dgst] = struct{}{} } for _, dgst := range keep.ztocDigests { if _, ok := removeZtocs[dgst]; ok { commonZtocs[dgst] = struct{}{} } } if len(commonZtocs) == 0 { t.Fatalf("test invalidated due to no common zTOCs between %s and %s", remove.sociIndexDigest, keep.sociIndexDigest) } if len(removeZtocs)-len(commonZtocs) < 1 { t.Fatalf("test invalidated due to no unique zTOCs between %s and %s", remove.sociIndexDigest, keep.sociIndexDigest) } sh.X("soci", "--content-store", string(store.ContainerdContentStoreType), "index", "rm", remove.sociIndexDigest) time.Sleep(1 * 
time.Second) // clean up zTOCs from the artifact db if they were removed from the content store due to garbage collection sh.X("soci", "--content-store", string(store.ContainerdContentStoreType), "rebuild-db") ztocsRaw := string(sh.O("soci", "ztoc", "list", "-q")) for dgst := range removeZtocs { if _, ok := commonZtocs[dgst]; ok { if !strings.Contains(ztocsRaw, dgst) { t.Fatalf("index removal removed non-oprhaned ztoc: %s", dgst) } } else { if strings.Contains(ztocsRaw, dgst) { t.Fatalf("index removal didn't remove oprhaned ztoc: %s", dgst) } } } }) t.Run("soci index rm $(soci index ls -q) removes all existing indices", func(t *testing.T) { _ = prepareSociIndices(t, sh) // a walkaround due to that go exec doesn't support command substitution. allIndices := strings.Trim(string(sh.O("soci", "index", "list", "-q")), "\n") rmCommand := append([]string{"soci", "index", "rm"}, strings.Split(allIndices, "\n")...) indicesRaw := sh. X(rmCommand...). O("soci", "index", "list", "-q") indices := strings.Trim(string(indicesRaw), "\n") if indices != "" { t.Fatalf("\"soci index rm $(soci index ls -q)\" doesn't remove all soci indices, remaining indices: %s", indices) } }) t.Run("soci index rm with an invalid index digest", func(t *testing.T) { invalidDgst := "digest" _, err := sh.OLog("soci", "index", "rm", invalidDgst) if err == nil { t.Fatalf("failed to return err") } }) } soci-snapshotter-0.4.1/integration/main_test.go000066400000000000000000000105421454010642300216570ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "os" "testing" shell "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/awslabs/soci-snapshotter/util/dockershell/compose" dexec "github.com/awslabs/soci-snapshotter/util/dockershell/exec" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/sirupsen/logrus" ) const ( enableTestEnv = "ENABLE_INTEGRATION_TEST" containerdLogLevelEnv = "CONTAINERD_LOG_LEVEL" sociLogLevelEnv = "SOCI_LOG_LEVEL" ) // this can be overwritten by setting up env variables specified by // `containerdLogLevelEnv`/`sociLogLevelEnv`. var ( containerdLogLevel = "warn" sociLogLevel = "debug" ) // TestMain is a main function for integration tests. // This checks the system requirements the run tests. func TestMain(m *testing.M) { if os.Getenv(enableTestEnv) != "true" { testutil.TestingL.Printf("%s is not true. skipping integration test", enableTestEnv) return } if logLevel := os.Getenv(containerdLogLevelEnv); logLevel != "" { if _, err := logrus.ParseLevel(logLevel); err != nil { testutil.TestingL.Printf("unsupported log level: %s. skipping integration test", logLevel) return } containerdLogLevel = logLevel } if logLevel := os.Getenv(sociLogLevelEnv); logLevel != "" { if _, err := logrus.ParseLevel(logLevel); err != nil { testutil.TestingL.Printf("unsupported log level: %s. 
skipping integration test", logLevel) return } sociLogLevel = logLevel } if err := shell.Supported(); err != nil { testutil.TestingL.Fatalf("shell pkg is not supported: %v", err) } if err := compose.Supported(); err != nil { testutil.TestingL.Fatalf("compose pkg is not supported: %v", err) } if err := dexec.Supported(); err != nil { testutil.TestingL.Fatalf("dockershell/exec pkg is not supported: %v", err) } cleanups, err := setup() if err != nil { testutil.TestingL.Fatalf("failed integration test set up: %v", err) } c := m.Run() err = teardown(cleanups) if err != nil { testutil.TestingL.Fatalf("failed integration test tear down: %v", err) } os.Exit(c) } // setup can be used to initialize things before integration tests start (as of now it only builds the services used by the integration tests so they can be referenced) func setup() ([]func() error, error) { var ( serviceName = "testing" targetStage = "containerd-snapshotter-base" registry2Stage = "registry2" ) pRoot, err := testutil.GetProjectRoot() if err != nil { return nil, err } buildArgs, err := getBuildArgsFromEnv() if err != nil { return nil, err } composeYaml, err := testutil.ApplyTextTemplate(composeBuildTemplate, dockerComposeYaml{ ServiceName: serviceName, ImageContextDir: pRoot, TargetStage: targetStage, Registry2Stage: registry2Stage, }) if err != nil { return nil, err } cOpts := []compose.Option{ compose.WithBuildArgs(buildArgs...), compose.WithStdio(testutil.TestingLogDest()), } return compose.Build(composeYaml, cOpts...) } // teardown takes a list of cleanup functions and executes them after integration tests have ended func teardown(cleanups []func() error) error { for i := 0; i < len(cleanups); i++ { err := cleanups[i]() if err != nil { return err } } return nil } soci-snapshotter-0.4.1/integration/metrics_test.go000066400000000000000000000316711454010642300224070ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "bytes" "fmt" "math" "strconv" "strings" "testing" "time" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/fs/layer" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/util/testutil" shell "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) const ( tcpMetricsAddress = "localhost:1338" unixMetricsAddress = "/var/lib/soci-snapshotter-grpc/metrics.sock" metricsPath = "/metrics" ) const tcpMetricsConfig = ` metrics_address="` + tcpMetricsAddress + `" ` const unixMetricsConfig = ` metrics_address="` + unixMetricsAddress + `" metrics_network="unix" ` func TestMetrics(t *testing.T) { tests := []struct { name string config string command []string }{ { name: "tcp", config: tcpMetricsConfig, command: []string{"curl", "--fail", tcpMetricsAddress + metricsPath}, }, { name: "unix", config: unixMetricsConfig, command: []string{"curl", "--fail", "--unix-socket", unixMetricsAddress, "http://localhost" + metricsPath}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", tt.config) sh.X(tt.command...) 
if err := sh.Err(); err != nil { t.Fatal(err) } }) } } func TestOverlayFallbackMetric(t *testing.T) { sh, done := newSnapshotterBaseShell(t) defer done() testCases := []struct { name string image string indexDigestFn func(*shell.Shell, store.ContentStoreType, imageInfo) string expectedFallbackCount int }{ { name: "image with all layers having ztocs and no fs.Mount error results in 0 overlay fallback", image: rabbitmqImage, indexDigestFn: func(sh *shell.Shell, contentStoreType store.ContentStoreType, image imageInfo) string { return buildIndex(sh, image, withMinLayerSize(0), withContentStoreType(contentStoreType)) }, expectedFallbackCount: 0, }, { name: "image with some layers not having ztoc and no fs.Mount results in 0 overlay fallback", image: rabbitmqImage, indexDigestFn: func(sh *shell.Shell, contentStoreType store.ContentStoreType, image imageInfo) string { return buildIndex(sh, image, withMinLayerSize(defaultMinLayerSize), withContentStoreType(contentStoreType)) }, expectedFallbackCount: 0, }, { name: "image with fs.Mount errors results in non-zero overlay fallback", image: rabbitmqImage, indexDigestFn: func(_ *shell.Shell, _ store.ContentStoreType, _ imageInfo) string { return "invalid index string" }, expectedFallbackCount: 10, }, } for _, tc := range testCases { for _, contentStoreType := range store.ContentStoreTypes() { t.Run(tc.name+" with "+string(contentStoreType)+" content store", func(t *testing.T) { rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, tcpMetricsConfig, GetContentStoreConfigToml(store.WithType(contentStoreType)))) imgInfo := dockerhub(tc.image) sh.X("nerdctl", "pull", "-q", imgInfo.ref) indexDigest := tc.indexDigestFn(sh, contentStoreType, imgInfo) sh.X("soci", "--content-store", string(contentStoreType), "image", "rpull", "--soci-index-digest", indexDigest, imgInfo.ref) curlOutput := string(sh.O("curl", tcpMetricsAddress+metricsPath)) if err := checkOverlayFallbackCount(curlOutput, 
tc.expectedFallbackCount); err != nil { t.Fatal(err) } }) } } } func TestFuseOperationFailureMetrics(t *testing.T) { const logFuseOperationConfig = ` [fuse] log_fuse_operations = true ` sh, done := newSnapshotterBaseShell(t) defer done() manipulateZtocMetadata := func(zt *ztoc.Ztoc) { for i, md := range zt.FileMetadata { md.UncompressedOffset += 2 md.UncompressedSize = math.MaxInt64 md.Xattrs = map[string]string{"foo": "bar"} zt.FileMetadata[i] = md } } testCases := []struct { name string image string indexDigestFn func(*testing.T, *shell.Shell, imageInfo) string metricToCheck string expectFuseOperationFailure bool }{ { name: "image with valid ztocs and index doesn't cause fuse file.read failures", image: rabbitmqImage, indexDigestFn: func(t *testing.T, sh *shell.Shell, image imageInfo) string { return buildIndex(sh, image, withMinLayerSize(0)) }, // even a valid index/ztoc produces some fuse operation failures such as // node.lookup and node.getxattr failures, so we only check a specific fuse failure metric. 
metricToCheck: commonmetrics.FuseFileReadFailureCount, expectFuseOperationFailure: false, }, { name: "image with valid-formatted but invalid-data ztocs causes fuse file.read failures", image: rabbitmqImage, indexDigestFn: func(t *testing.T, sh *shell.Shell, image imageInfo) string { indexDigest, err := buildIndexByManipulatingZtocData(sh, buildIndex(sh, image, withMinLayerSize(0)), manipulateZtocMetadata) if err != nil { t.Fatal(err) } return indexDigest }, metricToCheck: commonmetrics.FuseFileReadFailureCount, expectFuseOperationFailure: true, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, tcpMetricsConfig, logFuseOperationConfig)) imgInfo := dockerhub(tc.image) sh.X("nerdctl", "pull", "-q", imgInfo.ref) indexDigest := tc.indexDigestFn(t, sh, imgInfo) sh.X("soci", "image", "rpull", "--soci-index-digest", indexDigest, imgInfo.ref) // this command may fail due to fuse operation failure, use XLog to avoid crashing shell sh.XLog("ctr", "run", "--rm", "--snapshotter=soci", imgInfo.ref, "test", "echo", "hi") curlOutput := string(sh.O("curl", tcpMetricsAddress+metricsPath)) checkFuseOperationFailureMetrics(t, curlOutput, tc.metricToCheck, tc.expectFuseOperationFailure) }) } } func TestFuseOperationCountMetrics(t *testing.T) { const snapshotterConfig = ` fuse_metrics_emit_wait_duration_sec = 10 ` sh, done := newSnapshotterBaseShell(t) defer done() testCases := []struct { name string image string }{ { name: "rabbitmq image", image: rabbitmqImage, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, tcpMetricsConfig, snapshotterConfig)) imgInfo := dockerhub(tc.image) sh.X("nerdctl", "pull", "-q", imgInfo.ref) indexDigest := buildIndex(sh, imgInfo) sh.X("soci", "image", "rpull", "--soci-index-digest", indexDigest, imgInfo.ref) sh.XLog("ctr", 
"run", "-d", "--snapshotter=soci", imgInfo.ref, "test", "echo", "hi") curlOutput := string(sh.O("curl", tcpMetricsAddress+metricsPath)) for _, m := range layer.FuseOpsList { if checkMetricExists(curlOutput, m) { t.Fatalf("got unexpected metric: %s", m) } } time.Sleep(10 * time.Second) curlOutput = string(sh.O("curl", tcpMetricsAddress+metricsPath)) for _, m := range layer.FuseOpsList { if !checkMetricExists(curlOutput, m) { t.Fatalf("missing expected metric: %s", m) } } }) } } func TestBackgroundFetchMetrics(t *testing.T) { const backgroundFetchConfig = ` [background_fetch] silence_period_msec = 1000 fetch_period_msec = 100 emit_metric_period_sec = 2 ` bgFetchMetricsToCheck := []string{ commonmetrics.BackgroundFetchWorkQueueSize, commonmetrics.BackgroundSpanFetchCount, } sh, done := newSnapshotterBaseShell(t) defer done() testCases := []struct { name string image string }{ { name: "drupal image", image: drupalImage, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, tcpMetricsConfig, backgroundFetchConfig)) imgInfo := dockerhub(tc.image) sh.X("nerdctl", "pull", "-q", imgInfo.ref) indexDigest := buildIndex(sh, imgInfo) sh.X("soci", "image", "rpull", "--soci-index-digest", indexDigest, imgInfo.ref) sh.XLog("ctr", "run", "-d", "--snapshotter=soci", imgInfo.ref, "test", "echo", "hi") time.Sleep(5 * time.Second) curlOutput := string(sh.O("curl", tcpMetricsAddress+metricsPath)) for _, m := range bgFetchMetricsToCheck { if !checkMetricExists(curlOutput, m) { t.Fatalf("missing expected metric: %s", m) } } }) } } // buildIndexByManipulatingZtocData produces a new soci index by manipulating // the ztocs of an existing index specified by `indexDigest`. // // The new index (and ztocs) are stored separately and the original index keeps unchanged. // The manipulated ztocs are (de)serializable but have meaningless ztoc data (manipuated by `manipulator`). 
// This helps test soci behaviors when ztocs have valid format but wrong/corrupted data. func buildIndexByManipulatingZtocData(sh *shell.Shell, indexDigest string, manipulator func(*ztoc.Ztoc)) (string, error) { sh.O("ctr", "i", "ls") index, err := sociIndexFromDigest(sh, indexDigest) if err != nil { return "", err } var newZtocDescs []ocispec.Descriptor for _, blob := range index.Blobs { origZtocDigestString := blob.Digest.String() origZtocDigest, err := digest.Parse(origZtocDigestString) if err != nil { return "", fmt.Errorf("cannot parse ztoc digest %s: %w", origZtocDigestString, err) } origBlobBytes, err := FetchContentByDigest(sh, config.DefaultContentStoreType, origZtocDigest) if err != nil { return "", fmt.Errorf("cannot fetch ztoc digest %s: %w", origZtocDigestString, err) } origBlobReader := bytes.NewReader(origBlobBytes) zt, err := ztoc.Unmarshal(origBlobReader) if err != nil { return "", fmt.Errorf("invalid ztoc %s from soci index %s: %w", origZtocDigestString, indexDigest, err) } // manipulate the ztoc manipulator(zt) newZtocReader, newZtocDesc, err := ztoc.Marshal(zt) if err != nil { return "", fmt.Errorf("unable to marshal ztoc %s: %s", newZtocDesc.Digest.String(), err) } err = testutil.InjectContentStoreContentFromReader(sh, config.DefaultContentStoreType, newZtocDesc, newZtocReader) if err != nil { return "", fmt.Errorf("cannot inject manipulated ztoc %s: %w", newZtocDesc.Digest.String(), err) } newZtocDesc.MediaType = soci.SociLayerMediaType newZtocDesc.Annotations = blob.Annotations newZtocDescs = append(newZtocDescs, newZtocDesc) } subject := ocispec.Descriptor{ Digest: index.Subject.Digest, Size: index.Subject.Size, } newIndex := soci.NewIndex(newZtocDescs, &subject, nil) b, err := soci.MarshalIndex(newIndex) if err != nil { return "", err } newIndexDigest := digest.FromBytes(b) desc := ocispec.Descriptor{Digest: newIndexDigest} err = testutil.InjectContentStoreContentFromBytes(sh, config.DefaultContentStoreType, desc, b) if err != nil { return 
"", err } return strings.Trim(newIndexDigest.String(), "\n"), nil } // checkFuseOperationFailureMetrics checks if output from metrics endpoint includes // a specific fuse operation failure metrics (or any fuse op failure if an empty string is given) func checkFuseOperationFailureMetrics(t *testing.T, output string, metricToCheck string, expectOpFailure bool) { metricCountSum := 0 lines := strings.Split(output, "\n") for _, line := range lines { // skip non-fuse and fuse_mount_failure_count metrics if !strings.Contains(line, "fuse") || strings.Contains(line, commonmetrics.FuseMountFailureCount) { continue } parts := strings.Split(line, " ") if metricCount, err := strconv.Atoi(parts[len(parts)-1]); err == nil && metricCount != 0 { t.Logf("fuse operation failure metric: %s", line) if metricToCheck == "" || strings.Contains(line, metricToCheck) { metricCountSum += metricCount } } } if (metricCountSum != 0) != expectOpFailure { t.Fatalf("incorrect fuse operation failure metrics. metric: %s, total operation failure count: %d, expect fuse operation failure: %t", metricToCheck, metricCountSum, expectOpFailure) } } func checkMetricExists(output, metric string) bool { lines := strings.Split(output, "\n") for _, line := range lines { if strings.Contains(line, metric) { return true } } return false } soci-snapshotter-0.4.1/integration/pull_test.go000066400000000000000000000744741454010642300217250ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package integration

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"

	"github.com/awslabs/soci-snapshotter/config"
	"github.com/awslabs/soci-snapshotter/soci"
	"github.com/awslabs/soci-snapshotter/soci/store"
	shell "github.com/awslabs/soci-snapshotter/util/dockershell"
	"github.com/awslabs/soci-snapshotter/util/dockershell/compose"
	"github.com/awslabs/soci-snapshotter/util/testutil"
	"github.com/containerd/containerd/platforms"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// TestSnapshotterStartup tests to run containerd + snapshotter and check plugin is
// recognized by containerd
func TestSnapshotterStartup(t *testing.T) {
	t.Parallel()
	sh, done := newSnapshotterBaseShell(t)
	defer done()
	rebootContainerd(t, sh, "", "")
	found := false
	err := sh.ForEach(shell.C("ctr", "plugin", "ls"), func(l string) bool {
		info := strings.Fields(l)
		if len(info) < 4 {
			t.Fatalf("malformed plugin info: %v", info)
		}
		// Look for the soci snapshotter plugin row and check its status column is "ok".
		if info[0] == "io.containerd.snapshotter.v1" && info[1] == "soci" && info[3] == "ok" {
			found = true
			return false // stop iterating once found
		}
		return true
	})
	if err != nil || !found {
		t.Fatalf("failed to get soci snapshotter status using ctr plugin ls: %v", err)
	}
}

// TestOptimizeConsistentSociArtifact tests if the Soci artifact is produced consistently across runs.
// This test does the following:
// 1. Generate Soci artifact
// 2. Copy the local content store to another folder
// 3. Generate Soci artifact for the same image again
// 4. Do the comparison of the Soci artifact blobs
// 5. Clean up the local content store folder and the folder used for comparison
// Due to the reason that this test will be doing manipulations with local content store folder,
// it should be never run in parallel with the other tests.
func TestOptimizeConsistentSociArtifact(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	tests := []struct {
		name           string
		containerImage string
	}{
		{
			name:           "soci artifact is consistently built for ubuntu",
			containerImage: ubuntuImage,
		},
		{
			name:           "soci artifact is consistently built for nginx",
			containerImage: nginxImage,
		},
		{
			name:           "soci artifact is consistently built for alpine",
			containerImage: alpineImage,
		},
	}

	for _, tt := range tests {
		for _, contentStoreType := range store.ContentStoreTypes() {
			t.Run(tt.name+" with "+string(contentStoreType+" content store"), func(t *testing.T) {
				contentStorePath, err := store.GetContentStorePath(contentStoreType)
				if err != nil {
					t.Fatalf("cannot get local content store path: %v", err)
				}
				// build artifacts from scratch
				rebootContainerd(t, sh, "", getSnapshotterConfigToml(t, false, GetContentStoreConfigToml(store.WithType(contentStoreType))))
				// ensure the image is in the local registry
				copyImage(sh, dockerhub(tt.containerImage), regConfig.mirror(tt.containerImage))
				buildIndex(sh, regConfig.mirror(tt.containerImage), withMinLayerSize(0), withContentStoreType(contentStoreType))
				// copy the content store files
				sh.
					X("rm", "-rf", "copy").
					X("cp", "-r", contentStorePath, "copy")
				// build artifacts from scratch again
				rebootContainerd(t, sh, "", getSnapshotterConfigToml(t, false, GetContentStoreConfigToml(store.WithType(contentStoreType))))
				copyImage(sh, dockerhub(tt.containerImage), regConfig.mirror(tt.containerImage))
				buildIndex(sh, regConfig.mirror(tt.containerImage), withMinLayerSize(0), withContentStoreType(contentStoreType))

				// The blob listings of both runs must be identical.
				currContent := sh.O("ls", filepath.Join(contentStorePath, "blobs", "sha256"))
				prevContent := sh.O("ls", filepath.Join("copy", "blobs", "sha256"))
				if !bytes.Equal(currContent, prevContent) {
					t.Fatalf("local content store: previously generated artifact listing is different")
				}

				fileNames := strings.Fields(string(currContent))
				for _, fn := range fileNames {
					if fn == "artifacts.db" {
						// skipping artifacts.db, since this is bbolt file and we have no control over its internals
						continue
					}
					// cmp prints nothing when the two blobs are byte-identical.
					out, _ := sh.OLog("cmp", filepath.Join(contentStorePath, "blobs", "sha256", fn), filepath.Join("copy", "blobs", "sha256", fn))
					if string(out) != "" {
						t.Fatalf("the artifact is different: %v", string(out))
					}
				}
				sh.X("rm", "-rf", "copy")
			})
		}
	}
}

// TestLazyPullWithSparseIndex builds an index with a minimum layer size, lazily
// pulls the image, and verifies that layers below the threshold are downloaded
// into the snapshotter's content store while the rest are fuse-mounted remotely.
func TestLazyPullWithSparseIndex(t *testing.T) {
	imageName := rabbitmqImage
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
	_, minLayerSize, _ := middleSizeLayerInfo(t, sh, dockerhub(imageName))
	copyImage(sh, dockerhub(imageName), regConfig.mirror(imageName))
	indexDigest := buildIndex(sh, regConfig.mirror(imageName), withMinLayerSize(minLayerSize))

	fromNormalSnapshotter := func(image string) tarPipeExporter {
		return func(t *testing.T, tarExportArgs ...string) {
			rebootContainerd(t, sh, "", "")
			sh.X("nerdctl", "pull", "-q", image)
			sh.Pipe(nil, shell.C("ctr", "run", "--rm", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
		}
	}
	export := func(sh *shell.Shell, image string, tarExportArgs []string) {
		sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, image)
		sh.Pipe(nil, shell.C("soci", "run", "--rm", "--snapshotter=soci", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
	}

	imageManifestDigest, err := getManifestDigest(sh, dockerhub(imageName).ref, dockerhub(imageName).platform)
	if err != nil {
		t.Fatalf("failed to get manifest digest: %v", err)
	}
	dgst, err := digest.Parse(imageManifestDigest)
	if err != nil {
		t.Fatalf("failed to parse manifest digest: %v", err)
	}
	imageManifestJSON, err := FetchContentByDigest(sh, store.ContainerdContentStoreType, dgst)
	if err != nil {
		t.Fatalf("failed to fetch content %s: %v", dgst, err)
	}
	imageManifest := new(ocispec.Manifest)
	if err := json.Unmarshal(imageManifestJSON, imageManifest); err != nil {
		t.Fatalf("cannot unmarshal index manifest: %v", err)
	}
	// Layers smaller than minLayerSize have no ztoc in the sparse index, so the
	// snapshotter is expected to download them locally instead of lazy-loading.
	layersToDownload := make([]ocispec.Descriptor, 0)
	for _, layerBlob := range imageManifest.Layers {
		if layerBlob.Size < minLayerSize {
			layersToDownload = append(layersToDownload, layerBlob)
		}
	}
	remoteSnapshotsExpectedCount := len(imageManifest.Layers) - len(layersToDownload)
	tests := []struct {
		name string
		want tarPipeExporter
		test tarPipeExporter
	}{
		{
			name: "Soci",
			want: fromNormalSnapshotter(regConfig.mirror(imageName).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(imageName).ref
				rebootContainerd(t, sh, "", "")
				buildIndex(sh, regConfig.mirror(imageName), withMinLayerSize(minLayerSize))
				sh.X("ctr", "i", "rm", imageName)
				export(sh, image, tarExportArgs)
				checkFuseMounts(t, sh, remoteSnapshotsExpectedCount)
				checkLayersInSnapshottersContentStore(t, sh, layersToDownload)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			testSameTarContents(t, sh, tt.want, tt.test)
		})
	}
}

// checkFuseMounts asserts that the number of fuse.rawBridge mounts visible in
// the shell equals remoteSnapshotsExpectedCount.
func checkFuseMounts(t *testing.T, sh *shell.Shell, remoteSnapshotsExpectedCount int) {
	mounts := string(sh.O("mount"))
	remoteSnapshotsActualCount := strings.Count(mounts, "fuse.rawBridge")
	if remoteSnapshotsExpectedCount != remoteSnapshotsActualCount {
		t.Fatalf("incorrect number of remote snapshots; expected=%d, actual=%d", remoteSnapshotsExpectedCount, remoteSnapshotsActualCount)
	}
}

// checkLayersInSnapshottersContentStore asserts that each given layer blob
// exists in the snapshotter's local content store (via `ls` on its blob path).
func checkLayersInSnapshottersContentStore(t *testing.T, sh *shell.Shell, layers []ocispec.Descriptor) {
	for _, layer := range layers {
		contentStoreBlobPath, err := testutil.GetContentStoreBlobPath(config.DefaultContentStoreType)
		if err != nil {
			t.Fatalf("cannot get content store path: %v", err)
		}
		layerPath := filepath.Join(contentStoreBlobPath, layer.Digest.Encoded())
		// `ls <path>` echoes the path back when the file exists.
		existenceResult := strings.TrimSuffix(string(sh.O("ls", layerPath)), "\n")
		if layerPath != existenceResult {
			t.Fatalf("layer file %s was not found in snapshotter's local content store, the result of ls=%s", layerPath, existenceResult)
		}
	}
}

// TestLazyPull tests if lazy pulling works.
func TestLazyPull(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	optimizedImageName1 := rabbitmqImage
	optimizedImageName2 := nginxImage
	nonOptimizedImageName := ubuntuImage

	// Mirror images
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
	copyImage(sh, dockerhub(optimizedImageName1), regConfig.mirror(optimizedImageName1))
	copyImage(sh, dockerhub(optimizedImageName2), regConfig.mirror(optimizedImageName2))
	copyImage(sh, dockerhub(nonOptimizedImageName), regConfig.mirror(nonOptimizedImageName))
	indexDigest1 := buildIndex(sh, regConfig.mirror(optimizedImageName1), withMinLayerSize(0))
	indexDigest2 := buildIndex(sh, regConfig.mirror(optimizedImageName2), withMinLayerSize(0))

	// Test if contents are pulled
	fromNormalSnapshotter := func(image string) tarPipeExporter {
		return func(t *testing.T, tarExportArgs ...string) {
			rebootContainerd(t, sh, "", "")
			sh.X("nerdctl", "pull", "-q", image)
			sh.Pipe(nil, shell.C("ctr", "run", "--rm", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
		}
	}
	export := func(sh *shell.Shell, image string, tarExportArgs []string) {
		sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest1, image)
		sh.Pipe(nil, shell.C("soci", "run", "--rm", "--snapshotter=soci", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
	}

	// NOTE: these tests must be executed sequentially.
	tests := []struct {
		name string
		want tarPipeExporter
		test tarPipeExporter
	}{
		{
			name: "normal",
			want: fromNormalSnapshotter(regConfig.mirror(nonOptimizedImageName).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(nonOptimizedImageName).ref
				rebootContainerd(t, sh, "", "")
				export(sh, image, tarExportArgs)
			},
		},
		{
			name: "Soci",
			want: fromNormalSnapshotter(regConfig.mirror(optimizedImageName1).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(optimizedImageName1).ref
				m := rebootContainerd(t, sh, "", "")
				rsm, done := testutil.NewRemoteSnapshotMonitor(m)
				defer done()
				buildIndex(sh, regConfig.mirror(optimizedImageName1), withMinLayerSize(0))
				sh.X("ctr", "i", "rm", optimizedImageName1)
				export(sh, image, tarExportArgs)
				rsm.CheckAllRemoteSnapshots(t)
			},
		},
		{
			name: "multi-image",
			want: fromNormalSnapshotter(regConfig.mirror(optimizedImageName1).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(optimizedImageName1).ref
				m := rebootContainerd(t, sh, "", "")
				rsm, done := testutil.NewRemoteSnapshotMonitor(m)
				defer done()
				buildIndex(sh, regConfig.mirror(optimizedImageName2), withMinLayerSize(0))
				sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest2, regConfig.mirror(optimizedImageName2).ref)
				buildIndex(sh, regConfig.mirror(optimizedImageName1), withMinLayerSize(0))
				sh.X("ctr", "i", "rm", optimizedImageName1)
				export(sh, image, tarExportArgs)
				rsm.CheckAllRemoteSnapshots(t)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			testSameTarContents(t, sh, tt.want, tt.test)
		})
	}
}

// TestLazyPullNoIndexDigest tests if lazy pulling works when no index digest is provided
// (makes a Referrers API call)
func TestLazyPullNoIndexDigest(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	optimizedImageName := alpineImage
	nonOptimizedImageName := ubuntuImage

	// Mirror images
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
	copyImage(sh, dockerhub(optimizedImageName), regConfig.mirror(optimizedImageName))
	copyImage(sh, dockerhub(nonOptimizedImageName), regConfig.mirror(nonOptimizedImageName))
	buildIndex(sh, regConfig.mirror(optimizedImageName), withMinLayerSize(0))
	// Push the index so the registry's Referrers API can discover it during rpull.
	sh.X("soci", "push", "--user", regConfig.creds(), regConfig.mirror(optimizedImageName).ref)

	// Test if contents are pulled
	fromNormalSnapshotter := func(image string) tarPipeExporter {
		return func(t *testing.T, tarExportArgs ...string) {
			rebootContainerd(t, sh, "", "")
			sh.X("nerdctl", "pull", "-q", image)
			sh.Pipe(nil, shell.C("ctr", "run", "--rm", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
		}
	}
	// NOTE: no --soci-index-digest flag here; the snapshotter must discover the index.
	export := func(sh *shell.Shell, image string, tarExportArgs []string) {
		sh.X("soci", "image", "rpull", "--user", regConfig.creds(), image)
		sh.Pipe(nil, shell.C("soci", "run", "--rm", "--snapshotter=soci", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
	}

	// NOTE: these tests must be executed sequentially.
	tests := []struct {
		name                    string
		want                    tarPipeExporter
		test                    tarPipeExporter
		checkAllRemoteSnapshots bool
	}{
		{
			name: "normal",
			want: fromNormalSnapshotter(regConfig.mirror(nonOptimizedImageName).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(nonOptimizedImageName).ref
				export(sh, image, tarExportArgs)
			},
		},
		{
			name: "soci",
			want: fromNormalSnapshotter(regConfig.mirror(optimizedImageName).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(optimizedImageName).ref
				sh.X("ctr", "i", "rm", regConfig.mirror(optimizedImageName).ref)
				export(sh, image, tarExportArgs)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := rebootContainerd(t, sh, "", "")
			rsm, done := testutil.NewRemoteSnapshotMonitor(m)
			defer done()
			testSameTarContents(t, sh, tt.want, tt.test)
			if tt.checkAllRemoteSnapshots {
				rsm.CheckAllRemoteSnapshots(t)
			}
		})
	}
}

// TestPullWithAribtraryBlobInvalidZtocFormat tests the snapshotter behavior if an arbitrary blob is passed
// as a Ztoc. In this case, the flatbuffer deserialization will fail, which will lead
// to the snapshotter mounting the layer as a normal overlayfs mount.
func TestPullWithAribtraryBlobInvalidZtocFormat(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	images := []struct {
		name string
		ref  string
	}{
		{
			name: "rabbitmq",
			ref:  pinnedRabbitmqImage,
		},
	}

	fromNormalSnapshotter := func(image string) tarPipeExporter {
		return func(t *testing.T, tarExportArgs ...string) {
			rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
			sh.X("nerdctl", "pull", "-q", image)
			sh.Pipe(nil, shell.C("ctr", "run", "--rm", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
		}
	}
	export := func(sh *shell.Shell, image, sociIndexDigest string, tarExportArgs []string) {
		sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", sociIndexDigest, image)
		sh.Pipe(nil, shell.C("soci", "run", "--rm", "--snapshotter=soci", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
	}

	// buildMaliciousIndex creates a soci index whose "ztoc" blobs are random
	// bytes (not valid flatbuffers), injects them into the content store, and
	// returns the marshalled index plus the image's layer descriptors.
	buildMaliciousIndex := func(sh *shell.Shell, imgDigest string) ([]byte, []ocispec.Descriptor, error) {
		imgBytes := sh.O("ctr", "content", "get", imgDigest)
		var manifest ocispec.Manifest
		if err := json.Unmarshal(imgBytes, &manifest); err != nil {
			return nil, nil, err
		}
		var ztocDescs []ocispec.Descriptor
		for _, layer := range manifest.Layers {
			ztocBytes := testutil.RandomByteData(1000000)
			ztocDgst := digest.FromBytes(ztocBytes)
			desc := ocispec.Descriptor{
				MediaType: soci.SociLayerMediaType,
				Digest:    digest.FromBytes(ztocBytes),
				Size:      100000,
				Annotations: map[string]string{
					soci.IndexAnnotationImageLayerDigest:    layer.Digest.String(),
					soci.IndexAnnotationImageLayerMediaType: layer.MediaType,
				},
			}
			if err := testutil.InjectContentStoreContentFromBytes(sh, config.DefaultContentStoreType, desc, ztocBytes); err != nil {
				t.Fatalf("cannot write ztoc %s to content store: %v", ztocDgst.String(), err)
			}
			ztocDescs = append(ztocDescs, desc)
		}
		subject := ocispec.Descriptor{
			Digest: digest.Digest(imgDigest),
			Size:   int64(len(imgBytes)),
		}
		index := soci.NewIndex(ztocDescs, &subject, nil)
		b, err := soci.MarshalIndex(index)
		if err != nil {
			return nil, nil, err
		}
		return b, manifest.Layers, nil
	}

	for _, img := range images {
		t.Run(img.name, func(t *testing.T) {
			rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
			sociImage := regConfig.mirror(img.ref)
			copyImage(sh, dockerhub(img.ref), sociImage)
			pushedPlatformDigest, _ := sh.OLog("nerdctl", "image", "convert", "--platform", platforms.Format(sociImage.platform), sociImage.ref, "test")
			sociImage.ref = fmt.Sprintf("%s/%s@%s", regConfig.host, img.name, strings.TrimSpace(string(pushedPlatformDigest)))
			want := fromNormalSnapshotter(sociImage.ref)
			test := func(t *testing.T, tarExportArgs ...string) {
				image := sociImage.ref
				indexBytes, imgLayers, err := buildMaliciousIndex(sh, image[strings.IndexByte(image, '@')+1:])
				if err != nil {
					t.Fatal(err)
				}
				sh.X("ctr", "i", "rm", image)
				indexDigest := digest.FromBytes(indexBytes)
				desc := ocispec.Descriptor{
					Digest: indexDigest,
					Size:   int64(len(indexBytes)),
				}
				if err := testutil.InjectContentStoreContentFromBytes(sh, config.DefaultContentStoreType, desc, indexBytes); err != nil {
					t.Fatalf("cannot write index %s to content store: %v", indexDigest.String(), err)
				}
				export(sh, image, indexDigest.String(), tarExportArgs)
				// No fuse mounts expected: every layer should fall back to a
				// local mount, with all layer blobs in the local content store.
				checkFuseMounts(t, sh, 0)
				checkLayersInSnapshottersContentStore(t, sh, imgLayers)
			}
			testSameTarContents(t, sh, want, test)
		})
	}
}

// TestLazyPullNoBackgroundFetch tests if lazy pulling works without background fetch.
func TestLazyPullNoBackgroundFetch(t *testing.T) {
	// Snapshotter config snippet that disables the background fetcher.
	const backgroundFetcherConfig = `
[background_fetch]
disable = true
`
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	optimizedImageName1 := rabbitmqImage
	optimizedImageName2 := nginxImage
	nonOptimizedImageName := ubuntuImage

	// Mirror images
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, backgroundFetcherConfig))
	copyImage(sh, dockerhub(optimizedImageName1), regConfig.mirror(optimizedImageName1))
	copyImage(sh, dockerhub(optimizedImageName2), regConfig.mirror(optimizedImageName2))
	copyImage(sh, dockerhub(nonOptimizedImageName), regConfig.mirror(nonOptimizedImageName))
	indexDigest1 := buildIndex(sh, regConfig.mirror(optimizedImageName1), withMinLayerSize(0))
	indexDigest2 := buildIndex(sh, regConfig.mirror(optimizedImageName2), withMinLayerSize(0))

	// Test if contents are pulled
	fromNormalSnapshotter := func(image string) tarPipeExporter {
		return func(t *testing.T, tarExportArgs ...string) {
			rebootContainerd(t, sh, "", "")
			sh.X("nerdctl", "pull", "-q", image)
			sh.Pipe(nil, shell.C("ctr", "run", "--rm", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
		}
	}
	export := func(sh *shell.Shell, image string, tarExportArgs []string) {
		sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest1, image)
		sh.Pipe(nil, shell.C("soci", "run", "--rm", "--snapshotter=soci", image, "test", "tar", "-zc", "/usr"), tarExportArgs)
	}

	// NOTE: these tests must be executed sequentially.
	tests := []struct {
		name string
		want tarPipeExporter
		test tarPipeExporter
	}{
		{
			name: "normal",
			want: fromNormalSnapshotter(regConfig.mirror(nonOptimizedImageName).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(nonOptimizedImageName).ref
				rebootContainerd(t, sh, "", "")
				export(sh, image, tarExportArgs)
			},
		},
		{
			name: "Soci",
			want: fromNormalSnapshotter(regConfig.mirror(optimizedImageName1).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(optimizedImageName1).ref
				m := rebootContainerd(t, sh, "", "")
				rsm, done := testutil.NewRemoteSnapshotMonitor(m)
				defer done()
				buildIndex(sh, regConfig.mirror(optimizedImageName1), withMinLayerSize(0))
				sh.X("ctr", "i", "rm", optimizedImageName1)
				export(sh, image, tarExportArgs)
				rsm.CheckAllRemoteSnapshots(t)
			},
		},
		{
			name: "multi-image",
			want: fromNormalSnapshotter(regConfig.mirror(optimizedImageName1).ref),
			test: func(t *testing.T, tarExportArgs ...string) {
				image := regConfig.mirror(optimizedImageName1).ref
				m := rebootContainerd(t, sh, "", "")
				rsm, done := testutil.NewRemoteSnapshotMonitor(m)
				defer done()
				buildIndex(sh, regConfig.mirror(optimizedImageName2), withMinLayerSize(0))
				sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest2, regConfig.mirror(optimizedImageName2).ref)
				buildIndex(sh, regConfig.mirror(optimizedImageName1), withMinLayerSize(0))
				sh.X("ctr", "i", "rm", optimizedImageName1)
				export(sh, image, tarExportArgs)
				rsm.CheckAllRemoteSnapshots(t)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			testSameTarContents(t, sh, tt.want, tt.test)
		})
	}
}

// TestMirror tests if mirror & refreshing functionalities of snapshotter work
func TestMirror(t *testing.T) {
	var (
		reporter    = testutil.NewTestingReporter(t)
		caCertDir   = "/usr/local/share/ca-certificates"
		serviceName = "testing_mirror"
	)
	pRoot, err := testutil.GetProjectRoot()
	if err != nil {
		t.Fatal(err)
	}
	regConfig := newRegistryConfig()
	regAltConfig := newRegistryConfig(withPort(5000), withCreds(""), withPlainHTTP())

	// Setup dummy creds for test
	crt, key, err := generateRegistrySelfSignedCert(regConfig.host)
	if err != nil {
		t.Fatalf("failed to generate cert: %v", err)
	}
	htpasswd, err := generateBasicHtpasswd(regConfig.user, regConfig.pass)
	if err != nil {
		t.Fatalf("failed to generate htpasswd: %v", err)
	}
	hostVolumeMount := t.TempDir()
	authDir := filepath.Join(hostVolumeMount, "auth")
	if err := os.Mkdir(authDir, 0777); err != nil {
		t.Fatalf("failed to create auth folder in tempdir: %v", err)
	}
	if err := os.WriteFile(filepath.Join(authDir, "domain.key"), key, 0666); err != nil {
		t.Fatalf("failed to prepare key file")
	}
	if err := os.WriteFile(filepath.Join(authDir, "domain.crt"), crt, 0666); err != nil {
		t.Fatalf("failed to prepare crt file")
	}
	if err := os.WriteFile(filepath.Join(authDir, "htpasswd"), htpasswd, 0666); err != nil {
		t.Fatalf("failed to prepare htpasswd file")
	}

	targetStage := "containerd-snapshotter-base"

	// Run testing environment on docker compose
	s, err := testutil.ApplyTextTemplate(composeRegistryAltTemplate, dockerComposeYaml{
		TargetStage:         targetStage,
		ServiceName:         serviceName,
		ImageContextDir:     pRoot,
		RegistryImageRef:    oci10RegistryImage,
		RegistryAltImageRef: oci10RegistryImage,
		RegistryHost:        regConfig.host,
		RegistryAltHost:     regAltConfig.host,
		HostVolumeMount:     hostVolumeMount,
	})
	if err != nil {
		t.Fatal(err)
	}
	buildArgs, err := getBuildArgsFromEnv()
	if err != nil {
		t.Fatal(err)
	}
	c, err := compose.Up(s, compose.WithBuildArgs(buildArgs...), compose.WithStdio(testutil.TestingLogDest()))
	if err != nil {
		t.Fatalf("failed to prepare compose: %v", err)
	}
	defer c.Cleanup()
	de, ok := c.Get(serviceName)
	if !ok {
		t.Fatalf("failed to get shell of service %v: %v", serviceName, err)
	}
	sh := shell.New(de, reporter)

	// Route the primary registry host to the alternate (mirror) registry.
	containerdMirrorConfig := fmt.Sprintf(`
[[plugins."io.containerd.snapshotter.v1.soci".resolver.host."%s".mirrors]]
host = "%s"
insecure = true
`, regConfig.host, regAltConfig.hostWithPort())
	snapshotterMirrorConfig := fmt.Sprintf(`
[blob]
check_always = true
[[resolver.host."%s".mirrors]]
host = "%s"
insecure = true
`, regConfig.host, regAltConfig.hostWithPort())

	// Setup environment
	if err := testutil.WriteFileContents(sh, filepath.Join(caCertDir, "domain.crt"), crt, 0600); err != nil {
		t.Fatalf("failed to write %v: %v", caCertDir, err)
	}
	sh.
		X("apt-get", "--no-install-recommends", "install", "-y", "iptables").
		X("update-ca-certificates").
		Retry(100, "nerdctl", "login", "-u", regConfig.user, "-p", regConfig.pass, regConfig.host)

	imageName := rabbitmqImage
	// Mirror images
	rebootContainerd(t, sh, getContainerdConfigToml(t, false, containerdMirrorConfig), getSnapshotterConfigToml(t, false, snapshotterMirrorConfig))
	copyImage(sh, dockerhub(imageName), regConfig.mirror(imageName))
	copyImage(sh, regConfig.mirror(imageName), regAltConfig.mirror(imageName))
	indexDigest := buildIndex(sh, regConfig.mirror(imageName), withMinLayerSize(0))

	// Pull images
	// NOTE: Registry connection will still be checked on each "run" because
	// we added "check_always = true" to the configuration in the above.
	// We use this behaviour for testing mirroring & refreshing functionality.
	rebootContainerd(t, sh, "", "")
	sh.X("nerdctl", "pull", "-q", regConfig.mirror(imageName).ref)
	sh.X("soci", "create", regConfig.mirror(imageName).ref)
	sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, regConfig.mirror(imageName).ref)

	registryHostIP, registryAltHostIP := getIP(t, sh, regConfig.host), getIP(t, sh, regAltConfig.host)
	export := func(image string) []string {
		return shell.C("soci", "run", "--rm", "--snapshotter=soci", image, "test", "tar", "-zc", "/usr")
	}
	sample := func(t *testing.T, tarExportArgs ...string) {
		sh.Pipe(nil, shell.C("ctr", "run", "--rm", regConfig.mirror(imageName).ref, "test", "tar", "-zc", "/usr"), tarExportArgs)
	}

	// test if mirroring is working (switching to registryAltHost)
	// Block traffic to the primary registry with iptables so the snapshotter
	// has to fall back to the mirror; restore the rule afterwards.
	testSameTarContents(t, sh, sample,
		func(t *testing.T, tarExportArgs ...string) {
			sh.
				X("iptables", "-A", "OUTPUT", "-d", registryHostIP, "-j", "DROP").
				X("iptables", "-L").
				Pipe(nil, export(regConfig.mirror(imageName).ref), tarExportArgs).
				X("iptables", "-D", "OUTPUT", "-d", registryHostIP, "-j", "DROP")
		},
	)

	// test if refreshing is working (switching back to registryHost)
	testSameTarContents(t, sh, sample,
		func(t *testing.T, tarExportArgs ...string) {
			sh.
				X("iptables", "-A", "OUTPUT", "-d", registryAltHostIP, "-j", "DROP").
				X("iptables", "-L").
				Pipe(nil, export(regConfig.mirror(imageName).ref), tarExportArgs).
				X("iptables", "-D", "OUTPUT", "-d", registryAltHostIP, "-j", "DROP")
		},
	)
}

// getIP resolves name to its first address using `getent hosts`.
func getIP(t *testing.T, sh *shell.Shell, name string) string {
	resolved := strings.Fields(string(sh.O("getent", "hosts", name)))
	if len(resolved) < 1 {
		t.Fatalf("failed to resolve name %v", name)
	}
	return resolved[0]
}

// tarPipeExporter produces a tar stream inside the test shell and pipes it
// into the supplied tar extraction arguments.
type tarPipeExporter func(t *testing.T, tarExportArgs ...string)

// testSameTarContents extracts the tar streams produced by aC and bC into two
// temp dirs and recursively diffs them; any difference fails the test.
func testSameTarContents(t *testing.T, sh *shell.Shell, aC, bC tarPipeExporter) {
	aDir, err := testutil.TempDir(sh)
	if err != nil {
		t.Fatalf("failed to create temp dir A: %v", err)
	}
	bDir, err := testutil.TempDir(sh)
	if err != nil {
		t.Fatalf("failed to create temp dir B: %v", err)
	}
	aC(t, "tar", "-zxC", aDir)
	bC(t, "tar", "-zxC", bDir)
	sh.X("diff", "--no-dereference", "-qr", aDir+"/", bDir+"/")
}

// TestRpullImageThenRemove pulls and rpulls an image then removes it to confirm fuse mounts are unmounted
func TestRpullImageThenRemove(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), "")
	containerImage := nginxImage
	copyImage(sh, dockerhub(containerImage), regConfig.mirror(containerImage))
	indexDigest := buildIndex(sh, regConfig.mirror(containerImage), withMinLayerSize(0))
	rawJSON := sh.O("soci", "index", "info", indexDigest)
	var sociIndex soci.Index
	if err := soci.UnmarshalIndex(rawJSON, &sociIndex); err != nil {
		t.Fatalf("invalid soci index from digest %s: %v", indexDigest, rawJSON)
	}
	if len(sociIndex.Blobs) == 0 {
		t.Fatalf("soci index %s contains 0 blobs, invalidating this test", indexDigest)
	}
	sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, regConfig.mirror(containerImage).ref)
	checkFuseMounts(t, sh, len(sociIndex.Blobs))
	sh.X("ctr", "image", "rm", "--sync", regConfig.mirror(containerImage).ref)
	sh.X("ctr", "image", "rm", "--sync", dockerhub(containerImage).ref)
	// After image removal every fuse mount must be gone.
	checkFuseMounts(t, sh, 0)
}

// TestRpullImageWithMinLayerSize pulls and rpulls an image
// with a runtime min_layer_size to confirm small layers are mounted locally
func TestRpullImageWithMinLayerSize(t *testing.T) {
	containerImage := rabbitmqImage

	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	// Start soci with default config
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
	middleIndex, middleSize, layerCount := middleSizeLayerInfo(t, sh, dockerhub(containerImage))
	// Snapshotter config that sets min_layer_size to the middle layer's size,
	// so layers below it are mounted locally rather than lazily.
	minLayerSizeConfig := `
[snapshotter]
min_layer_size=` + strconv.FormatInt(middleSize, 10) + `
`

	// Start soci with config to test
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, minLayerSizeConfig))
	copyImage(sh, dockerhub(containerImage), regConfig.mirror(containerImage))
	indexDigest := buildIndex(sh, regConfig.mirror(containerImage), withMinLayerSize(0))
	sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, regConfig.mirror(containerImage).ref)
	// Only layers at or above the middle size should be fuse-mounted.
	checkFuseMounts(t, sh, layerCount-middleIndex)
}
soci-snapshotter-0.4.1/integration/push_test.go000066400000000000000000000245751454010642300217210ustar00rootroot00000000000000/*
   Copyright The Soci Snapshotter Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package integration

import (
	"fmt"
	"path/filepath"
	"strings"
	"testing"

	"github.com/awslabs/soci-snapshotter/config"
	"github.com/awslabs/soci-snapshotter/soci"
	"github.com/awslabs/soci-snapshotter/soci/store"
	"github.com/containerd/containerd/platforms"
)

// TestSociArtifactsPushAndPull builds an index for a platform-specific image,
// pushes it to the registry, wipes the local store, rpulls, and verifies the
// local content store digest is unchanged.
func TestSociArtifactsPushAndPull(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	tests := []struct {
		Name     string
		Platform string
	}{
		{
			Name:     "amd64",
			Platform: "linux/amd64",
		},
		{
			Name:     "arm64",
			Platform: "linux/arm64",
		},
	}

	for _, tt := range tests {
		t.Run(tt.Name, func(t *testing.T) {
			rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
			platform, err := platforms.Parse(tt.Platform)
			if err != nil {
				t.Fatalf("could not parse platform %s: %v", tt.Platform, err)
			}
			imageName := ubuntuImage
			copyImage(sh, dockerhub(imageName, withPlatform(platform)), regConfig.mirror(imageName, withPlatform(platform)))
			indexDigest := buildIndex(sh, regConfig.mirror(imageName, withPlatform(platform)), withMinLayerSize(0))
			artifactsStoreContentDigest, err := getSociLocalStoreContentDigest(sh, config.DefaultContentStoreType)
			if err != nil {
				t.Fatalf("could not get digest of local content store: %v", err)
			}
			sh.X("soci", "push", "--user", regConfig.creds(), "--platform", tt.Platform, regConfig.mirror(imageName).ref)
			// Wipe local blobs so the rpull must re-fetch artifacts from the registry.
			sh.X("rm", "-rf", filepath.Join(store.DefaultSociContentStorePath, "blobs", "sha256"))
			sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, "--platform", tt.Platform, regConfig.mirror(imageName).ref)
			artifactsStoreContentDigestAfterRPull, err := getSociLocalStoreContentDigest(sh, config.DefaultContentStoreType)
			if err != nil {
				t.Fatalf("could not get digest of local content store: %v", err)
			}
			if artifactsStoreContentDigest != artifactsStoreContentDigestAfterRPull {
				t.Fatalf("unexpected digests before and after rpull; before = %v, after = %v", artifactsStoreContentDigest, artifactsStoreContentDigestAfterRPull)
			}
		})
	}
}

// TestPushAlwaysMostRecentlyCreatedIndex verifies that `soci push` pushes the
// index most recently built for an image, across multiple builds with
// different span/layer-size options.
func TestPushAlwaysMostRecentlyCreatedIndex(t *testing.T) {
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()

	type buildOpts struct {
		spanSize     int64
		minLayerSize int64
	}
	testCases := []struct {
		name string
		ref  string
		opts []buildOpts
	}{
		{
			name: "rabbitmq",
			// Pinning a specific image, so that this test is guaranteed to fail in case of any regressions.
			ref: pinnedRabbitmqImage,
			opts: []buildOpts{
				{
					spanSize:     1 << 22,  // 4MiB
					minLayerSize: 10 << 20, // 10MiB
				},
				{
					spanSize:     128000,
					minLayerSize: 10 << 20,
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
			imgInfo := regConfig.mirror(tc.ref)
			copyImage(sh, dockerhub(tc.ref), imgInfo)
			pushedPlatformDigest, _ := sh.OLog("nerdctl", "image", "convert", "--platform", platforms.Format(imgInfo.platform), imgInfo.ref, "test")
			imgInfo.ref = fmt.Sprintf("%s/%s@%s", regConfig.host, tc.name, strings.TrimSpace(string(pushedPlatformDigest)))
			for _, opt := range tc.opts {
				index := buildIndex(sh, imgInfo, withMinLayerSize(opt.minLayerSize), withSpanSize(opt.spanSize))
				index = strings.Split(index, "\n")[0]
				out := sh.O("soci", "push", "--existing-index", "allow", "--user", regConfig.creds(), imgInfo.ref, "-q")
				pushedIndex := strings.Trim(string(out), "\n")
				if index != pushedIndex {
					t.Fatalf("incorrect index pushed to remote registry; expected %s, got %s", index, pushedIndex)
				}
			}
		})
	}
}

// TestLegacyOCI verifies that OCI 1.0 soci artifacts can be pushed to and
// rpulled from both OCI 1.0 and OCI 1.1 registries.
func TestLegacyOCI(t *testing.T) {
	tests := []struct {
		name          string
		registryImage string
		expectError   bool
	}{
		{
			name:          "OCI 1.0 Artifacts succeed with OCI 1.0 registry",
			registryImage: oci10RegistryImage,
			expectError:   false,
		},
		{
			name:          "OCI 1.0 Artifacts succeed with OCI 1.1 registry",
			registryImage: oci11RegistryImage,
			expectError:   false,
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			regConfig := newRegistryConfig()
			sh, done := newShellWithRegistry(t, regConfig, withRegistryImageRef(tc.registryImage))
			defer done()
			rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))
			imageName := ubuntuImage
			copyImage(sh, dockerhub(imageName), regConfig.mirror(imageName))
			indexDigest := buildIndex(sh, regConfig.mirror(imageName))
			rawJSON := sh.O("soci", "index", "info", indexDigest)
			var sociIndex soci.Index
			if err := soci.UnmarshalIndex(rawJSON, &sociIndex); err != nil {
				t.Fatalf("invalid soci index from digest %s: %v", indexDigest, rawJSON)
			}
			_, err := sh.OLog("soci", "push", "--user", regConfig.creds(), regConfig.mirror(imageName).ref)
			hasError := err != nil
			if hasError != tc.expectError {
				t.Fatalf("unexpected error state: expected error? %v, got %v", tc.expectError, err)
			} else if hasError {
				// if we have an error and we expected an error, the test is done
				return
			}
			// Wipe local blobs so rpull must fetch artifacts from the registry.
			sh.X("rm", "-rf", filepath.Join(store.DefaultSociContentStorePath, "blobs", "sha256"))
			sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, regConfig.mirror(imageName).ref)
			if err := sh.Err(); err != nil {
				t.Fatalf("failed to rpull: %v", err)
			}
			checkFuseMounts(t, sh, len(sociIndex.Blobs))
		})
	}
}

// TestPushWithExistingIndices verifies the `--existing-index` flag behaviors
// (warn/skip/allow) when the remote repository already holds one or more
// indices for the image.
func TestPushWithExistingIndices(t *testing.T) {
	t.Parallel()
	regConfig := newRegistryConfig()
	sh, done := newShellWithRegistry(t, regConfig)
	defer done()
	rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false))

	const (
		singleFoundMessage   = "soci index found in remote repository with digest:"
		multipleFoundMessage = "multiple soci indices found in remote repository:"
		skipMessageTail      = "skipping pushing artifacts for image manifest:"
		warnMessageHead      = "[WARN]"
		warnMessageTail      = "pushing index anyway"
	)

	images := []string{nginxImage, rabbitmqImage, drupalImage, ubuntuImage}
	imageToIndexDigest := make(map[string]string)
	imageToManifestDigest := make(map[string]string)
	for _, img := range images {
		mirrorImg := regConfig.mirror(img)
		copyImage(sh, dockerhub(img), mirrorImg)
		indexDigest := buildIndex(sh, mirrorImg)
		manifestDigest, err := getManifestDigest(sh, mirrorImg.ref, mirrorImg.platform)
		if err != nil {
			t.Fatal(err)
		}
		imageToIndexDigest[img] = indexDigest
		imageToManifestDigest[mirrorImg.ref] = manifestDigest
		sh.X("soci", "push", "--user", regConfig.creds(), mirrorImg.ref)
		if img == ubuntuImage {
			// Push a second, different index for ubuntu to exercise the
			// "multiple existing indices" case.
			buildIndex(sh, mirrorImg, withSpanSize(1280))
			sh.X("soci", "push", "--user", regConfig.creds(), mirrorImg.ref)
		}
	}

	tests := []struct {
		name               string
		imgInfo            imageInfo
		imgName            string
		cmd                []string
		hasOutput          bool
		outputContains     string
		expectedIndexCount int
	}{
		{
			name:               "Warn with existing index",
			imgInfo:            regConfig.mirror(nginxImage),
			imgName:            "nginx",
			cmd:                []string{"soci", "push", "--user", regConfig.creds(), "--existing-index", "warn"},
			hasOutput:          true,
			outputContains:     fmt.Sprintf("%s %s %s: %s", warnMessageHead, singleFoundMessage, imageToIndexDigest[nginxImage], warnMessageTail),
			expectedIndexCount: 2,
		},
		{
			name:               "Skip with existing index",
			imgInfo:            regConfig.mirror(rabbitmqImage),
			imgName:            "rabbitmq",
			cmd:                []string{"soci", "push", "--user", regConfig.creds(), "--existing-index", "skip"},
			hasOutput:          true,
			outputContains:     fmt.Sprintf("%s %s: %s %s", singleFoundMessage, imageToIndexDigest[rabbitmqImage], skipMessageTail, imageToManifestDigest[regConfig.mirror(rabbitmqImage).ref]),
			expectedIndexCount: 1,
		},
		{
			name:               "Allow with existing index",
			imgInfo:            regConfig.mirror(drupalImage),
			imgName:            "drupal",
			cmd:                []string{"soci", "push", "--user", regConfig.creds(), "--existing-index", "allow"},
			expectedIndexCount: 2,
		},
		{
			name:               "Warn with multiple existing indices",
			imgInfo:            regConfig.mirror(ubuntuImage),
			imgName:            "ubuntu",
			cmd:                []string{"soci", "push", "--user", regConfig.creds(), "--existing-index", "warn"},
			hasOutput:          true,
			outputContains:     fmt.Sprintf("%s %s %s", warnMessageHead, multipleFoundMessage, warnMessageTail),
			expectedIndexCount: 3,
		},
	}
	// verifyOutput checks that the command output contains the expected substring.
	verifyOutput := func(given, expected string) error {
		if !strings.Contains(given, expected) {
			return fmt.Errorf("output: %s does not contain substring %s", given, expected)
		}
		return nil
	}
	// verifyIndexCount checks how many index manifests the registry's
	// referrers listing reports for the image manifest digest.
	verifyIndexCount := func(imgName, digest string, expected int) error {
		index, err := getReferrers(sh, regConfig, imgName, digest)
		if err != nil {
			return err
		}
		if len(index.Manifests) != expected {
			return fmt.Errorf("unexpected index count in remote: expected: %v; got: %v", expected, len(index.Manifests))
		}
		return nil
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tc := tc
			digest := imageToManifestDigest[tc.imgInfo.ref]
			buildIndex(sh, tc.imgInfo, withSpanSize(1<<19))
			tc.cmd = append(tc.cmd, tc.imgInfo.ref)
			output, err := sh.OLog(tc.cmd...)
			if err != nil {
				t.Fatalf("unexpected error for test case: %v", err)
			}
			if tc.hasOutput {
				if err = verifyOutput(string(output), tc.outputContains); err != nil {
					t.Fatal(err)
				}
			}
			if err = verifyIndexCount(tc.imgName, strings.TrimSpace(digest), tc.expectedIndexCount); err != nil {
				t.Fatal(err)
			}
		})
	}
}
soci-snapshotter-0.4.1/integration/rebuild-db_test.go000066400000000000000000000076531454010642300227530ustar00rootroot00000000000000/*
   Copyright The Soci Snapshotter Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/ package integration import ( "bytes" "fmt" "path/filepath" "testing" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/opencontainers/go-digest" ) func TestRebuildArtifactsDB(t *testing.T) { regConfig := newRegistryConfig() sh, done := newShellWithRegistry(t, regConfig) defer done() rebootContainerd(t, sh, "", "") img := rabbitmqImage copyImage(sh, dockerhub(img), regConfig.mirror(img)) indexDigest := buildIndex(sh, regConfig.mirror(img), withMinLayerSize(0)) blobPath, _ := testutil.GetContentStoreBlobPath(config.DefaultContentStoreType) dgst, err := digest.Parse(indexDigest) if err != nil { t.Fatalf("cannot parse digest: %v", err) } indexBytes := sh.O("cat", filepath.Join(blobPath, dgst.Encoded())) var sociIndex soci.Index err = soci.DecodeIndex(bytes.NewBuffer(indexBytes), &sociIndex) if err != nil { t.Fatal(err) } sh.X("soci", "push", "--user", regConfig.creds(), regConfig.mirror(img).ref) verifyArtifacts := func(expectedIndexCount, expectedZtocCount int) error { indexOutput := sh.O("soci", "index", "list") ztocOutput := sh.O("soci", "ztoc", "list") indexCount := len(bytes.Split(indexOutput, []byte("\n"))) - 2 ztocCount := len(bytes.Split(ztocOutput, []byte("\n"))) - 2 if indexCount != expectedIndexCount { return fmt.Errorf("expected %v indices; got %v", expectedIndexCount, indexCount) } if ztocCount != expectedZtocCount { return fmt.Errorf(" expected %v ztoc; got %v", expectedZtocCount, ztocCount) } return nil } testCases := []struct { name string setup func(*dockershell.Shell, store.ContentStoreType) afterContent bool expectedIndexCount int exptectedZtocCount int }{ { name: "Rpull and rebuild %s content store", setup: func(sh *dockershell.Shell, contentStoreType store.ContentStoreType) { sh.X( "soci", "image", "rpull", "--user", regConfig.creds(), 
regConfig.mirror(img).ref) }, afterContent: true, expectedIndexCount: 1, exptectedZtocCount: len(sociIndex.Blobs), }, { name: "Remove artifacts from %s content store and rebuild", setup: func(sh *dockershell.Shell, contentStoreType store.ContentStoreType) { testutil.RemoveContentStoreContent(sh, contentStoreType, indexDigest) for _, blob := range sociIndex.Blobs { testutil.RemoveContentStoreContent(sh, contentStoreType, blob.Digest.String()) } }, expectedIndexCount: 0, exptectedZtocCount: 0, }, } for _, tc := range testCases { for _, contentStoreType := range store.ContentStoreTypes() { t.Run(fmt.Sprintf(tc.name, contentStoreType), func(t *testing.T) { rebootContainerd(t, sh, "", getSnapshotterConfigToml(t, false, GetContentStoreConfigToml(store.WithType(contentStoreType)))) if !tc.afterContent { copyImage(sh, dockerhub(img), regConfig.mirror(img)) buildIndex(sh, regConfig.mirror(img), withMinLayerSize(0), withContentStoreType(contentStoreType)) } tc.setup(sh, contentStoreType) sh.X("soci", "--content-store", string(contentStoreType), "rebuild-db") err := verifyArtifacts(tc.expectedIndexCount, tc.exptectedZtocCount) if err != nil { t.Fatal(err) } }) } } } soci-snapshotter-0.4.1/integration/run_test.go000066400000000000000000000414001454010642300215340ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "bufio" "bytes" "encoding/json" "fmt" "io" "math" "regexp" "strconv" "strings" "testing" "time" "github.com/awslabs/soci-snapshotter/soci/store" shell "github.com/awslabs/soci-snapshotter/util/dockershell" ) // TestRunMultipleContainers runs multiple containers at the same time and performs a test in each func TestRunMultipleContainers(t *testing.T) { tests := []struct { name string containers []containerImageAndTestFunc }{ { name: "Run multiple containers from the same image", containers: []containerImageAndTestFunc{ { containerImage: nginxImage, testFunc: testWebServiceContainer, }, { containerImage: nginxImage, testFunc: testWebServiceContainer, }, }, }, { name: "Run multiple containers from different images", containers: []containerImageAndTestFunc{ { containerImage: nginxImage, testFunc: testWebServiceContainer, }, { containerImage: drupalImage, testFunc: testWebServiceContainer, }, }, }, { name: "Run multiple containers from different images with shared layers", containers: []containerImageAndTestFunc{ { containerImage: nginxAlpineImage, testFunc: testWebServiceContainer, }, { containerImage: nginxAlpineImage2, testFunc: testWebServiceContainer, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { regConfig := newRegistryConfig() sh, done := newShellWithRegistry(t, regConfig) defer done() rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, tcpMetricsConfig)) 
for _, container := range deduplicateByContainerImage(tt.containers) { // Mirror image copyImage(sh, dockerhub(container.containerImage), regConfig.mirror(container.containerImage)) // Pull image, create SOCI index indexDigest := buildIndex(sh, regConfig.mirror(container.containerImage), withMinLayerSize(0)) sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, regConfig.mirror(container.containerImage).ref) } var getTestContainerName = func(index int, container containerImageAndTestFunc) string { return "test_" + fmt.Sprint(index) + "_" + makeImageNameValid(container.containerImage) } // Run the containers for index, container := range tt.containers { image := regConfig.mirror(container.containerImage).ref sh.X("soci", "run", "-d", "--snapshotter=soci", image, getTestContainerName(index, container)) } // Verify that no mounts fallback to overlayfs curlOutput := string(sh.O("curl", tcpMetricsAddress+metricsPath)) if err := checkOverlayFallbackCount(curlOutput, 0); err != nil { t.Fatal(err) } // Do something in each container for index, container := range tt.containers { container.testFunc(sh, getTestContainerName(index, container)) } // Check for independent writeable snapshots for each container mountsScanner := bufio.NewScanner(bufio.NewReader(bytes.NewReader(sh.O("mount")))) upperdirs := make(map[string]bool) workdirs := make(map[string]bool) mountRegex := regexp.MustCompile(`^overlay on \/run\/containerd\/io.containerd.runtime.v2.task\/default\/(?P\w+)\/rootfs type overlay \(rw,.*,lowerdir=(?P.*),upperdir=(?P.*),workdir=(?P.*)\)$`) mountRegexGroupNames := mountRegex.SubexpNames() for mountsScanner.Scan() { findResult := mountRegex.FindAllStringSubmatch(mountsScanner.Text(), -1) if findResult == nil { continue } matches := findResult[0] for i, match := range matches { if mountRegexGroupNames[i] == "upperdir" { if upperdirs[match] { t.Fatalf("Duplicate overlay mount upperdir: %s", match) } else { upperdirs[match] = true } 
} else if mountRegexGroupNames[i] == "workdir" { if workdirs[match] { t.Fatalf("Duplicate overlay mount workdir: %s", match) } else { workdirs[match] = true } } } } }) } } func deduplicateByContainerImage(origList []containerImageAndTestFunc) []containerImageAndTestFunc { foundItems := make(map[string]bool) newList := []containerImageAndTestFunc{} for _, item := range origList { if _, exists := foundItems[item.containerImage]; !exists { foundItems[item.containerImage] = true newList = append(newList, item) } } return newList } // makeImageNameValid replaces special characters other than "_.-", and leading ".-", with "_" func makeImageNameValid(imageName string) string { return regexp.MustCompile(`^[.-]|[^a-zA-Z0-9_.-]+`).ReplaceAllString(imageName, "_") } type containerImageAndTestFunc struct { containerImage string testFunc func(*shell.Shell, string) } func testWebServiceContainer(shell *shell.Shell, containerName string) { shell.X("ctr", "task", "exec", "--exec-id", "test-curl", containerName, "curl", "--retry", "5", "--retry-connrefused", "--retry-max-time", "30", "http://127.0.0.1", ) } type retryConfig struct { maxRetries int minWaitMsec int64 maxWaitMsec int64 networkDisableMsec int64 expectedSuccess bool } // TestNetworkRetry runs a container, disables network access to the remote image, asks the container // to do something requiring the remote image, waits for some/all requests to fail, enables the network, // confirms retries and success/failure func TestNetworkRetry(t *testing.T) { const containerImage = alpineImage tests := []struct { name string config retryConfig }{ { name: "No network interruption, no retries allowed, success", config: retryConfig{ maxRetries: -1, minWaitMsec: 0, maxWaitMsec: 0, networkDisableMsec: 0, expectedSuccess: true, }, }, { name: "1s network interruption, no retries allowed, failure", config: retryConfig{ maxRetries: -1, minWaitMsec: 0, maxWaitMsec: 0, networkDisableMsec: 1000, expectedSuccess: false, }, }, { name: "2s 
network interruption, ~9-10s retries allowed, success", config: retryConfig{ maxRetries: 2, minWaitMsec: 100, maxWaitMsec: 1600, networkDisableMsec: 2000, expectedSuccess: true, }, }, { name: "10s network interruption, ~6-7s retries allowed, failure", config: retryConfig{ maxRetries: 1, minWaitMsec: 100, maxWaitMsec: 1600, networkDisableMsec: 10000, expectedSuccess: false, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { regConfig := newRegistryConfig() sh, done := newShellWithRegistry(t, regConfig) defer done() registryHostIP := getIP(t, sh, regConfig.host) config := ` [blob] max_retries = ` + strconv.Itoa(tt.config.maxRetries) + ` min_wait_msec = ` + strconv.FormatInt(tt.config.minWaitMsec, 10) + ` max_wait_msec = ` + strconv.FormatInt(tt.config.maxWaitMsec, 10) + ` [background_fetch] disable = true ` m := rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, config)) // Mirror image copyImage(sh, dockerhub(containerImage), regConfig.mirror(containerImage)) // Pull image, create SOCI index with all layers and small (100kiB) spans indexDigest := buildIndex(sh, regConfig.mirror(containerImage), withMinLayerSize(0), withSpanSize(100*1024)) sh.X("soci", "image", "rpull", "--user", regConfig.creds(), "--soci-index-digest", indexDigest, regConfig.mirror(containerImage).ref) // Run the container image := regConfig.mirror(containerImage).ref sh.X("soci", "run", "-d", "--snapshotter=soci", image, "test-container") sh.X("apt-get", "--no-install-recommends", "install", "-y", "iptables") // TODO: Wait for the container to be up and running // Block network access to the registry if tt.config.networkDisableMsec > 0 { sh.X("iptables", "-A", "OUTPUT", "-d", registryHostIP, "-j", "DROP") } // Do something in the container that should work without network access commandSucceedStdout, _, err := sh.R("ctr", "task", "exec", "--exec-id", "test-task-1", "test-container", "sh", "-c", "times") if err != nil { 
t.Fatalf("attempt to run task without network access failed: %s", err) } type ErrorLogLine struct { Error string `json:"error"` Msg string `json:"msg"` } gaveUpChannel := make(chan bool, 1) defer close(gaveUpChannel) monitorGaveUp := func(rawL string) { if i := strings.Index(rawL, "{"); i > 0 { rawL = rawL[i:] // trim garbage chars; expects "{...}"-styled JSON log } var logLine ErrorLogLine if err := json.Unmarshal([]byte(rawL), &logLine); err == nil { if logLine.Msg == "statFile error" && strings.Contains(logLine.Error, "giving up after") { gaveUpChannel <- true return } } } m.Add("retry", monitorGaveUp) defer m.Remove("retry") // Do something in the container to access un-fetched spans, requiring network access commandNetworkStdout, _, err := sh.R("ctr", "task", "exec", "--exec-id", "test-task-2", "test-container", "cat", "/etc/hosts") if err != nil { t.Fatalf("attempt to run task requiring network access failed: %s", err) } // Wait with network disabled time.Sleep(time.Duration(tt.config.networkDisableMsec) * time.Millisecond) // Short wait to allow commands to complete time.Sleep(time.Duration(1000) * time.Millisecond) // Confirm first command succeeded while network was down buf := make([]byte, 100) if _, err = commandSucceedStdout.Read(buf); err != nil { t.Fatalf("read from expected successful task output failed: %s", err) } if !strings.Contains(string(buf), "s ") { // `times` output looks like "0m0.03s 0m0.05s" t.Fatalf("expected successful task produced unexpected output: %s", string(buf)) } // async read from command_network_stdout commandNetworkStdoutChannel := make(chan []byte) commandNetworkErrChannel := make(chan error) go func() { defer close(commandNetworkStdoutChannel) defer close(commandNetworkErrChannel) var b []byte if _, err := commandNetworkStdout.Read(b); err != nil && err != io.EOF { commandNetworkErrChannel <- fmt.Errorf("read from network bound task output failed: %s", err) return } commandNetworkStdoutChannel <- b if err == io.EOF { 
commandNetworkErrChannel <- fmt.Errorf("read from network bound task output encountered EOF") return } }() if tt.config.networkDisableMsec > 0 { // Confirm second command has not succeeded while network was down select { case err := <-commandNetworkErrChannel: t.Fatal(err) case data := <-commandNetworkStdoutChannel: t.Fatalf("network bound task produced unexpected output: %s", string(data)) case <-time.After(100 * time.Millisecond): } // Restore access to the registry and image sh.X("iptables", "-D", "OUTPUT", "-d", registryHostIP, "-j", "DROP") // Wait with network enabled, so a final retry has a chance to succeed time.Sleep(2 * time.Millisecond * time.Duration(math.Min( float64(tt.config.maxWaitMsec), math.Pow(2, float64(tt.config.maxRetries))*float64(tt.config.minWaitMsec), ))) } // Confirm whether second command has succeeded with network restored select { case gaveUp := <-gaveUpChannel: if tt.config.expectedSuccess && gaveUp { t.Fatal("retries gave up despite test expecting retry success") } case data := <-commandNetworkStdoutChannel: if !tt.config.expectedSuccess { t.Fatalf("network bound task produced unexpected output: %s", string(data)) } case <-time.After(100 * time.Millisecond): if tt.config.expectedSuccess { t.Fatal("network bound task produced no output when expecting success") } } }) } } // TestRootFolderPermissions tests that non-root users can read "/". 
// This is a regression test to verify that SOCI has the same behavior as the containerd // overlayfs snapshotter and the stargz-snapshotter https://github.com/awslabs/soci-snapshotter/issues/664 func TestRootFolderPermission(t *testing.T) { image := alpineImage containerName := "TestRootFolderPermission" regConfig := newRegistryConfig() sh, done := newShellWithRegistry(t, regConfig) defer done() rebootContainerd(t, sh, getContainerdConfigToml(t, false), getSnapshotterConfigToml(t, false, tcpMetricsConfig)) sh.X("soci", "image", "rpull", dockerhub(image).ref) // This should have all been pulled ahead of time. checkFuseMounts(t, sh, 0) // Verify that the mount permissions allow non-root to open "/" subfolders := sh.O("soci", "run", "-d", "--snapshotter=soci", "--user", "1000", dockerhub(image).ref, containerName, "ls", "/") if string(subfolders) == "" { t.Fatal("non-root user should be able to `ls /`") } } func TestRunInContentStore(t *testing.T) { imageName := helloImage sh, done := newSnapshotterBaseShell(t) defer done() for _, createContentStoreType := range store.ContentStoreTypes() { for _, runContentStoreType := range store.ContentStoreTypes() { t.Run("create in "+string(createContentStoreType)+", run in "+string(runContentStoreType), func(t *testing.T) { rebootContainerd(t, sh, "", getSnapshotterConfigToml(t, false, tcpMetricsConfig, GetContentStoreConfigToml(store.WithType(runContentStoreType)))) imageInfo := dockerhub(imageName) indexDigest := buildIndex(sh, imageInfo, withMinLayerSize(0), withContentStoreType(createContentStoreType)) if indexDigest == "" { t.Fatal("failed to build index") } sh.X("soci", "image", "rpull", "--soci-index-digest", indexDigest, imageInfo.ref) // Run the container _, err := sh.OLog("soci", "run", "--rm", "--snapshotter=soci", imageInfo.ref, "test") if err != nil { t.Fatalf("encountered error running container: %v", err) } if createContentStoreType == runContentStoreType { // same content store should succeed and use soci 
checkFuseMounts(t, sh, 1) } else { // different content store should fallback to overlayfs checkFuseMounts(t, sh, 0) } }) } } } func TestRunInNamespace(t *testing.T) { imageName := helloImage sh, done := newSnapshotterBaseShell(t) defer done() namespaces := []string{"default", "test"} for _, contentStoreType := range store.ContentStoreTypes() { for _, createNamespace := range namespaces { for _, runNamespace := range namespaces { t.Run("content store "+string(contentStoreType)+", create in "+createNamespace+", run in "+runNamespace, func(t *testing.T) { rebootContainerd(t, sh, "", getSnapshotterConfigToml(t, false, tcpMetricsConfig, GetContentStoreConfigToml(store.WithType(contentStoreType), store.WithNamespace(runNamespace)))) imageInfo := dockerhub(imageName) indexDigest := buildIndex(sh, imageInfo, withMinLayerSize(0), withContentStoreType(contentStoreType), withNamespace(createNamespace)) if indexDigest == "" { t.Fatal("failed to build index") } sh.X("soci", "--namespace", createNamespace, "--content-store", string(contentStoreType), "image", "rpull", "--soci-index-digest", indexDigest, imageInfo.ref) // Run the container _, err := sh.OLog("soci", "--namespace", runNamespace, "run", "--snapshotter=soci", imageInfo.ref, "test") if createNamespace == runNamespace { // same namespace should succeed without overlayfs fallback if err != nil { t.Fatalf("encountered error running container: %v", err) } checkFuseMounts(t, sh, 1) } else { // different namespace should fail to launch the container if err == nil { t.Fatal("container launch succeeded unexpectedly") } } }) } } } } soci-snapshotter-0.4.1/integration/util_soci_test.go000066400000000000000000000207411454010642300227270ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "bytes" "fmt" "path/filepath" "strings" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" shell "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" "github.com/google/go-cmp/cmp" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) const ( // copied from `soci/soci_index.go` for convenience so we don't always need to // import the `soci` pkg only to use the default values. 
defaultSpanSize = int64(1 << 22) // 4MiB defaultMinLayerSize = 10 << 20 // 10MiB ) // indexBuildConfig represents the values of the CLI flags that should be used // when creating an index with `buildIndex` type indexBuildConfig struct { spanSize int64 minLayerSize int64 allowErrors bool contentStoreType store.ContentStoreType namespace string } // indexBuildOption is a functional argument to update `indexBuildConfig` type indexBuildOption func(*indexBuildConfig) // withIndexBuildConfig copies a provided config func withIndexBuildConfig(newIbc indexBuildConfig) indexBuildOption { return func(ibc *indexBuildConfig) { ibc.spanSize = newIbc.spanSize ibc.minLayerSize = newIbc.minLayerSize ibc.allowErrors = newIbc.allowErrors ibc.contentStoreType = newIbc.contentStoreType ibc.namespace = newIbc.namespace } } // withSpanSize overrides the default span size to use when creating an index with `buildIndex` func withSpanSize(spanSize int64) indexBuildOption { return func(ibc *indexBuildConfig) { ibc.spanSize = spanSize } } // withMinLayerSize overrides the minimum layer size for which to create a ztoc // when creating an index with `buildIndex` func withMinLayerSize(minLayerSize int64) indexBuildOption { return func(ibc *indexBuildConfig) { ibc.minLayerSize = minLayerSize } } // withContentStoreType overrides the default content store func withContentStoreType(contentStoreType store.ContentStoreType) indexBuildOption { return func(ibc *indexBuildConfig) { ibc.contentStoreType, _ = store.CanonicalizeContentStoreType(contentStoreType) } } // withNamespace overrides the default namespace func withNamespace(namespace string) indexBuildOption { return func(ibc *indexBuildConfig) { ibc.namespace = namespace } } // withAllowErrors does not fatally fail the test on the a shell command non-zero exit code func withAllowErrors(ibc *indexBuildConfig) { ibc.allowErrors = true } // defaultIndexBuildConfig is the default parameters when creating and index with `buildIndex` func 
defaultIndexBuildConfig() indexBuildConfig { return indexBuildConfig{ spanSize: defaultSpanSize, minLayerSize: defaultMinLayerSize, contentStoreType: config.DefaultContentStoreType, namespace: namespaces.Default, } } // buildIndex builds an index for the source image with given options. By default, it will build with // min-layer-size = 0 and span-size = CLI default // returns the index digest, or an empty string for failure func buildIndex(sh *shell.Shell, src imageInfo, opt ...indexBuildOption) string { indexBuildConfig := defaultIndexBuildConfig() for _, o := range opt { o(&indexBuildConfig) } opts := encodeImageInfoNerdctl(src) createCommand := []string{ "soci", "--namespace", indexBuildConfig.namespace, "--content-store", string(indexBuildConfig.contentStoreType), "create", src.ref, "--min-layer-size", fmt.Sprintf("%d", indexBuildConfig.minLayerSize), "--span-size", fmt.Sprintf("%d", indexBuildConfig.spanSize), "--platform", platforms.Format(src.platform), } shx := sh.X if indexBuildConfig.allowErrors { shx = sh.XLog } shx(append([]string{"nerdctl", "--namespace", indexBuildConfig.namespace, "pull", "-q", "--platform", platforms.Format(src.platform)}, opts[0]...)...) shx(createCommand...) 
indexDigest, err := sh.OLog("soci", "--namespace", indexBuildConfig.namespace, "--content-store", string(indexBuildConfig.contentStoreType), "index", "list", "-q", "--ref", src.ref, "--platform", platforms.Format(src.platform), // this will make SOCI artifact available locally ) if err != nil { return "" } return strings.Trim(string(indexDigest), "\n") } func validateSociIndex(sh *shell.Shell, contentStoreType store.ContentStoreType, sociIndex soci.Index, imgManifestDigest string, includedLayers map[string]struct{}) error { if sociIndex.MediaType != ocispec.MediaTypeImageManifest { return fmt.Errorf("unexpected index media type; expected types: [%v], got: %v", ocispec.MediaTypeImageManifest, sociIndex.MediaType) } if sociIndex.ArtifactType != soci.SociIndexArtifactType { return fmt.Errorf("unexpected index artifact type; expected = %v, got = %v", soci.SociIndexArtifactType, sociIndex.ArtifactType) } expectedAnnotations := map[string]string{ soci.IndexAnnotationBuildToolIdentifier: "AWS SOCI CLI v0.1", } if diff := cmp.Diff(sociIndex.Annotations, expectedAnnotations); diff != "" { return fmt.Errorf("unexpected index annotations; diff = %v", diff) } if imgManifestDigest != sociIndex.Subject.Digest.String() { return fmt.Errorf("unexpected subject digest; expected = %v, got = %v", imgManifestDigest, sociIndex.Subject.Digest.String()) } blobs := sociIndex.Blobs if includedLayers != nil && len(blobs) != len(includedLayers) { return fmt.Errorf("unexpected blob count; expected=%v, got=%v", len(includedLayers), len(blobs)) } for _, blob := range blobs { blobPath, err := testutil.GetContentStoreBlobPath(contentStoreType) if err != nil { return err } blobContent := fetchContentFromPath(sh, filepath.Join(blobPath, blob.Digest.Encoded())) blobSize := int64(len(blobContent)) blobDigest := digest.FromBytes(blobContent) if includedLayers != nil { layerDigest := blob.Annotations[soci.IndexAnnotationImageLayerDigest] if _, ok := includedLayers[layerDigest]; !ok { return 
fmt.Errorf("found ztoc for layer %v in index but should not have built ztoc for it", layerDigest) } } if blobSize != blob.Size { return fmt.Errorf("unexpected blob size; expected = %v, got = %v", blob.Size, blobSize) } if blobDigest != blob.Digest { return fmt.Errorf("unexpected blob digest; expected = %v, got = %v", blob.Digest, blobDigest) } } return nil } // getSociLocalStoreContentDigest will generate a digest based on the contents of the soci content store // Files that are smaller than 10 bytes will not be included when generating the digest func getSociLocalStoreContentDigest(sh *shell.Shell, contentStoreType store.ContentStoreType) (string, error) { content := new(bytes.Buffer) blobPath, err := testutil.GetContentStoreBlobPath(contentStoreType) if err != nil { return "", err } sh.Pipe(nil, []string{"find", blobPath, "-maxdepth", "1", "-type", "f", "-size", "+10c"}).Pipe(content, []string{"sort"}) return digest.FromBytes(content.Bytes()).String(), nil } func sociIndexFromDigest(sh *shell.Shell, indexDigest string) (index soci.Index, err error) { rawSociIndexJSON, err := sh.OLog("soci", "index", "info", indexDigest) if err != nil { return } if err = soci.UnmarshalIndex(rawSociIndexJSON, &index); err != nil { err = fmt.Errorf("invalid soci index from digest %s: %s", indexDigest, err) } return } soci-snapshotter-0.4.1/integration/util_test.go000066400000000000000000000626001454010642300217120ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package integration import ( "bytes" "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/csv" "encoding/json" "encoding/pem" "fmt" "math/big" "os" "path/filepath" "runtime" "sort" "strings" "testing" "time" commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common" "github.com/awslabs/soci-snapshotter/soci/store" shell "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/awslabs/soci-snapshotter/util/dockershell/compose" dexec "github.com/awslabs/soci-snapshotter/util/dockershell/exec" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/opencontainers/go-digest" spec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pelletier/go-toml" "github.com/rs/xid" "golang.org/x/crypto/bcrypt" ) const ( defaultContainerdConfigPath = "/etc/containerd/config.toml" defaultSnapshotterConfigPath = "/etc/soci-snapshotter-grpc/config.toml" builtinSnapshotterFlagEnv = "BUILTIN_SNAPSHOTTER" buildArgsEnv = "DOCKER_BUILD_ARGS" dockerLibrary = "public.ecr.aws/docker/library/" // Registry images to use in the test infrastructure. These are not intended to be used // as images in the test itself, but just when we're setting up docker compose. 
oci10RegistryImage = "registry2:soci_test" oci11RegistryImage = "ghcr.io/project-zot/zot-linux-" + runtime.GOARCH + ":v2.0.0-rc6" ) // These are images that we use in our integration tests const ( helloImage = "hello-world:latest" alpineImage = "alpine:3.17.1" nginxImage = "nginx:1.23.3" ubuntuImage = "ubuntu:23.04" drupalImage = "drupal:10.0.2" rabbitmqImage = "rabbitmq:3.11.7" // Pinned version of rabbitmq that points to a multi architecture index. pinnedRabbitmqImage = "rabbitmq@sha256:19e69a7a65fa6b1d0a5c658bad8ec03d2c9900a98ebbc744c34d49179ff517bf" // These 2 images enable us to test cases where 2 different images // have shared layers (thus shared ztocs if built with the same parameters). nginxAlpineImage = "nginx:1.22-alpine3.17" nginxAlpineImage2 = "nginx:1.23-alpine3.17" ) const proxySnapshotterConfig = ` [proxy_plugins] [proxy_plugins.soci] type = "snapshot" address = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" ` const containerdConfigTemplate = ` version = 2 disabled_plugins = [ "io.containerd.snapshotter.v1.aufs", "io.containerd.snapshotter.v1.btrfs", "io.containerd.snapshotter.v1.devmapper", "io.containerd.snapshotter.v1.zfs", "io.containerd.tracing.processor.v1.otlp", "io.containerd.internal.v1.tracing", "io.containerd.grpc.v1.cri", ] [plugins."io.containerd.snapshotter.v1.soci"] root_path = "/var/lib/soci-snapshotter-grpc/" disable_verification = {{.DisableVerification}} [plugins."io.containerd.snapshotter.v1.soci".blob] check_always = true [debug] format = "json" level = "{{.LogLevel}}" {{.AdditionalConfig}} ` const snapshotterConfigTemplate = ` disable_verification = {{.DisableVerification}} {{.AdditionalConfig}} ` const composeDefaultTemplate = ` version: "3.7" services: testing: image: soci_base:soci_test privileged: true init: true entrypoint: [ "sleep", "infinity" ] environment: - NO_PROXY=127.0.0.1,localhost tmpfs: - /tmp:exec,mode=777 - /var/lib/containerd - /var/lib/soci-snapshotter-grpc volumes: - /dev/fuse:/dev/fuse ` const 
composeRegistryTemplate = ` version: "3.7" services: {{.ServiceName}}: image: soci_base:soci_test privileged: true init: true entrypoint: [ "sleep", "infinity" ] environment: - NO_PROXY=127.0.0.1,localhost,{{.RegistryHost}}:443 tmpfs: - /tmp:exec,mode=777 - /var/lib/containerd - /var/lib/soci-snapshotter-grpc volumes: - /dev/fuse:/dev/fuse registry: image: {{.RegistryImageRef}} container_name: {{.RegistryHost}} environment: - REGISTRY_AUTH=htpasswd - REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm" - REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd - REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt - REGISTRY_HTTP_TLS_KEY=/auth/domain.key - REGISTRY_HTTP_ADDR={{.RegistryHost}}:443 - REGISTRY_STORAGE_DELETE_ENABLED=true volumes: - {{.HostVolumeMount}}/auth:/auth:ro - {{.HostVolumeMount}}/etc/zot/config.json:/etc/zot/config.json:ro {{.NetworkConfig}} ` const composeRegistryAltTemplate = ` version: "3.7" services: {{.ServiceName}}: image: soci_base:soci_test privileged: true init: true entrypoint: [ "sleep", "infinity" ] environment: - NO_PROXY=127.0.0.1,localhost,{{.RegistryHost}}:443 tmpfs: - /tmp:exec,mode=777 - /var/lib/containerd - /var/lib/soci-snapshotter-grpc volumes: - /dev/fuse:/dev/fuse registry: image: {{.RegistryImageRef}} container_name: {{.RegistryHost}} environment: - REGISTRY_AUTH=htpasswd - REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm" - REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd - REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt - REGISTRY_HTTP_TLS_KEY=/auth/domain.key - REGISTRY_HTTP_ADDR={{.RegistryHost}}:443 - REGISTRY_STORAGE_DELETE_ENABLED=true volumes: - {{.HostVolumeMount}}/auth:/auth:ro registry-alt: image: {{.RegistryAltImageRef}} container_name: {{.RegistryAltHost}} ` const composeBuildTemplate = ` version: "3.7" services: {{.ServiceName}}: image: soci_base:soci_test build: context: {{.ImageContextDir}} target: {{.TargetStage}} args: - SNAPSHOTTER_BUILD_FLAGS="-race" registry: image: registry2:soci_test build: context: {{.ImageContextDir}} target: 
{{.Registry2Stage}} ` const zotConfigTemplate = ` { "storage": { "rootDirectory": "/tmp/zot" }, "http": { "address": "{{.Address}}", "port": "443", "realm": "Registry Realm", "auth": { "htpasswd": { "path": "/auth/htpasswd" } }, "tls": { "cert": "/auth/domain.crt", "key": "/auth/domain.key" } } } ` type dockerComposeYaml struct { ServiceName string ImageContextDir string TargetStage string Registry2Stage string RegistryImageRef string RegistryAltImageRef string RegistryHost string RegistryAltHost string HostVolumeMount string NetworkConfig string } type zotConfigStruct struct { Address string } // getContainerdConfigToml creates a containerd config yaml, by appending all // `additionalConfigs` to the default `containerdConfigTemplate`. func getContainerdConfigToml(t *testing.T, disableVerification bool, additionalConfigs ...string) string { if !isTestingBuiltinSnapshotter() { additionalConfigs = append(additionalConfigs, proxySnapshotterConfig) } s, err := testutil.ApplyTextTemplate(containerdConfigTemplate, struct { LogLevel string DisableVerification bool AdditionalConfig string }{ LogLevel: containerdLogLevel, DisableVerification: disableVerification, AdditionalConfig: strings.Join(additionalConfigs, "\n"), }) if err != nil { t.Fatal(err) } return s } func getSnapshotterConfigToml(t *testing.T, disableVerification bool, additionalConfigs ...string) string { s, err := testutil.ApplyTextTemplate(snapshotterConfigTemplate, struct { DisableVerification bool AdditionalConfig string }{ DisableVerification: disableVerification, AdditionalConfig: strings.Join(additionalConfigs, "\n"), }) if err != nil { t.Fatal(err) } return s } func isTestingBuiltinSnapshotter() bool { return os.Getenv(builtinSnapshotterFlagEnv) == "true" } func getBuildArgsFromEnv() ([]string, error) { buildArgsStr := os.Getenv(buildArgsEnv) if buildArgsStr == "" { return nil, nil } r := csv.NewReader(strings.NewReader(buildArgsStr)) buildArgs, err := r.Read() if err != nil { return nil, 
fmt.Errorf("failed to get build args from env %v", buildArgsEnv) } return buildArgs, nil } func isFileExists(sh *shell.Shell, file string) bool { return sh.Command("test", "-f", file).Run() == nil } func isDirExists(sh *shell.Shell, dir string) bool { return sh.Command("test", "-d", dir).Run() == nil } type imageOpt func(*imageInfo) func withPlatform(p spec.Platform) imageOpt { return func(i *imageInfo) { i.platform = p } } type imageInfo struct { ref string creds string plainHTTP bool platform spec.Platform } func dockerhub(name string, opts ...imageOpt) imageInfo { i := imageInfo{dockerLibrary + name, "", false, platforms.DefaultSpec()} for _, opt := range opts { opt(&i) } return i } // encodeImageInfoNerdctl assembles command line options for pulling or pushing an image using nerdctl func encodeImageInfoNerdctl(ii ...imageInfo) [][]string { var opts [][]string for _, i := range ii { var o []string if i.plainHTTP { o = append(o, "--insecure-registry") } o = append(o, i.ref) opts = append(opts, o) } return opts } func copyImage(sh *shell.Shell, src, dst imageInfo) { opts := encodeImageInfoNerdctl(src, dst) sh. X(append([]string{"nerdctl", "pull", "-q", "--platform", platforms.Format(src.platform)}, opts[0]...)...). X("ctr", "i", "tag", src.ref, dst.ref). X(append([]string{"nerdctl", "push", "-q", "--platform", platforms.Format(src.platform)}, opts[1]...)...) 
} type registryConfig struct { host string user string pass string port int credstr string plainHTTP bool } type registryConfigOpt func(*registryConfig) func withPort(port int) registryConfigOpt { return func(rc *registryConfig) { rc.port = port } } func withCreds(creds string) registryConfigOpt { return func(rc *registryConfig) { rc.credstr = creds } } func withPlainHTTP() registryConfigOpt { return func(rc *registryConfig) { rc.plainHTTP = true } } func newRegistryConfig(opts ...registryConfigOpt) registryConfig { rc := registryConfig{ host: fmt.Sprintf("registry-%s.test", xid.New().String()), user: "dummyuser", pass: "dummypass", } rc.credstr = rc.user + ":" + rc.pass for _, opt := range opts { opt(&rc) } return rc } func (c registryConfig) hostWithPort() string { if c.port != 0 { return fmt.Sprintf("%s:%d", c.host, c.port) } return c.host } func (c registryConfig) creds() string { return c.credstr } func (c registryConfig) mirror(imageName string, opts ...imageOpt) imageInfo { i := imageInfo{c.hostWithPort() + "/" + imageName, c.creds(), c.plainHTTP, platforms.DefaultSpec()} for _, opt := range opts { opt(&i) } return i } type registryOptions struct { network string registryImageRef string } func defaultRegistryOptions() registryOptions { return registryOptions{ network: "", registryImageRef: oci10RegistryImage, } } type registryOpt func(o *registryOptions) func withRegistryImageRef(ref string) registryOpt { return func(o *registryOptions) { o.registryImageRef = ref } } func newShellWithRegistry(t *testing.T, r registryConfig, opts ...registryOpt) (sh *shell.Shell, done func() error) { rOpts := defaultRegistryOptions() for _, o := range opts { o(&rOpts) } var ( caCertDir = "/usr/local/share/ca-certificates" serviceName = "testing" ) // Setup dummy creds for test crt, key, err := generateRegistrySelfSignedCert(r.host) if err != nil { t.Fatalf("failed to generate cert: %v", err) } htpasswd, err := generateBasicHtpasswd(r.user, r.pass) if err != nil { 
t.Fatalf("failed to generate htpasswd: %v", err) } hostVolumeMount := t.TempDir() authDir := filepath.Join(hostVolumeMount, "auth") if err := os.Mkdir(authDir, 0777); err != nil { t.Fatalf("failed to create auth folder in tempdir: %v", err) } if err := os.WriteFile(filepath.Join(authDir, "domain.key"), key, 0666); err != nil { t.Fatalf("failed to prepare key file") } if err := os.WriteFile(filepath.Join(authDir, "domain.crt"), crt, 0666); err != nil { t.Fatalf("failed to prepare crt file") } if err := os.WriteFile(filepath.Join(authDir, "htpasswd"), htpasswd, 0666); err != nil { t.Fatalf("failed to prepare htpasswd file") } buildArgs, err := getBuildArgsFromEnv() if err != nil { t.Fatal(err) } // Run testing environment on docker compose cOpts := []compose.Option{ compose.WithBuildArgs(buildArgs...), compose.WithStdio(testutil.TestingLogDest()), } networkConfig := "" var cleanups []func() error if nw := rOpts.network; nw != "" { done, err := dexec.NewTempNetwork(nw) if err != nil { t.Fatalf("failed to create temp network %v: %v", nw, err) } cleanups = append(cleanups, done) networkConfig = fmt.Sprintf(` networks: default: external: name: %s `, nw) } zotDir := filepath.Join(hostVolumeMount, "etc/zot") if err := os.MkdirAll(zotDir, 0777); err != nil { t.Fatalf("failed to create zot folder in tempdir: %v", err) } zotConfigFile, err := testutil.ApplyTextTemplate(zotConfigTemplate, zotConfigStruct{ Address: r.host, }) if err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(hostVolumeMount, "etc/zot/config.json"), []byte(zotConfigFile), 0666); err != nil { t.Fatalf("failed to prepare config.json: %v", err) } s, err := testutil.ApplyTextTemplate(composeRegistryTemplate, dockerComposeYaml{ ServiceName: serviceName, RegistryHost: r.host, RegistryImageRef: rOpts.registryImageRef, HostVolumeMount: hostVolumeMount, NetworkConfig: networkConfig, }) if err != nil { t.Fatal(err) } c, err := compose.Up(s, cOpts...) 
if err != nil { t.Fatalf("failed to prepare compose: %v", err) } de, ok := c.Get(serviceName) if !ok { t.Fatalf("failed to get shell of service %v", serviceName) } sh = shell.New(de, testutil.NewTestingReporter(t)) // Install cert and login to the registry if err := testutil.WriteFileContents(sh, filepath.Join(caCertDir, "domain.crt"), crt, 0600); err != nil { t.Fatalf("failed to write cert at %v: %v", caCertDir, err) } sh. X("update-ca-certificates"). Retry(100, "nerdctl", "login", "-u", r.user, "-p", r.pass, r.host) return sh, func() error { if err := c.Cleanup(); err != nil { return err } for _, f := range cleanups { if err := f(); err != nil { return err } } return nil } } func newSnapshotterBaseShell(t *testing.T) (*shell.Shell, func() error) { serviceName := "testing" buildArgs, err := getBuildArgsFromEnv() if err != nil { t.Fatal(err) } c, err := compose.Up(composeDefaultTemplate, compose.WithBuildArgs(buildArgs...), compose.WithStdio(testutil.TestingLogDest())) if err != nil { t.Fatalf("failed to prepare compose: %v", err) } de, ok := c.Get(serviceName) if !ok { t.Fatalf("failed to get shell of service %v", serviceName) } sh := shell.New(de, testutil.NewTestingReporter(t)) if !isTestingBuiltinSnapshotter() { if err := testutil.WriteFileContents(sh, defaultContainerdConfigPath, []byte(getContainerdConfigToml(t, false)), 0600); err != nil { t.Fatalf("failed to write containerd config %v: %v", defaultContainerdConfigPath, err) } } return sh, c.Cleanup } func generateRegistrySelfSignedCert(registryHost string) (crt, key []byte, _ error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 60) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, nil, err } template := x509.Certificate{ IsCA: true, BasicConstraintsValid: true, SerialNumber: serialNumber, Subject: pkix.Name{CommonName: registryHost}, NotBefore: time.Now(), NotAfter: time.Now().AddDate(1, 0, 0), // one year KeyUsage: x509.KeyUsageDigitalSignature, ExtKeyUsage: 
[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, DNSNames: []string{registryHost}, } privatekey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { return nil, nil, err } publickey := &privatekey.PublicKey cert, err := x509.CreateCertificate(rand.Reader, &template, &template, publickey, privatekey) if err != nil { return nil, nil, err } certPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert}) privBytes, err := x509.MarshalPKCS8PrivateKey(privatekey) if err != nil { return nil, nil, err } keyPem := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}) return certPem, keyPem, nil } func generateBasicHtpasswd(user, pass string) ([]byte, error) { bpass, err := bcrypt.GenerateFromPassword([]byte(pass), bcrypt.DefaultCost) if err != nil { return nil, err } return []byte(user + ":" + string(bpass) + "\n"), nil } func getManifestDigest(sh *shell.Shell, ref string, platform spec.Platform) (string, error) { buffer := new(bytes.Buffer) sh.Pipe(buffer, []string{"ctr", "image", "list", "name==" + ref}, []string{"awk", `NR==2{printf "%s", $3}`}) content := sh.O("ctr", "content", "get", buffer.String()) var index spec.Index err := json.Unmarshal(content, &index) if err != nil { return "", err } matcher := platforms.OnlyStrict(platform) for _, desc := range index.Manifests { if matcher.Match(*desc.Platform) { return desc.Digest.String(), nil } } return "", fmt.Errorf("could not find manifest for %s for platform %s", ref, platforms.Format(platform)) } func getReferrers(sh *shell.Shell, regConfig registryConfig, imgName, digest string) (*spec.Index, error) { var index spec.Index output, err := sh.OLog("curl", "-u", regConfig.creds(), fmt.Sprintf("https://%s:443/v2/%s/referrers/%s", regConfig.host, imgName, digest)) if err != nil { return nil, fmt.Errorf("failed to get referrers: %w", err) } // If the referrers API returns a 404, try the fallback. 
if strings.Contains(string(output), "404") { referrersTag := strings.Replace(digest, ":", "-", 1) output, err = sh.OLog("curl", "--header", fmt.Sprintf("Accept: %s, %s", spec.MediaTypeImageIndex, images.MediaTypeDockerSchema2ManifestList), "-u", regConfig.creds(), fmt.Sprintf("https://%s:443/v2/%s/manifests/%s", regConfig.host, imgName, referrersTag)) if err != nil { return nil, fmt.Errorf("failed to get referrers: %w", err) } } err = json.Unmarshal(output, &index) if err != nil { return nil, fmt.Errorf("failed to unmarshal index: %w", err) } return &index, nil } func rebootContainerd(t *testing.T, sh *shell.Shell, customContainerdConfig, customSnapshotterConfig string) *testutil.LogMonitor { var ( containerdRoot = "/var/lib/containerd" containerdStatus = "/run/containerd/" snapshotterSocket = "/run/soci-snapshotter-grpc/soci-snapshotter-grpc.sock" snapshotterRoot = "/var/lib/soci-snapshotter-grpc" ) // cleanup directories testutil.KillMatchingProcess(sh, "containerd") testutil.KillMatchingProcess(sh, "soci-snapshotter-grpc") removeDirContents(sh, containerdRoot) if isDirExists(sh, containerdStatus) { removeDirContents(sh, containerdStatus) } if isFileExists(sh, snapshotterSocket) { sh.X("rm", snapshotterSocket) } if snDir := filepath.Join(snapshotterRoot, "/snapshotter/snapshots"); isDirExists(sh, snDir) { sh.X("find", snDir, "-maxdepth", "1", "-mindepth", "1", "-type", "d", "-exec", "umount", "{}/fs", ";") } removeDirContents(sh, snapshotterRoot) // run containerd and snapshotter containerdCmds := shell.C("containerd", "--log-level", containerdLogLevel) if customContainerdConfig != "" { containerdCmds = addConfig(t, sh, customContainerdConfig, containerdCmds...) } sh.Gox(containerdCmds...) snapshotterCmds := shell.C("/usr/local/bin/soci-snapshotter-grpc", "--log-level", sociLogLevel, "--address", snapshotterSocket) if customSnapshotterConfig != "" { snapshotterCmds = addConfig(t, sh, customSnapshotterConfig, snapshotterCmds...) 
} outR, errR, err := sh.R(snapshotterCmds...) if err != nil { t.Fatalf("failed to create pipe: %v", err) } reporter := testutil.NewTestingReporter(t) var m *testutil.LogMonitor = testutil.NewLogMonitor(reporter, outR, errR) if err = testutil.LogConfirmStartup(m); err != nil { t.Fatalf("snapshotter startup failed: %v", err) } // make sure containerd and soci-snapshotter-grpc are up-and-running sh.Retry(100, "ctr", "snapshots", "--snapshotter", "soci", "prepare", "connectiontest-dummy-"+xid.New().String(), "") sh.XLog("containerd", "--version") return m } func removeDirContents(sh *shell.Shell, dir string) { // `rm -rf Dir` directly sometimes causes failure, e.g., // rm: cannot remove '/var/lib/containerd/': Device or resource busy. // this might be a mount issue. sh.X("find", dir+"/.", "!", "-name", ".", "-prune", "-exec", "rm", "-rf", "{}", "+") } func addConfig(t *testing.T, sh *shell.Shell, conf string, cmds ...string) []string { configPath := strings.TrimSpace(string(sh.O("mktemp"))) if err := testutil.WriteFileContents(sh, configPath, []byte(conf), 0600); err != nil { t.Fatalf("failed to add config to %v: %v", configPath, err) } return append(cmds, "--config", configPath) } func checkOverlayFallbackCount(output string, expected int) error { lines := strings.Split(output, "\n") for _, line := range lines { if !strings.Contains(line, commonmetrics.FuseMountFailureCount) { continue } var got int _, err := fmt.Sscanf(line, fmt.Sprintf(`soci_fs_operation_count{layer="",operation_type="%s"} %%d`, commonmetrics.FuseMountFailureCount), &got) if err != nil { return err } if got != expected { return fmt.Errorf("unexpected overlay fallbacks: got %d, expected %d", got, expected) } return nil } if expected != 0 { return fmt.Errorf("expected %d overlay fallbacks but got 0", expected) } return nil } // middleSizeLayerInfo finds a layer not the smallest or largest (if possible), returns index, size, and layer count // It requires containerd to be running func 
middleSizeLayerInfo(t *testing.T, sh *shell.Shell, image imageInfo) (int, int64, int) { sh.O("nerdctl", "pull", "-q", "--platform", platforms.Format(image.platform), image.ref) imageManifestDigest, err := getManifestDigest(sh, image.ref, image.platform) if err != nil { t.Fatalf("Failed to get manifest digest: %v", err) } dgst, err := digest.Parse(imageManifestDigest) if err != nil { t.Fatalf("Failed to parse manifest digest: %v", err) } imageManifestJSON, err := FetchContentByDigest(sh, store.ContainerdContentStoreType, dgst) if err != nil { t.Fatalf("Failed to fetch manifest: %v", err) } imageManifest := new(spec.Manifest) if err := json.Unmarshal(imageManifestJSON, imageManifest); err != nil { t.Fatalf("cannot unmarshal image manifest: %v", err) } snapshotSizes := make([]int64, 0) for _, layerBlob := range imageManifest.Layers { snapshotSizes = append(snapshotSizes, layerBlob.Size) } sort.Slice(snapshotSizes, func(i, j int) bool { return snapshotSizes[i] < snapshotSizes[j] }) if snapshotSizes[0] == snapshotSizes[len(snapshotSizes)-1] { // This condition would almost certainly invalidate the expected behavior of the calling test t.Fatalf("all %v layers are the same size (%v) when seeking middle size layer", len(snapshotSizes), snapshotSizes[0]) } middleIndex := len(snapshotSizes) / 2 middleSize := snapshotSizes[middleIndex] if snapshotSizes[0] == middleSize { // if the middle is also the smallest, find the next larger layer for middleIndex < len(snapshotSizes)-1 && snapshotSizes[middleIndex] == middleSize { middleIndex++ } } else { // find the lowest index that is the same size as the middle for middleIndex > 0 && snapshotSizes[middleIndex-1] == middleSize { middleIndex-- } } return middleIndex, middleSize, len(snapshotSizes) } func fetchContentFromPath(sh *shell.Shell, path string) []byte { return sh.O("cat", path) } func fetchSociContentStoreContentByDigest(sh *shell.Shell, dgst digest.Digest) []byte { path := filepath.Join(store.DefaultSociContentStorePath, 
"blobs", dgst.Algorithm().String(), dgst.Encoded()) return sh.O("cat", path) } func fetchContainerdContentStoreContentByDigest(sh *shell.Shell, dgst digest.Digest) []byte { return sh.O("ctr", "content", "get", dgst.String()) } func FetchContentByDigest(sh *shell.Shell, contentStoreType store.ContentStoreType, dgst digest.Digest) ([]byte, error) { contentStoreType, err := store.CanonicalizeContentStoreType(contentStoreType) if err != nil { return nil, err } switch contentStoreType { case store.SociContentStoreType: return fetchSociContentStoreContentByDigest(sh, dgst), nil case store.ContainerdContentStoreType: return fetchContainerdContentStoreContentByDigest(sh, dgst), nil default: return nil, store.ErrUnknownContentStoreType(contentStoreType) } } func GetContentStoreConfigToml(opts ...store.Option) string { storeConfig := store.NewStoreConfig(opts...) configToml, err := toml.Marshal(storeConfig) if err != nil { return "" } return "\n[content_store]\n" + string(configToml) } soci-snapshotter-0.4.1/integration/ztoc_test.go000066400000000000000000000324061454010642300217150ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package integration import ( "archive/tar" "bytes" "compress/gzip" "encoding/base64" "encoding/json" "fmt" "io" "path/filepath" "strconv" "strings" "testing" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/google/go-cmp/cmp" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) type Info struct { Version string `json:"version"` BuildTool string `json:"build_tool"` Size int64 `json:"size"` SpanSize compression.Offset `json:"span_size"` NumSpans compression.SpanID `json:"num_spans"` NumFiles int `json:"num_files"` NumMultiSpanFiles int `json:"num_multi_span_files"` Files []FileInfo `json:"files"` } type FileInfo struct { Filename string `json:"filename"` Offset int64 `json:"offset"` Size int64 `json:"size"` Type string `json:"type"` StartSpan compression.SpanID `json:"start_span"` EndSpan compression.SpanID `json:"end_span"` } func TestSociZtocList(t *testing.T) { t.Parallel() sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", "") testImages := prepareSociIndices(t, sh) t.Run("soci ztoc list should print all ztocs", func(t *testing.T) { output := strings.Trim(string(sh.O("soci", "ztoc", "list")), "\n") outputLines := strings.Split(output, "\n") // output should have at least a header line if len(outputLines) < 1 { t.Fatalf("output should at least have a header line, actual output: %s", output) } outputLines = outputLines[1:] for _, img := range testImages { sociIndex, err := sociIndexFromDigest(sh, img.sociIndexDigest) if err != nil { t.Fatal(err) } for _, blob := range sociIndex.Blobs { if blob.MediaType != soci.SociLayerMediaType { continue } ztocExistChecker(t, outputLines, img, blob) } } }) 
t.Run("soci ztoc list --ztoc-digest ztocDigest should print a single ztoc", func(t *testing.T) { target := testImages[ubuntuImage] sociIndex, err := sociIndexFromDigest(sh, target.sociIndexDigest) if err != nil { t.Fatal(err) } for _, blob := range sociIndex.Blobs { if blob.MediaType != soci.SociLayerMediaType { continue } output := strings.Trim(string(sh.O("soci", "ztoc", "list", "--ztoc-digest", blob.Digest.String())), "\n") outputLines := strings.Split(output, "\n") // outputLines should have exact 2 lines: 1 header and 1 ztoc if len(outputLines) != 2 { t.Fatalf("output should have exactly a header line and a ztoc line: %s", output) } outputLines = outputLines[1:] ztocExistChecker(t, outputLines, target, blob) } }) t.Run("soci ztoc list --image-ref imageRef", func(t *testing.T) { for _, img := range testImages { sociIndex, err := sociIndexFromDigest(sh, img.sociIndexDigest) if err != nil { t.Fatal(err) } output := strings.Trim(string(sh.O("soci", "ztoc", "list", "--image-ref", img.imgInfo.ref)), "\n") outputLines := strings.Split(output, "\n") ztocOutput := outputLines[1:] for _, blob := range sociIndex.Blobs { if blob.MediaType != soci.SociLayerMediaType { continue } ztocExistChecker(t, ztocOutput, img, blob) } } }) t.Run("soci ztoc list --image-ref imageRef --ztoc-digest expectedZtoc", func(t *testing.T) { for _, img := range testImages { sociIndex, err := sociIndexFromDigest(sh, img.sociIndexDigest) if err != nil { t.Fatal(err) } var ztoc v1.Descriptor for _, blob := range sociIndex.Blobs { if blob.MediaType == soci.SociLayerMediaType { ztoc = blob break } } output := strings.Trim(string(sh.O("soci", "ztoc", "list", "--image-ref", img.imgInfo.ref, "--ztoc-digest", ztoc.Digest.String())), "\n") outputLines := strings.Split(output, "\n") ztocOutput := outputLines[1:] ztocExistChecker(t, ztocOutput, img, ztoc) } }) t.Run("soci ztoc list --image-ref imageRef --ztoc-digest unexpectedZtoc", func(t *testing.T) { for _, img := range testImages { _, err := 
sh.OLog("soci", "ztoc", "list", "--image-ref", img.imgInfo.ref, "--ztoc-digest", "digest") if err == nil { t.Fatalf("failed to return err") } } }) } func TestSociZtocInfo(t *testing.T) { t.Parallel() sh, done := newSnapshotterBaseShell(t) defer done() rebootContainerd(t, sh, "", "") testImages := prepareSociIndices(t, sh) getFullZtoc := func(sh *dockershell.Shell, ztocPath string) (*ztoc.Ztoc, error) { output := sh.O("cat", ztocPath) reader := bytes.NewReader(output) z, err := ztoc.Unmarshal(reader) return z, err } for _, img := range testImages { img := img tests := []struct { name string ztocDigest string expectErr bool }{ { name: "Empty ztoc digest", ztocDigest: "", expectErr: true, }, { name: "Invalid ztoc digest format", ztocDigest: "hello", expectErr: true, }, { name: "Invalid ztoc digest length", ztocDigest: "sha256:hello", expectErr: true, }, { name: "Ztoc digest does not exist", ztocDigest: testutil.RandomDigest(), expectErr: true, }, { name: "Correct ztoc digest", ztocDigest: img.ztocDigests[0], expectErr: false, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { var zinfo Info output, err := sh.OLog("soci", "ztoc", "info", tt.ztocDigest) if !tt.expectErr { err := json.Unmarshal(output, &zinfo) if err != nil { t.Fatalf("expected Info type got %s: %v", output, err) } blobPath, err := testutil.GetContentStoreBlobPath(config.DefaultContentStoreType) if err != nil { t.Fatalf("cannot get local content store blob path: %v", err) } dgst, err := digest.Parse(tt.ztocDigest) if err != nil { t.Fatalf("cannot parse digest: %v", err) } ztocPath := filepath.Join(blobPath, dgst.Encoded()) ztoc, err := getFullZtoc(sh, ztocPath) if err != nil { t.Fatalf("failed getting original ztoc: %v", err) } err = verifyInfoOutput(zinfo, ztoc) if err != nil { t.Fatal(err) } } else if err == nil { t.Fatal("failed to return error") } }) } } } func TestSociZtocGetFile(t *testing.T) { t.Parallel() sh, done := newSnapshotterBaseShell(t) defer done() 
rebootContainerd(t, sh, "", "") testImages := prepareSociIndices(t, sh) var ( tempOutputStream = "test.txt" randomFile = base64.StdEncoding.EncodeToString(testutil.RandomByteData(12)) randomZtocDigest = testutil.RandomDigest() ) getRandomFilePathsWithinZtoc := func(ztocDigest string, numFilesPerSpan int) []string { r := testutil.NewThreadsafeRandom() var ( zinfo Info randPaths []string ) regPathsBySpan := make(map[compression.SpanID][]string) output := sh.O("soci", "ztoc", "info", ztocDigest) json.Unmarshal(output, &zinfo) for _, file := range zinfo.Files { if file.Type == "reg" { regPathsBySpan[file.StartSpan] = append(regPathsBySpan[file.StartSpan], file.Filename) } } for _, regPaths := range regPathsBySpan { for i := 0; i < numFilesPerSpan; i++ { randPaths = append(randPaths, regPaths[r.Intn(len(regPaths))]) } } return randPaths } verifyOutputStream := func(contents, output []byte) error { d := cmp.Diff(contents, output) if d == "" { return nil } return fmt.Errorf("unexpected output; diff = %v", d) } for _, img := range testImages { img := img ztocDigest := img.ztocDigests[0] var layerDigest string sociIndex, err := sociIndexFromDigest(sh, img.sociIndexDigest) if err != nil { t.Fatalf("Failed getting soci index: %v", err) } for _, blob := range sociIndex.Blobs { if blob.Digest.String() == ztocDigest { layerDigest = blob.Annotations[soci.IndexAnnotationImageLayerDigest] break } } containerdStoreBlobPath, _ := testutil.GetContentStoreBlobPath(store.ContainerdContentStoreType) dgst, err := digest.Parse(layerDigest) if err != nil { t.Fatalf("cannot parse digest: %v", err) } layerContents := sh.O("cat", filepath.Join(containerdStoreBlobPath, dgst.Encoded())) files := getRandomFilePathsWithinZtoc(ztocDigest, 1) testCases := []struct { name string cmd []string toStdout bool expectedErr bool }{ { name: "Ztoc that does not exist", cmd: []string{"soci", "ztoc", "get-file", randomZtocDigest, randomFile}, toStdout: true, expectedErr: true, }, { name: "Ztoc exists but file 
does not exist", cmd: []string{"soci", "ztoc", "get-file", ztocDigest, randomFile}, toStdout: true, expectedErr: true, }, { name: "Ztoc and each file exists, file contents redirected to stdout", cmd: []string{"soci", "ztoc", "get-file", ztocDigest}, toStdout: true, expectedErr: false, }, { name: "Ztoc and each file exists, file contents redirected to output file", cmd: []string{"soci", "ztoc", "get-file", "-o", tempOutputStream, ztocDigest}, toStdout: false, expectedErr: false, }, } for _, tt := range testCases { tt := tt t.Run(tt.name, func(t *testing.T) { if !tt.expectedErr { for _, f := range files { cmd := append(tt.cmd, f) output, err := sh.OLog(cmd...) if err != nil { t.Fatalf("failed to return file contents: %v", err) } gzipReader, err := gzip.NewReader(bytes.NewReader(layerContents)) if err != nil { t.Fatalf("error returning gzip reader: %v", err) } tarReader := tar.NewReader(gzipReader) var contents []byte for { h, err := tarReader.Next() if err == io.EOF { break } if h.Name == f { contents, err = io.ReadAll(tarReader) if err != nil { t.Fatalf("failed getting original file content: %v", err) } break } } if tt.toStdout { output = output[:len(output)-1] } else { output = sh.O("cat", tempOutputStream) } err = verifyOutputStream(contents, output) if err != nil { t.Fatal(err) } } } else if _, err := sh.OLog(tt.cmd...); err == nil { t.Fatal("failed to return error") } }) } } } // ztocExistChecker checks if a ztoc exists in `soci ztoc list` output func ztocExistChecker(t *testing.T, listOutputLines []string, img testImageIndex, ztocBlob v1.Descriptor) { ztocDigest := ztocBlob.Digest.String() size := strconv.FormatInt(ztocBlob.Size, 10) layerDigest := ztocBlob.Annotations[soci.IndexAnnotationImageLayerDigest] for _, line := range listOutputLines { if strings.Contains(line, ztocDigest) && strings.Contains(line, size) && strings.Contains(line, layerDigest) { return } } t.Fatalf("invalid ztoc from index %s for image %s:\n expected ztoc: digest: %s, size: %s, layer 
digest: %s\n actual output lines: %s", img.sociIndexDigest, img.imgInfo.ref, ztocDigest, size, layerDigest, listOutputLines) } func verifyInfoOutput(zinfo Info, ztoc *ztoc.Ztoc) error { if zinfo.Version != string(ztoc.Version) { return fmt.Errorf("different versions: expected %s got %s", ztoc.Version, zinfo.Version) } if zinfo.BuildTool != ztoc.BuildToolIdentifier { return fmt.Errorf("different buildtool: expected %s got %s", ztoc.BuildToolIdentifier, zinfo.BuildTool) } if zinfo.NumFiles != len(ztoc.FileMetadata) { return fmt.Errorf("different file counts: expected %v got %v", len(ztoc.FileMetadata), zinfo.NumFiles) } if zinfo.NumSpans != ztoc.MaxSpanID+1 { return fmt.Errorf("different number of spans: expected %v got %v", ztoc.MaxSpanID+1, zinfo.NumSpans) } for i := 0; i < len(zinfo.Files); i++ { zinfoFile := zinfo.Files[i] ztocFile := ztoc.FileMetadata[i] if zinfoFile.Filename != ztocFile.Name { return fmt.Errorf("different filename: expected %s got %s", ztocFile.Name, zinfoFile.Filename) } if zinfoFile.Offset != int64(ztocFile.UncompressedOffset) { return fmt.Errorf("different file offset: expected %v got %v", int64(ztocFile.UncompressedOffset), zinfoFile.Offset) } if zinfoFile.Size != int64(ztocFile.UncompressedSize) { return fmt.Errorf("different file size: expected %v got %v", int64(ztocFile.UncompressedSize), zinfoFile.Size) } if zinfoFile.Type != ztocFile.Type { return fmt.Errorf("different file type: expected %s got %s", ztocFile.Type, zinfoFile.Type) } } return nil } soci-snapshotter-0.4.1/metadata/000077500000000000000000000000001454010642300166005ustar00rootroot00000000000000soci-snapshotter-0.4.1/metadata/db.go000066400000000000000000000265631454010642300175300ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metadata import ( "encoding/binary" "fmt" "os" "github.com/awslabs/soci-snapshotter/util/dbutil" "github.com/awslabs/soci-snapshotter/ztoc/compression" bolt "go.etcd.io/bbolt" ) // Metadata package stores filesystem metadata in the following schema. // // - filesystems // - *filesystem id* : bucket for each filesystem keyed by a unique string. // - nodes // - *node id* : bucket for each node keyed by a uniqe uint64. // - size : : size of the regular node. // - modtime : : modification time of the node. // - linkName : : link target of symlink // - mode : : permission and mode bits (os.FileMode). // - uid : : uid of the owner. // - gid : : gid of the owner. // - devMajor : : the major device number for device // - devMinor : : the minor device number for device // - xattrKey : : key of the first extended attribute. // - xattrValue : : value of the first extended attribute // - xattrsExtra : 2nd and the following extended attribute. 
// - *key* : : map of key to value string // - numLink : : the number of links pointing to this node. // - metadata // - *node id* : bucket for each node keyed by a uniqe uint64. // - childName : : base name of the first child // - childID : : id of the first child // - childrenExtra : 2nd and following child nodes of directory. // - *basename* : : map of basename string to the child node id // - uncompressedOffset : : the offset in the uncompressed data, where the node is stored. var ( bucketKeyFilesystems = []byte("filesystems") bucketKeyNodes = []byte("nodes") bucketKeySize = []byte("size") bucketKeyModTime = []byte("modtime") bucketKeyLinkName = []byte("linkName") bucketKeyMode = []byte("mode") bucketKeyUID = []byte("uid") bucketKeyGID = []byte("gid") bucketKeyDevMajor = []byte("devMajor") bucketKeyDevMinor = []byte("devMinor") bucketKeyXattrKey = []byte("xattrKey") bucketKeyXattrValue = []byte("xattrValue") bucketKeyXattrsExtra = []byte("xattrsExtra") bucketKeyNumLink = []byte("numLink") bucketKeyMetadata = []byte("metadata") bucketKeyChildName = []byte("childName") bucketKeyChildID = []byte("childID") bucketKeyChildrenExtra = []byte("childrenExtra") bucketKeyUncompressedOffset = []byte("uncompressedOffset") ) type childEntry struct { base string id uint32 } type metadataEntry struct { children map[string]childEntry UncompressedOffset compression.Offset UncompressedSize compression.Offset } func getNodes(tx *bolt.Tx, fsID string) (*bolt.Bucket, error) { filesystems := tx.Bucket(bucketKeyFilesystems) if filesystems == nil { return nil, fmt.Errorf("fs %q not found: no fs is registered", fsID) } lbkt := filesystems.Bucket([]byte(fsID)) if lbkt == nil { return nil, fmt.Errorf("fs bucket for %q not found", fsID) } nodes := lbkt.Bucket(bucketKeyNodes) if nodes == nil { return nil, fmt.Errorf("nodes bucket for %q not found", fsID) } return nodes, nil } func getMetadata(tx *bolt.Tx, fsID string) (*bolt.Bucket, error) { filesystems := tx.Bucket(bucketKeyFilesystems) if 
filesystems == nil { return nil, fmt.Errorf("fs %q not found: no fs is registered", fsID) } lbkt := filesystems.Bucket([]byte(fsID)) if lbkt == nil { return nil, fmt.Errorf("fs bucket for %q not found", fsID) } md := lbkt.Bucket(bucketKeyMetadata) if md == nil { return nil, fmt.Errorf("metadata bucket for fs %q not found", fsID) } return md, nil } func getNodeBucketByID(nodes *bolt.Bucket, id uint32) (*bolt.Bucket, error) { b := nodes.Bucket(encodeID(id)) if b == nil { return nil, fmt.Errorf("node bucket for %d not found", id) } return b, nil } func getMetadataBucketByID(md *bolt.Bucket, id uint32) (*bolt.Bucket, error) { b := md.Bucket(encodeID(id)) if b == nil { return nil, fmt.Errorf("metadata bucket for %d not found", id) } return b, nil } func writeAttr(b *bolt.Bucket, attr *Attr) error { for _, v := range []struct { key []byte val int64 }{ {bucketKeySize, attr.Size}, {bucketKeyUID, int64(attr.UID)}, {bucketKeyGID, int64(attr.GID)}, {bucketKeyDevMajor, int64(attr.DevMajor)}, {bucketKeyDevMinor, int64(attr.DevMinor)}, {bucketKeyNumLink, int64(attr.NumLink - 1)}, // numLink = 0 means num link = 1 in DB } { if v.val != 0 { val, err := dbutil.EncodeInt(v.val) if err != nil { return err } if err := b.Put(v.key, val); err != nil { return err } } } if !attr.ModTime.IsZero() { te, err := attr.ModTime.GobEncode() if err != nil { return err } if err := b.Put(bucketKeyModTime, te); err != nil { return err } } if len(attr.LinkName) > 0 { if err := b.Put(bucketKeyLinkName, []byte(attr.LinkName)); err != nil { return err } } if attr.Mode != 0 { val, err := encodeUint(uint64(attr.Mode)) if err != nil { return err } if err := b.Put(bucketKeyMode, val); err != nil { return err } } if len(attr.Xattrs) > 0 { var firstK string var firstV []byte for k, v := range attr.Xattrs { firstK, firstV = k, v break } if err := b.Put(bucketKeyXattrKey, []byte(firstK)); err != nil { return err } if err := b.Put(bucketKeyXattrValue, firstV); err != nil { return err } var xbkt *bolt.Bucket for 
k, v := range attr.Xattrs { if k == firstK || len(v) == 0 { continue } if xbkt == nil { if xbkt := b.Bucket(bucketKeyXattrsExtra); xbkt != nil { // Reset if err := b.DeleteBucket(bucketKeyXattrsExtra); err != nil { return err } } var err error xbkt, err = b.CreateBucket(bucketKeyXattrsExtra) if err != nil { return err } } if err := xbkt.Put([]byte(k), v); err != nil { return fmt.Errorf("failed to set xattr %q=%q: %w", k, string(v), err) } } } return nil } func readAttr(b *bolt.Bucket, attr *Attr) error { return b.ForEach(func(k, v []byte) error { switch string(k) { case string(bucketKeySize): attr.Size, _ = binary.Varint(v) case string(bucketKeyModTime): if err := (&attr.ModTime).GobDecode(v); err != nil { return err } case string(bucketKeyLinkName): attr.LinkName = string(v) case string(bucketKeyMode): mode, _ := binary.Uvarint(v) attr.Mode = os.FileMode(uint32(mode)) case string(bucketKeyUID): i, _ := binary.Varint(v) attr.UID = int(i) case string(bucketKeyGID): i, _ := binary.Varint(v) attr.GID = int(i) case string(bucketKeyDevMajor): i, _ := binary.Varint(v) attr.DevMajor = int(i) case string(bucketKeyDevMinor): i, _ := binary.Varint(v) attr.DevMinor = int(i) case string(bucketKeyNumLink): i, _ := binary.Varint(v) attr.NumLink = int(i) + 1 // numLink = 0 means num link = 1 in DB case string(bucketKeyXattrKey): if attr.Xattrs == nil { attr.Xattrs = make(map[string][]byte) } attr.Xattrs[string(v)] = b.Get(bucketKeyXattrValue) case string(bucketKeyXattrsExtra): if err := b.Bucket(k).ForEach(func(k, v []byte) error { if attr.Xattrs == nil { attr.Xattrs = make(map[string][]byte) } attr.Xattrs[string(k)] = v return nil }); err != nil { return err } } return nil }) } func readNumLink(b *bolt.Bucket) int { // numLink = 0 means num link = 1 in BD numLink, _ := binary.Varint(b.Get(bucketKeyNumLink)) return int(numLink) + 1 } func readChild(md *bolt.Bucket, base string) (uint32, error) { if base == string(md.Get(bucketKeyChildName)) { return 
decodeID(md.Get(bucketKeyChildID)), nil } cbkt := md.Bucket(bucketKeyChildrenExtra) if cbkt == nil { return 0, fmt.Errorf("extra children not found") } eid := cbkt.Get([]byte(base)) if len(eid) == 0 { return 0, fmt.Errorf("children %q not found", base) } return decodeID(eid), nil } func writeMetadataEntry(md *bolt.Bucket, m *metadataEntry) error { if len(m.children) > 0 { var firstChildName string var firstChild childEntry for name, child := range m.children { firstChildName, firstChild = name, child break } if err := md.Put(bucketKeyChildID, encodeID(firstChild.id)); err != nil { return fmt.Errorf("failed to put id of first child %q: %w", firstChildName, err) } if err := md.Put(bucketKeyChildName, []byte(firstChildName)); err != nil { return fmt.Errorf("failed to put name first child %q: %w", firstChildName, err) } if len(m.children) > 1 { var cbkt *bolt.Bucket for k, c := range m.children { if k == firstChildName { continue } if cbkt == nil { if cbkt := md.Bucket(bucketKeyChildrenExtra); cbkt != nil { // Reset if err := md.DeleteBucket(bucketKeyChildrenExtra); err != nil { return err } } var err error cbkt, err = md.CreateBucket(bucketKeyChildrenExtra) if err != nil { return err } } if err := cbkt.Put([]byte(c.base), encodeID(c.id)); err != nil { return fmt.Errorf("failed to add child ID %q: %w", c.id, err) } } } } if err := putFileSize(md, bucketKeyUncompressedOffset, m.UncompressedOffset); err != nil { return fmt.Errorf("failed to set UncompressedOffset value %d: %w", m.UncompressedOffset, err) } return nil } func putFileSize(b *bolt.Bucket, k []byte, v compression.Offset) error { return putInt(b, k, int64(v)) } func putInt(b *bolt.Bucket, k []byte, v int64) error { i, err := dbutil.EncodeInt(v) if err != nil { return err } return b.Put(k, i) } func encodeID(id uint32) []byte { b := [4]byte{} binary.BigEndian.PutUint32(b[:], id) return b[:] } func decodeID(b []byte) uint32 { return binary.BigEndian.Uint32(b) } func encodeUint(i uint64) ([]byte, error) { var ( 
buf [binary.MaxVarintLen64]byte iEncoded = buf[:] ) iEncoded = iEncoded[:binary.PutUvarint(iEncoded, i)] if len(iEncoded) == 0 { return nil, fmt.Errorf("failed encoding integer = %v", i) } return iEncoded, nil } soci-snapshotter-0.4.1/metadata/metadata.go000066400000000000000000000065731454010642300207220ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metadata import ( "io" "os" "time" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" ) // Attr reprensents the attributes of a node. type Attr struct { // Size, for regular files, is the logical size of the file. Size int64 // ModTime is the modification time of the node. ModTime time.Time // LinkName, for symlinks, is the link target. LinkName string // Mode is the permission and mode bits. Mode os.FileMode // UID is the user ID of the owner. 
UID int // GID is the group ID of the owner. GID int // DevMajor is the major device number for device. DevMajor int // DevMinor is the major device number for device. DevMinor int // Xattrs are the extended attribute for the node. Xattrs map[string][]byte // NumLink is the number of names pointing to this node. NumLink int } // Store reads the provided blob and creates a metadata reader. type Store func(sr *io.SectionReader, toc ztoc.TOC, opts ...Option) (Reader, error) // Reader provides access to file metadata of a blob. type Reader interface { RootID() uint32 GetAttr(id uint32) (attr Attr, err error) GetChild(pid uint32, base string) (id uint32, attr Attr, err error) ForeachChild(id uint32, f func(name string, id uint32, mode os.FileMode) bool) error OpenFile(id uint32) (File, error) Clone(sr *io.SectionReader) (Reader, error) Close() error } type File interface { GetUncompressedFileSize() compression.Offset GetUncompressedOffset() compression.Offset } type Options struct { Telemetry *Telemetry } // Option is an option to configure the behaviour of reader. type Option func(o *Options) error // WithTelemetry option specifies the telemetry hooks func WithTelemetry(telemetry *Telemetry) Option { return func(o *Options) error { o.Telemetry = telemetry return nil } } // A func which takes start time and records the diff type MeasureLatencyHook func(time.Time) // A struct which defines telemetry hooks. By implementing these hooks you should be able to record // the latency metrics of the respective steps of SOCI open operation. type Telemetry struct { InitMetadataStoreLatency MeasureLatencyHook // measure time to initialize metadata store (in milliseconds) } soci-snapshotter-0.4.1/metadata/reader.go000066400000000000000000000423031454010642300203730ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metadata import ( "bytes" "encoding/binary" "errors" "fmt" "io" "math" "os" "path" "path/filepath" "sort" "strings" "sync" "time" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/rs/xid" bolt "go.etcd.io/bbolt" "golang.org/x/sync/errgroup" ) // reader stores filesystem metadata parsed from ztoc to metadata DB // and provides methods to read them. type reader struct { db *bolt.DB fsID string rootID uint32 sr *io.SectionReader curID uint32 curIDMu sync.Mutex initG *errgroup.Group } func (r *reader) nextID() (uint32, error) { r.curIDMu.Lock() defer r.curIDMu.Unlock() if r.curID == math.MaxUint32 { return 0, fmt.Errorf("sequence id too large") } r.curID++ return r.curID, nil } // NewReader parses ztoc and stores filesystem metadata to the provided DB. 
func NewReader(db *bolt.DB, sr *io.SectionReader, toc ztoc.TOC, opts ...Option) (Reader, error) { var rOpts Options for _, o := range opts { if err := o(&rOpts); err != nil { return nil, fmt.Errorf("failed to apply option: %w", err) } } r := &reader{sr: sr, db: db, initG: new(errgroup.Group)} start := time.Now() if rOpts.Telemetry != nil && rOpts.Telemetry.InitMetadataStoreLatency != nil { rOpts.Telemetry.InitMetadataStoreLatency(start) } if err := r.init(toc, rOpts); err != nil { return nil, fmt.Errorf("failed to initialize metadata: %w", err) } return r, nil } // RootID returns ID of the root node. func (r *reader) RootID() uint32 { return r.rootID } // Clone returns a new reader identical to the current reader // but uses the provided section reader for retrieving file paylaods. func (r *reader) Clone(sr *io.SectionReader) (Reader, error) { if err := r.waitInit(); err != nil { return nil, err } return &reader{ db: r.db, fsID: r.fsID, rootID: r.rootID, sr: sr, initG: new(errgroup.Group), }, nil } func (r *reader) init(toc ztoc.TOC, rOpts Options) (retErr error) { // Initialize root node var ok bool for i := 0; i < 100; i++ { fsID := xid.New().String() if err := r.initRootNode(fsID); err != nil { if errors.Is(err, bolt.ErrBucketExists) { continue // try with another id } return fmt.Errorf("failed to initialize root node %q: %w", fsID, err) } ok = true break } if !ok { return fmt.Errorf("failed to get a unique id for metadata reader") } return r.initNodes(toc) } func (r *reader) initRootNode(fsID string) error { return r.db.Batch(func(tx *bolt.Tx) (err error) { filesystems, err := tx.CreateBucketIfNotExists(bucketKeyFilesystems) if err != nil { return err } lbkt, err := filesystems.CreateBucket([]byte(fsID)) if err != nil { return err } r.fsID = fsID if _, err := lbkt.CreateBucket(bucketKeyMetadata); err != nil { return err } nodes, err := lbkt.CreateBucket(bucketKeyNodes) if err != nil { return err } rootID, err := r.nextID() if err != nil { return err } 
rootBucket, err := nodes.CreateBucket(encodeID(rootID)) if err != nil { return err } if err := writeAttr(rootBucket, &Attr{ Mode: os.ModeDir | 0755, NumLink: 2, // The directory itself(.) and the parent link to this directory. }); err != nil { return err } r.rootID = rootID return err }) } func (r *reader) initNodes(toc ztoc.TOC) error { md := make(map[uint32]*metadataEntry) if err := r.db.Batch(func(tx *bolt.Tx) (err error) { nodes, err := getNodes(tx, r.fsID) if err != nil { return err } nodes.FillPercent = 1.0 // we only do sequential write to this bucket var attr Attr for _, ent := range toc.FileMetadata { var id uint32 var b *bolt.Bucket ent.Name = cleanEntryName(ent.Name) isLink := ent.Type == "hardlink" if isLink { id, err = getIDByName(md, ent.Linkname, r.rootID) if err != nil { return fmt.Errorf("%q is a hardlink but cannot get link destination %q: %w", ent.Name, ent.Linkname, err) } b, err = getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("cannot get hardlink destination %q ==> %q (%d): %w", ent.Name, ent.Linkname, id, err) } numLink, _ := binary.Varint(b.Get(bucketKeyNumLink)) if err := putInt(b, bucketKeyNumLink, numLink+1); err != nil { return fmt.Errorf("cannot put NumLink of %q ==> %q: %w", ent.Name, ent.Linkname, err) } } else { // Write node bucket var found bool if ent.Type == "dir" { // Check if this directory is already created, if so overwrite it. id, err = getIDByName(md, ent.Name, r.rootID) if err == nil { b, err = getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("failed to get directory bucket %d: %w", id, err) } found = true attr.NumLink = readNumLink(b) } } if !found { // No existing node. Create a new one. id, err = r.nextID() if err != nil { return err } b, err = nodes.CreateBucket(encodeID(id)) if err != nil { return err } attr.NumLink = 1 // at least the parent dir references this directory. if ent.Type == "dir" { attr.NumLink++ // at least "." references this directory. 
} } if err := writeAttr(b, attrFromZtocEntry(&ent, &attr)); err != nil { return fmt.Errorf("failed to set attr to %d(%q): %w", id, ent.Name, err) } } pdirName := parentDir(ent.Name) pid, pb, err := r.getOrCreateDir(nodes, md, pdirName, r.rootID) if err != nil { return fmt.Errorf("failed to create parent directory %q of %q: %w", pdirName, ent.Name, err) } if err := setChild(md, pb, pid, path.Base(ent.Name), id, ent.Type == "dir"); err != nil { return err } if !isLink { if md[id] == nil { md[id] = &metadataEntry{} } md[id].UncompressedOffset = ent.UncompressedOffset } } return nil }); err != nil { return err } addendum := make([]struct { id []byte md *metadataEntry }, len(md)) i := 0 for id, d := range md { addendum[i].id, addendum[i].md = encodeID(id), d i++ } sort.Slice(addendum, func(i, j int) bool { return bytes.Compare(addendum[i].id, addendum[j].id) < 0 }) if err := r.db.Batch(func(tx *bolt.Tx) (err error) { meta, err := getMetadata(tx, r.fsID) if err != nil { return err } meta.FillPercent = 1.0 // we only do sequential write to this bucket for _, m := range addendum { md, err := meta.CreateBucket(m.id) if err != nil { return err } if err := writeMetadataEntry(md, m.md); err != nil { return err } } return nil }); err != nil { return err } return nil } func (r *reader) getOrCreateDir(nodes *bolt.Bucket, md map[uint32]*metadataEntry, d string, rootID uint32) (id uint32, b *bolt.Bucket, err error) { id, err = getIDByName(md, d, rootID) if err != nil { id, err = r.nextID() if err != nil { return 0, nil, err } b, err = nodes.CreateBucket(encodeID(id)) if err != nil { return 0, nil, err } attr := &Attr{ Mode: os.ModeDir | 0755, NumLink: 2, // The directory itself(.) and the parent link to this directory. 
} if err := writeAttr(b, attr); err != nil { return 0, nil, err } if d != "" { pid, pb, err := r.getOrCreateDir(nodes, md, parentDir(d), rootID) if err != nil { return 0, nil, err } if err := setChild(md, pb, pid, path.Base(d), id, true); err != nil { return 0, nil, err } } } else { b, err = getNodeBucketByID(nodes, id) if err != nil { return 0, nil, fmt.Errorf("failed to get dir bucket %d: %w", id, err) } } return id, b, nil } func (r *reader) waitInit() error { // TODO: add timeout err := r.initG.Wait() if err != nil { return fmt.Errorf("initialization failed: %w", err) } return nil } func (r *reader) view(fn func(tx *bolt.Tx) error) error { if err := r.waitInit(); err != nil { return err } return r.db.View(func(tx *bolt.Tx) error { return fn(tx) }) } func (r *reader) update(fn func(tx *bolt.Tx) error) error { if err := r.waitInit(); err != nil { return err } return r.db.Batch(func(tx *bolt.Tx) error { return fn(tx) }) } // Close closes this reader. This removes underlying filesystem metadata as well. func (r *reader) Close() error { return r.update(func(tx *bolt.Tx) (err error) { filesystems := tx.Bucket(bucketKeyFilesystems) if filesystems == nil { return nil } return filesystems.DeleteBucket([]byte(r.fsID)) }) } // GetAttr returns file attribute of specified node. 
func (r *reader) GetAttr(id uint32) (attr Attr, _ error) { if r.rootID == id { // no need to wait for root dir if err := r.db.View(func(tx *bolt.Tx) error { nodes, err := getNodes(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for sarching attr %d: %w", r.fsID, id, err) } b, err := getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("failed to get attr bucket %d: %w", id, err) } return readAttr(b, &attr) }); err != nil { return Attr{}, err } return attr, nil } if err := r.view(func(tx *bolt.Tx) error { nodes, err := getNodes(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for sarching attr %d: %w", r.fsID, id, err) } b, err := getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("failed to get attr bucket %d: %w", id, err) } return readAttr(b, &attr) }); err != nil { return Attr{}, err } return } // GetChild returns a child node that has the specified base name. func (r *reader) GetChild(pid uint32, base string) (id uint32, attr Attr, _ error) { if err := r.view(func(tx *bolt.Tx) error { metadataEntries, err := getMetadata(tx, r.fsID) if err != nil { return fmt.Errorf("metadata bucket of %q not found for getting child of %d: %w", r.fsID, pid, err) } md, err := getMetadataBucketByID(metadataEntries, pid) if err != nil { return fmt.Errorf("failed to get parent metadata %d: %w", pid, err) } id, err = readChild(md, base) if err != nil { return fmt.Errorf("failed to read child %q of %d: %w", base, pid, err) } nodes, err := getNodes(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for getting child of %d: %w", r.fsID, pid, err) } child, err := getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("failed to get child bucket %d: %w", id, err) } return readAttr(child, &attr) }); err != nil { return 0, Attr{}, err } return } // ForeachChild calls the specified callback function for each child node. 
// When the callback returns non-nil error, this stops the iteration. func (r *reader) ForeachChild(id uint32, f func(name string, id uint32, mode os.FileMode) bool) error { type childInfo struct { id uint32 mode os.FileMode } children := make(map[string]childInfo) if err := r.view(func(tx *bolt.Tx) error { metadataEntries, err := getMetadata(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for getting child of %d: %w", r.fsID, id, err) } md, err := getMetadataBucketByID(metadataEntries, id) if err != nil { return nil // no child } var nodes *bolt.Bucket firstName := md.Get(bucketKeyChildName) if len(firstName) != 0 { firstID := decodeID(md.Get(bucketKeyChildID)) if nodes == nil { nodes, err = getNodes(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for getting children of %d: %w", r.fsID, id, err) } } firstChild, err := getNodeBucketByID(nodes, firstID) if err != nil { return fmt.Errorf("failed to get first child bucket %d: %w", firstID, err) } mode, _ := binary.Uvarint(firstChild.Get(bucketKeyMode)) children[string(firstName)] = childInfo{firstID, os.FileMode(uint32(mode))} } cbkt := md.Bucket(bucketKeyChildrenExtra) if cbkt == nil { return nil // no child } if nodes == nil { nodes, err = getNodes(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for getting children of %d: %w", r.fsID, id, err) } } return cbkt.ForEach(func(k, v []byte) error { id := decodeID(v) child, err := getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("failed to get child bucket %d: %w", id, err) } mode, _ := binary.Uvarint(child.Get(bucketKeyMode)) children[string(k)] = childInfo{id, os.FileMode(uint32(mode))} return nil }) }); err != nil { return err } for k, e := range children { if !f(k, e.id, e.mode) { break } } return nil } // OpenFile returns a section reader of the specified node. 
func (r *reader) OpenFile(id uint32) (File, error) { var size int64 var uncompressedOffset compression.Offset if err := r.view(func(tx *bolt.Tx) error { nodes, err := getNodes(tx, r.fsID) if err != nil { return fmt.Errorf("nodes bucket of %q not found for opening %d: %w", r.fsID, id, err) } b, err := getNodeBucketByID(nodes, id) if err != nil { return fmt.Errorf("failed to get file bucket %d: %w", id, err) } size, _ = binary.Varint(b.Get(bucketKeySize)) m, _ := binary.Uvarint(b.Get(bucketKeyMode)) if !os.FileMode(uint32(m)).IsRegular() { return fmt.Errorf("%q is not a regular file", id) } metadataEntries, err := getMetadata(tx, r.fsID) if err != nil { return fmt.Errorf("metadata bucket of %q not found for opening %d: %w", r.fsID, id, err) } if md, err := getMetadataBucketByID(metadataEntries, id); err == nil { uncompressedOffset = getUncompressedOffset(md) } return nil }); err != nil { return nil, err } return &file{uncompressedOffset, compression.Offset(size)}, nil } func getUncompressedOffset(md *bolt.Bucket) compression.Offset { ucompOffset, _ := binary.Varint(md.Get(bucketKeyUncompressedOffset)) return compression.Offset(ucompOffset) } type file struct { uncompressedOffset compression.Offset uncompressedSize compression.Offset } func (fr *file) GetUncompressedFileSize() compression.Offset { return fr.uncompressedSize } func (fr *file) GetUncompressedOffset() compression.Offset { return fr.uncompressedOffset } func attrFromZtocEntry(src *ztoc.FileMetadata, dst *Attr) *Attr { dst.Size = int64(src.UncompressedSize) dst.ModTime = src.ModTime dst.LinkName = src.Linkname dst.Mode = src.FileMode() dst.UID = src.UID dst.GID = src.GID dst.DevMajor = int(src.Devmajor) dst.DevMinor = int(src.Devminor) xattrs := make(map[string][]byte) for k, v := range src.Xattrs { xattrs[k] = []byte(v) } dst.Xattrs = xattrs return dst } func getIDByName(md map[uint32]*metadataEntry, name string, rootID uint32) (uint32, error) { name = cleanEntryName(name) if name == "" { return rootID, 
nil } dir, base := filepath.Split(name) pid, err := getIDByName(md, dir, rootID) if err != nil { return 0, err } if md[pid] == nil { return 0, fmt.Errorf("not found metadata of %d", pid) } if md[pid].children == nil { return 0, fmt.Errorf("not found children of %q", pid) } c, ok := md[pid].children[base] if !ok { return 0, fmt.Errorf("not found child %q in %d", base, pid) } return c.id, nil } func setChild(md map[uint32]*metadataEntry, pb *bolt.Bucket, pid uint32, base string, id uint32, isDir bool) error { if md[pid] == nil { md[pid] = &metadataEntry{} } if md[pid].children == nil { md[pid].children = make(map[string]childEntry) } md[pid].children[base] = childEntry{base, id} if isDir { numLink, _ := binary.Varint(pb.Get(bucketKeyNumLink)) if err := putInt(pb, bucketKeyNumLink, numLink+1); err != nil { return fmt.Errorf("cannot add numlink for children: %w", err) } } return nil } func parentDir(p string) string { dir, _ := path.Split(p) return strings.TrimSuffix(dir, "/") } func cleanEntryName(name string) string { // Use path.Clean to consistently deal with path separators across platforms. return strings.TrimPrefix(path.Clean("/"+name), "/") } func (r *reader) NumOfNodes() (i int, _ error) { if err := r.view(func(tx *bolt.Tx) error { nodes, err := getNodes(tx, r.fsID) if err != nil { return err } return nodes.ForEach(func(k, v []byte) error { b := nodes.Bucket(k) if b == nil { return fmt.Errorf("entry bucket for %q not found", string(k)) } var attr Attr if err := readAttr(b, &attr); err != nil { return err } i++ return nil }) }); err != nil { return 0, err } return } soci-snapshotter-0.4.1/metadata/reader_test.go000066400000000000000000000040441454010642300214320ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metadata import ( "io" "os" "testing" "github.com/awslabs/soci-snapshotter/ztoc" bolt "go.etcd.io/bbolt" ) func TestMetadataReader(t *testing.T) { testReader(t, newTestableReader) } func newTestableReader(sr *io.SectionReader, toc ztoc.TOC, opts ...Option) (testableReader, error) { f, err := os.CreateTemp("", "readertestdb") if err != nil { return nil, err } defer os.Remove(f.Name()) db, err := bolt.Open(f.Name(), 0600, nil) if err != nil { return nil, err } r, err := NewReader(db, sr, toc, opts...) if err != nil { return nil, err } return &testableReadCloser{ testableReader: r.(*reader), closeFn: func() error { db.Close() return os.Remove(f.Name()) }, }, nil } type testableReadCloser struct { testableReader closeFn func() error } func (r *testableReadCloser) Close() error { r.closeFn() return r.testableReader.Close() } soci-snapshotter-0.4.1/metadata/testutil.go000066400000000000000000000026361454010642300210130ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metadata import ( "io" "os" "github.com/awslabs/soci-snapshotter/ztoc" "go.etcd.io/bbolt" ) // NewTempDbStore returns a Reader by creating a temp bolt db, which will // be removed when `Reader.Close()` is called. func NewTempDbStore(sr *io.SectionReader, toc ztoc.TOC, opts ...Option) (Reader, error) { f, err := os.CreateTemp("", "readertestdb") if err != nil { return nil, err } defer f.Close() db, err := bbolt.Open(f.Name(), 0600, nil) if err != nil { return nil, err } r, err := NewReader(db, sr, toc, opts...) if err != nil { return nil, err } return &readCloser{ Reader: r, closeFn: func() error { db.Close() return os.Remove(f.Name()) }, }, nil } type readCloser struct { Reader closeFn func() error } func (r *readCloser) Close() error { r.closeFn() return r.Reader.Close() } soci-snapshotter-0.4.1/metadata/util_test.go000066400000000000000000000361271454010642300211540ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metadata import ( "compress/gzip" "errors" "fmt" "io" "os" "path" "path/filepath" "strings" "testing" "time" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc" ) var allowedPrefix = [4]string{"", "./", "/", "../"} var srcCompressions = map[string]int{ "gzip-nocompression": gzip.NoCompression, "gzip-bestspeed": gzip.BestSpeed, "gzip-bestcompression": gzip.BestCompression, "gzip-defaultcompression": gzip.DefaultCompression, "gzip-huffmanonly": gzip.HuffmanOnly, } type readerFactory func(sr *io.SectionReader, toc ztoc.TOC, opts ...Option) (r testableReader, err error) type testableReader interface { Reader NumOfNodes() (i int, _ error) } // testReader tests Reader returns correct file metadata. 
func testReader(t *testing.T, factory readerFactory) { sampleTime := time.Now().Truncate(time.Second) tests := []struct { name string in []testutil.TarEntry want []check }{ { name: "files", in: []testutil.TarEntry{ testutil.File("foo", "foofoo", testutil.WithFileMode(0644|os.ModeSetuid)), testutil.Dir("bar/"), testutil.File("bar/baz.txt", "bazbazbaz", testutil.WithFileOwner(1000, 1000)), testutil.File("xxx.txt", "xxxxx", testutil.WithFileModTime(sampleTime)), testutil.File("y.txt", "", testutil.WithFileXattrs(map[string]string{"testkey": "testval"})), }, want: []check{ numOfNodes(6), // root dir + 1 dir + 4 files hasFile("foo", 6), hasMode("foo", 0644|os.ModeSetuid), hasFile("bar/baz.txt", 9), hasOwner("bar/baz.txt", 1000, 1000), hasFile("xxx.txt", 5), hasModTime("xxx.txt", sampleTime), hasFile("y.txt", 0), // For details on the keys of Xattrs, see https://pkg.go.dev/archive/tar#Header hasXattrs("y.txt", map[string]string{"SCHILY.xattr.testkey": "testval"}), }, }, { name: "dirs", in: []testutil.TarEntry{ testutil.Dir("foo/", testutil.WithDirMode(os.ModeDir|0600|os.ModeSticky)), testutil.Dir("foo/bar/", testutil.WithDirOwner(1000, 1000)), testutil.File("foo/bar/baz.txt", "testtest"), testutil.File("foo/bar/xxxx", "x"), testutil.File("foo/bar/yyy", "yyy"), testutil.Dir("foo/a/", testutil.WithDirModTime(sampleTime)), testutil.Dir("foo/a/1/", testutil.WithDirXattrs(map[string]string{"testkey": "testval"})), testutil.File("foo/a/1/2", "1111111111"), }, want: []check{ numOfNodes(9), // root dir + 4 dirs + 4 files hasDirChildren("foo", "bar", "a"), hasDirChildren("foo/bar", "baz.txt", "xxxx", "yyy"), hasDirChildren("foo/a", "1"), hasDirChildren("foo/a/1", "2"), hasMode("foo", os.ModeDir|0600|os.ModeSticky), hasOwner("foo/bar", 1000, 1000), hasModTime("foo/a", sampleTime), hasXattrs("foo/a/1", map[string]string{"SCHILY.xattr.testkey": "testval"}), hasFile("foo/bar/baz.txt", 8), hasFile("foo/bar/xxxx", 1), hasFile("foo/bar/yyy", 3), hasFile("foo/a/1/2", 10), }, }, { name: 
"hardlinks", in: []testutil.TarEntry{ testutil.File("foo", "foofoo", testutil.WithFileOwner(1000, 1000)), testutil.Dir("bar/"), testutil.Link("bar/foolink", "foo"), testutil.Link("bar/foolink2", "bar/foolink"), testutil.Dir("bar/1/"), testutil.File("bar/1/baz.txt", "testtest"), testutil.Link("barlink", "bar/1/baz.txt"), testutil.Symlink("foosym", "bar/foolink2"), }, want: []check{ numOfNodes(6), // root dir + 2 dirs + 1 flie(linked) + 1 file(linked) + 1 symlink hasFile("foo", 6), hasOwner("foo", 1000, 1000), hasFile("bar/foolink", 6), hasOwner("bar/foolink", 1000, 1000), hasFile("bar/foolink2", 6), hasOwner("bar/foolink2", 1000, 1000), hasFile("bar/1/baz.txt", 8), hasFile("barlink", 8), hasDirChildren("bar", "foolink", "foolink2", "1"), hasDirChildren("bar/1", "baz.txt"), sameNodes("foo", "bar/foolink", "bar/foolink2"), sameNodes("bar/1/baz.txt", "barlink"), linkName("foosym", "bar/foolink2"), hasNumLink("foo", 3), // parent dir + 2 links hasNumLink("barlink", 2), // parent dir + 1 link hasNumLink("bar", 3), // parent + "." + child's ".." }, }, { name: "various files", in: []testutil.TarEntry{ testutil.Dir("bar/"), testutil.File("bar/../bar///////////////////foo", ""), testutil.Chardev("bar/cdev", 10, 11), testutil.Blockdev("bar/bdev", 100, 101), testutil.Fifo("bar/fifo"), }, want: []check{ numOfNodes(6), // root dir + 1 file + 1 dir + 1 cdev + 1 bdev + 1 fifo hasFile("bar/foo", 0), hasChardev("bar/cdev", 10, 11), hasBlockdev("bar/bdev", 100, 101), hasFifo("bar/fifo"), }, }, } for _, tt := range tests { for _, prefix := range allowedPrefix { prefix := prefix for srcCompresionName, srcCompression := range srcCompressions { t.Run(tt.name+"-"+srcCompresionName, func(t *testing.T) { opts := []testutil.BuildTarOption{ testutil.WithPrefix(prefix), } ztoc, sr, err := ztoc.BuildZtocReader(t, tt.in, srcCompression, 64, opts...) 
if err != nil { t.Fatalf("failed to build ztoc: %v", err) } telemetry, checkCalled := newCalledTelemetry() // create a metadata reader r, err := factory(sr, ztoc.TOC, WithTelemetry(telemetry)) if err != nil { t.Fatalf("failed to create new reader: %v", err) } defer r.Close() t.Logf("vvvvv Node tree vvvvv") t.Logf("[%d] ROOT", r.RootID()) dumpNodes(t, r, r.RootID(), 1) t.Logf("^^^^^^^^^^^^^^^^^^^^^") for _, want := range tt.want { want(t, r) } if err := checkCalled(); err != nil { t.Errorf("telemetry failure: %v", err) } }) } } } } func newCalledTelemetry() (telemetry *Telemetry, check func() error) { var initMetadataStoreLatencyCalled bool return &Telemetry{ func(time.Time) { initMetadataStoreLatencyCalled = true }, }, func() error { var allErr error if !initMetadataStoreLatencyCalled { allErr = errors.Join(allErr, fmt.Errorf("metrics initMetadataStoreLatency isn't called")) } return allErr } } func dumpNodes(t *testing.T, r testableReader, id uint32, level int) { if err := r.ForeachChild(id, func(name string, id uint32, mode os.FileMode) bool { ind := "" for i := 0; i < level; i++ { ind += " " } t.Logf("%v+- [%d] %q : %v", ind, id, name, mode) dumpNodes(t, r, id, level+1) return true }); err != nil { t.Errorf("failed to dump nodes %v", err) } } type check func(*testing.T, testableReader) func numOfNodes(want int) check { return func(t *testing.T, r testableReader) { i, err := r.NumOfNodes() if err != nil { t.Errorf("num of nodes: %v", err) } if want != i { t.Errorf("unexpected num of nodes %d; want %d", i, want) } } } func sameNodes(n string, nodes ...string) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, n) if err != nil { t.Errorf("failed to lookup %q: %v", n, err) return } for _, en := range nodes { eid, err := lookup(r, en) if err != nil { t.Errorf("failed to lookup %q: %v", en, err) return } if eid != id { t.Errorf("unexpected ID of %q: %d want %d", en, eid, id) } } } } func linkName(name string, linkName string) check { return 
func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("failed to lookup %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("failed to get attr of %q: %v", name, err) return } if attr.Mode&os.ModeSymlink == 0 { t.Errorf("%q is not a symlink: %v", name, attr.Mode) return } if attr.LinkName != linkName { t.Errorf("unexpected link name of %q : %q want %q", name, attr.LinkName, linkName) return } } } func hasNumLink(name string, numLink int) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("failed to lookup %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("failed to get attr of %q: %v", name, err) return } if attr.NumLink != numLink { t.Errorf("unexpected numLink of %q: %d want %d", name, attr.NumLink, numLink) return } } } func hasDirChildren(name string, children ...string) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("failed to lookup %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("failed to get attr of %q: %v", name, err) return } if !attr.Mode.IsDir() { t.Errorf("%q is not directory: %v", name, attr.Mode) return } found := map[string]struct{}{} if err := r.ForeachChild(id, func(name string, id uint32, mode os.FileMode) bool { found[name] = struct{}{} return true }); err != nil { t.Errorf("failed to see children %v", err) return } if len(found) != len(children) { t.Errorf("unexpected number of children of %q : %d want %d", name, len(found), len(children)) } for _, want := range children { if _, ok := found[want]; !ok { t.Errorf("expected child %q not found in %q", want, name) } } } } func hasChardev(name string, maj, min int) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find chardev %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil 
{ t.Errorf("cannot get attr of chardev %q: %v", name, err) return } if attr.Mode&os.ModeDevice == 0 || attr.Mode&os.ModeCharDevice == 0 { t.Errorf("file %q is not a chardev: %v", name, attr.Mode) return } if attr.DevMajor != maj || attr.DevMinor != min { t.Errorf("unexpected major/minor of chardev %q: %d/%d want %d/%d", name, attr.DevMajor, attr.DevMinor, maj, min) return } } } func hasBlockdev(name string, maj, min int) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find blockdev %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of blockdev %q: %v", name, err) return } if attr.Mode&os.ModeDevice == 0 || attr.Mode&os.ModeCharDevice != 0 { t.Errorf("file %q is not a blockdev: %v", name, attr.Mode) return } if attr.DevMajor != maj || attr.DevMinor != min { t.Errorf("unexpected major/minor of blockdev %q: %d/%d want %d/%d", name, attr.DevMajor, attr.DevMinor, maj, min) return } } } func hasFifo(name string) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find blockdev %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of blockdev %q: %v", name, err) return } if attr.Mode&os.ModeNamedPipe == 0 { t.Errorf("file %q is not a fifo: %v", name, attr.Mode) return } } } func hasFile(name string, size int64) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find file %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of file %q: %v", name, err) return } if !attr.Mode.IsRegular() { t.Errorf("file %q is not a regular file: %v", name, attr.Mode) return } f, err := r.OpenFile(id) if err != nil { t.Errorf("cannot open file %q: %v", name, err) return } if attr.Size != size { t.Errorf("unexpected size of file %q : %d want %d", name, attr.Size, size) 
return } if size != int64(f.GetUncompressedFileSize()) { t.Errorf("unexpected uncompressed file size of %q: %d want %d", name, f.GetUncompressedFileSize(), size) return } } } func hasMode(name string, mode os.FileMode) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find file %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of file %q: %v", name, err) return } if attr.Mode != mode { t.Errorf("unexpected mode of %q: %v want %v", name, attr.Mode, mode) return } } } func hasOwner(name string, uid, gid int) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find file %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of file %q: %v", name, err) return } if attr.UID != uid || attr.GID != gid { t.Errorf("unexpected owner of %q: (%d:%d) want (%d:%d)", name, attr.UID, attr.GID, uid, gid) return } } } func hasModTime(name string, modTime time.Time) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find file %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of file %q: %v", name, err) return } attrModTime := attr.ModTime if attrModTime.Before(modTime) || attrModTime.After(modTime) { t.Errorf("unexpected time of %q: %v; want %v", name, attrModTime, modTime) return } } } func hasXattrs(name string, xattrs map[string]string) check { return func(t *testing.T, r testableReader) { id, err := lookup(r, name) if err != nil { t.Errorf("cannot find file %q: %v", name, err) return } attr, err := r.GetAttr(id) if err != nil { t.Errorf("cannot get attr of file %q: %v", name, err) return } if len(attr.Xattrs) != len(xattrs) { t.Errorf("unexpected size of xattr of %q: %d want %d", name, len(attr.Xattrs), len(xattrs)) return } for k, v := range attr.Xattrs { 
if xattrs[k] != string(v) { t.Errorf("unexpected xattr of %q: %q=%q want %q=%q", name, k, string(v), k, xattrs[k]) } } } } func lookup(r testableReader, name string) (uint32, error) { name = strings.TrimPrefix(path.Clean("/"+name), "/") if name == "" { return r.RootID(), nil } dir, base := filepath.Split(name) pid, err := lookup(r, dir) if err != nil { return 0, err } id, _, err := r.GetChild(pid, base) return id, err } soci-snapshotter-0.4.1/scripts/000077500000000000000000000000001454010642300165075ustar00rootroot00000000000000soci-snapshotter-0.4.1/scripts/add-ltag.sh000077500000000000000000000015331454010642300205250ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="${CUR_DIR}/.." pushd ${SOCI_SNAPSHOTTER_PROJECT_ROOT} $(go env GOPATH)/bin/ltag -v -t ${SOCI_SNAPSHOTTER_PROJECT_ROOT}/.headers popdsoci-snapshotter-0.4.1/scripts/build_third_party_licenses.sh000077500000000000000000000034271454010642300244510ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A script to generate a THIRD_PARTY_LICENSES file containing all the licenses that we use from third parties. # NOTE: This only adds licenses from go dependencies. For other licenses, see NOTICE.md set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="${CUR_DIR}/.." LICENSE_FILE=${SOCI_SNAPSHOTTER_PROJECT_ROOT}/THIRD_PARTY_LICENSES # Remove content from the license file truncate -s 0 ${LICENSE_FILE} # The apache 2.0 license doesn't get modified with a copywrite. To reduce duplication, add attribution for each project using the license, but include the license text just once. go-licenses report --template="${SOCI_SNAPSHOTTER_PROJECT_ROOT}/scripts/third_party_licenses/apache.tpl" --ignore github.com/awslabs/soci ${SOCI_SNAPSHOTTER_PROJECT_ROOT}/... >> ${LICENSE_FILE} cat ${SOCI_SNAPSHOTTER_PROJECT_ROOT}/scripts/third_party_licenses/APACHE_LICENSE >> ${LICENSE_FILE} # For other licenses, just use the entire license text from the package. go-licenses report --template=${SOCI_SNAPSHOTTER_PROJECT_ROOT}/scripts/third_party_licenses/other.tpl --ignore github.com/awslabs/soci ${SOCI_SNAPSHOTTER_PROJECT_ROOT}/... >> ${LICENSE_FILE} soci-snapshotter-0.4.1/scripts/bump-deps.sh000077500000000000000000000024461454010642300207500ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="${CUR_DIR}/.." pushd ${SOCI_SNAPSHOTTER_PROJECT_ROOT} # skip k8s deps since they use the latest go version/features that may not be in the go version soci uses # Also ignored in /dependabot.yml go get -u $(go list -m -f '{{if not (or .Indirect .Main)}}{{.Path}}{{end}}' all | \ grep -v "^k8s.io/") make vendor pushd ./cmd # skip k8s deps and soci-snapshotter itself # Also ignored in /dependabot.yml go get -u $(go list -m -f '{{if not (or .Indirect .Main)}}{{.Path}}{{end}}' all | \ grep -v "^github.com/awslabs/soci-snapshotter" | \ grep -v "^k8s.io/") popd make vendor popdsoci-snapshotter-0.4.1/scripts/check-all.sh000077500000000000000000000012721454010642300206730ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -eux -o pipefail ./check-dco.sh ./check-flatc.sh ./check-ltag.sh ./check-lint.shsoci-snapshotter-0.4.1/scripts/check-dco.sh000077500000000000000000000015131454010642300206660ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail # the very first auto-commit doesn't have a DCO and the first real commit has a slightly different format. Exclude those when doing the check. $(go env GOPATH)/bin/git-validation -run DCO -range HEAD~20..HEADsoci-snapshotter-0.4.1/scripts/check-flatc.sh000077500000000000000000000021621454010642300212130ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="${CUR_DIR}/.." 
FBS_FILE_PATH=${SOCI_SNAPSHOTTER_PROJECT_ROOT}/ztoc/fbs/ztoc.fbs # check if flatbuffers needs to be generated again TMPDIR=$(mktemp -d) flatc -o ${TMPDIR} -g ${FBS_FILE_PATH} diff -qr ${TMPDIR}/ztoc ${SOCI_SNAPSHOTTER_PROJECT_ROOT}/ztoc/fbs/ztoc || (printf "\n\nThe Ztoc schema seems to be modified. Please run 'make flatc' to re-generate Go files\n\n"; rm -rf ${TMPDIR}; exit 1) rm -rf ${TMPDIR}soci-snapshotter-0.4.1/scripts/check-lint.sh000077500000000000000000000017261454010642300210750ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="${CUR_DIR}/.." GO111MODULE_VALUE=auto # check lint pushd ${SOCI_SNAPSHOTTER_PROJECT_ROOT} GO111MODULE=${GO111MODULE_VALUE} $(go env GOPATH)/bin/golangci-lint run pushd ./cmd GO111MODULE=${GO111MODULE_VALUE} $(go env GOPATH)/bin/golangci-lint run popd popdsoci-snapshotter-0.4.1/scripts/check-ltag.sh000077500000000000000000000017311454010642300210520ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="${CUR_DIR}/.." # check ltag pushd ${SOCI_SNAPSHOTTER_PROJECT_ROOT} $(go env GOPATH)/bin/ltag -t ${SOCI_SNAPSHOTTER_PROJECT_ROOT}/.headers -check -v || (echo "The files listed above are missing a licence header. Please run ./scripts/add-ltag.sh"; exit 1) popdsoci-snapshotter-0.4.1/scripts/check_regression.sh000077500000000000000000000064351454010642300223730ustar00rootroot00000000000000#!/bin/bash set -eu -o pipefail # Check if two arguments are provided (paths to past.json and current.json) if [ $# -ne 2 ]; then echo "Usage: $0 " exit 1 fi # Extract the file paths from command-line arguments past_json_path="$1" current_json_path="$2" # Read the contents of past.json and current.json into variables past_data=$(cat "$past_json_path") current_data=$(cat "$current_json_path") # Function to compare P90 values for a given statistic compare_stat_p90() { local past_value="$1" local current_value="$2" local stat_name="$3" # Calculate 150% of the past value local threshold=$(calculate_threshold "$past_value") # Compare the current value with the threshold if (( $(echo "$current_value > $threshold" |bc -l) )); then echo "ERROR: $stat_name - Current P90 value ($current_value) exceeds the 110% threshold ($threshold) of the past P90 value ($past_value)" return 1 fi return 0 } calculate_threshold() { local past_value="$1" awk -v past="$past_value" 'BEGIN { print past * 1.1 }' } # calculate the p90 ignoring the first result because we generally see an outlier in the first 
result calculate_p90_after_skip() { local times_array="$1" local num_entries=$(echo "$times_array" | jq 'length') local times=$(echo "$times_array" | jq -r '.[1:] | .[]') local sorted_times=$(echo "$times" | tr '\n' ' ' | xargs -n1 | sort -g) local index=$((num_entries * 90 / 100)) local p90=$(echo "$sorted_times" | sed -n "${index}p") echo "$p90" } # Loop through each object in past.json and compare P90 values with current.json for all statistics compare_p90_values() { local past_json="$1" local current_json="$2" local test_names=$(echo "$past_json" | jq -r '.benchmarkTests[].testName') # Use a flag to indicate if any regression has been detected local regression_detected=0 for test_name in $test_names; do echo "Checking for regression in '$test_name'" for stat_name in "fullRunStats" "pullStats" "lazyTaskStats" "localTaskStats"; do local past_array=$(echo "$past_json" | jq -r --arg test "$test_name" '.benchmarkTests[] | select(.testName == $test) | .'"$stat_name"'.BenchmarkTimes') local past_p90=$(calculate_p90_after_skip "$past_array") local current_array=$(echo "$current_json" | jq -r --arg test "$test_name" '.benchmarkTests[] | select(.testName == $test) | .'"$stat_name"'.BenchmarkTimes') local current_p90=$(calculate_p90_after_skip "$current_array") # Call the compare_stat_p90 function compare_stat_p90 "$past_p90" "$current_p90" "$stat_name" || regression_detected=1 done done # Check if any regression has been detected and return the appropriate exit code return $regression_detected } # Call compare_p90_values and store the exit code in a variable compare_p90_values "$past_data" "$current_data" exit_code=$? # Check the return status and display appropriate message if [ $exit_code -eq 0 ]; then echo "Comparison successful. No regressions detected, all P90 values are within the acceptable range." else echo "Comparison failed. Regression detected." 
fi # Set the final exit code to indicate if any regression occurred exit $exit_codesoci-snapshotter-0.4.1/scripts/create-releases.sh000077500000000000000000000046431454010642300221210ustar00rootroot00000000000000#!/bin/bash # Copyright The Soci Snapshotter Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A script to generate release artifacts. # This will create a folder in your project root called release. # This will contain the dynamic + static binaries # as well as their respective sha256 checksums. # NOTE: this will mutate your $SOCI_SNAPSHOTTER_PROJECT_ROOT/out folder. set -eux -o pipefail CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SOCI_SNAPSHOTTER_PROJECT_ROOT="$(cd -- "$CUR_DIR"/.. && pwd)" OUT_DIR="${SOCI_SNAPSHOTTER_PROJECT_ROOT}/out" RELEASE_DIR="${SOCI_SNAPSHOTTER_PROJECT_ROOT}/release" LICENSE_FILE=${SOCI_SNAPSHOTTER_PROJECT_ROOT}/THIRD_PARTY_LICENSES NOTICE_FILE=${SOCI_SNAPSHOTTER_PROJECT_ROOT}/NOTICE.md TAG_REGEX="v[0-9]+.[0-9]+.[0-9]+" ARCH="" case $(uname -m) in x86_64) ARCH="amd64" ;; aarch64) ARCH="arm64" ;; *) echo "Error: Unsupported arch"; exit 1 ;; esac if [ "$#" -ne 1 ]; then echo "Expected 1 parameter, got $#." echo "Usage: $0 [release_tag]" exit 1 fi if ! [[ "$1" =~ $TAG_REGEX ]]; then echo "Improper tag format. 
Format should match regex $TAG_REGEX" exit 1 fi if [ -d "$RELEASE_DIR" ]; then rm -rf "${RELEASE_DIR:?}"/* else mkdir "$RELEASE_DIR" fi release_version=${1/v/} # Remove v from tag name dynamic_binary_name=soci-snapshotter-${release_version}-linux-${ARCH}.tar.gz static_binary_name=soci-snapshotter-${release_version}-linux-${ARCH}-static.tar.gz make build cp "$NOTICE_FILE" "$LICENSE_FILE" "${OUT_DIR}" tar -czvf "$RELEASE_DIR"/"$dynamic_binary_name" "$OUT_DIR" rm -rf "{$OUT_DIR:?}"/* STATIC=1 make build cp "$NOTICE_FILE" "$LICENSE_FILE" "$OUT_DIR" tar -czvf "$RELEASE_DIR"/"$static_binary_name" "$OUT_DIR" rm -rf "{$OUT_DIR:?}"/* sha256sum "$RELEASE_DIR"/"$dynamic_binary_name" > "$RELEASE_DIR"/"$dynamic_binary_name".sha256sum sha256sum "$RELEASE_DIR"/"$static_binary_name" > "$RELEASE_DIR"/"$static_binary_name".sha256sum soci-snapshotter-0.4.1/scripts/install-check-tools.sh000077500000000000000000000015751454010642300227350ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. echo "Install soci check tools" set -eux -o pipefail curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.3 go install github.com/kunalkushwaha/ltag@v0.2.4 go install github.com/vbatts/git-validation@v1.2.0soci-snapshotter-0.4.1/scripts/install-dep.sh000077500000000000000000000033421454010642300212640ustar00rootroot00000000000000#!/bin/bash # Copyright The containerd Authors. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. echo "Install soci dependencies" set -eux -o pipefail # the installation shouldn't assume the script is executed in a specific directory. # move to tmp in case there is leftover while installing dependencies. TMPDIR=$(mktemp -d) pushd ${TMPDIR} # install cmake if ! command -v cmake &> /dev/null then wget https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-Linux-x86_64.sh -O cmake.sh sh cmake.sh --prefix=/usr/local/ --exclude-subdir rm -rf cmake.sh else echo "cmake is installed, skip..." fi # install flatc if ! command -v flatc &> /dev/null then wget https://github.com/google/flatbuffers/archive/refs/tags/v2.0.8.tar.gz -O flatbuffers.tar.gz tar xzvf flatbuffers.tar.gz cd flatbuffers-2.0.8 && cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release && make && sudo make install && cd .. rm -f flatbuffers.tar.gz rm -rf flatbuffers-2.0.8 else echo "flatc is installed, skip..." fi # install-zlib wget https://zlib.net/fossils/zlib-1.2.12.tar.gz tar xzvf zlib-1.2.12.tar.gz cd zlib-1.2.12 && ./configure && sudo make install && cd .. 
rm -rf zlib-1.2.12 rm -f zlib-1.2.12.tar.gz popdsoci-snapshotter-0.4.1/scripts/third_party_licenses/000077500000000000000000000000001454010642300227255ustar00rootroot00000000000000soci-snapshotter-0.4.1/scripts/third_party_licenses/APACHE_LICENSE000066400000000000000000000261371454010642300247440ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. soci-snapshotter-0.4.1/scripts/third_party_licenses/apache.tpl000066400000000000000000000001771454010642300246740ustar00rootroot00000000000000{{range . -}} {{if eq .LicenseName "Apache-2.0" -}} ** {{.Name}}; version {{.Version}} - https://{{.Name}} {{end -}} {{end -}} soci-snapshotter-0.4.1/scripts/third_party_licenses/other.tpl000066400000000000000000000003471454010642300245730ustar00rootroot00000000000000{{ range . -}} {{ if ne .LicenseName "Apache-2.0" -}} -------------------------------------------------------------------------------- ** {{.Name}}; version {{.Version}} - https://{{.Name}} {{ .LicenseText }} {{end -}} {{end -}} soci-snapshotter-0.4.1/scripts/visualization_data_converter.sh000077500000000000000000000036151454010642300250340ustar00rootroot00000000000000#!/bin/bash set -eux -o pipefail if [ $# -ne 2 ]; then echo "Usage: $0 " exit 1 fi # Read the input JSON file input_file="$1" output_dir="$2" # Check if the input file exists if [ ! 
-f "$input_file" ]; then echo "Error: Input file '$input_file' not found." exit 1 fi # Function to create JSON file for each testName create_json_file() { local test_name="$1" local lazy_task_value="$2" local local_task_value="$3" local pull_task_value="$4" # mkdir -p ../pre-processed-results # Define the output JSON file name local output_file="${output_dir}/${test_name}.json" # Create the JSON content local json_content='[{ "name": "'"$test_name"'-lazyTaskDuration", "unit": "Seconds", "value": '"$lazy_task_value"', "extra": "P90" }, { "name": "'"$test_name"'-localTaskDuration", "unit": "Seconds", "value": '"$local_task_value"', "extra": "P90" }, { "name": "'"$test_name"'-pullTaskDuration", "unit": "Seconds", "value": '"$pull_task_value"', "extra": "P90" }]' # Save the JSON content to the output file echo "$json_content" > "$output_file" } # Parse the JSON using jq commit=$(jq -r '.commit' "$input_file") tests=$(jq -r '.benchmarkTests | length' "$input_file") # Loop through each test and extract the required data for ((i = 0; i < tests; i++)); do testName=$(jq -r --argjson i $i '.benchmarkTests[$i].testName' "$input_file") # Lazy Task Stats lazyTaskPct90=$(jq -r --argjson i $i '.benchmarkTests[$i].lazyTaskStats.pct90' "$input_file") # Local Task Stats localTaskPct90=$(jq -r --argjson i $i '.benchmarkTests[$i].localTaskStats.pct90' "$input_file") pullTaskPct90=$(jq -r --argjson i $i '.benchmarkTests[$i].pullStats.pct90' "$input_file") # Create JSON file for each testName create_json_file "$testName" "$lazyTaskPct90" "$localTaskPct90" "$pullTaskPct90" 
donesoci-snapshotter-0.4.1/service/000077500000000000000000000000001454010642300164605ustar00rootroot00000000000000soci-snapshotter-0.4.1/service/keychain/000077500000000000000000000000001454010642300202535ustar00rootroot00000000000000soci-snapshotter-0.4.1/service/keychain/cri/000077500000000000000000000000001454010642300210305ustar00rootroot00000000000000soci-snapshotter-0.4.1/service/keychain/cri/cri.go000066400000000000000000000124671454010642300221460ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package cri import ( "context" "errors" "fmt" "sync" "time" "github.com/awslabs/soci-snapshotter/service/resolver" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" distribution "github.com/containerd/containerd/reference/docker" runtime_alpha "github.com/containerd/containerd/third_party/k8s.io/cri-api/pkg/apis/runtime/v1alpha2" ) // NewCRIKeychain provides creds passed through CRI PullImage API. // This also returns a CRI image service server that works as a proxy backed by the specified CRI service. // This server reads all PullImageRequest and uses PullImageRequest.AuthConfig for authenticating snapshots. func NewCRIKeychain(ctx context.Context, connectCRI func() (runtime_alpha.ImageServiceClient, error)) (resolver.Credential, runtime_alpha.ImageServiceServer) { server := &instrumentedService{config: make(map[string]*runtime_alpha.AuthConfig)} go func() { log.G(ctx).Debugf("Waiting for CRI service is started...") for i := 0; i < 100; i++ { client, err := connectCRI() if err == nil { server.criMu.Lock() server.cri = client server.criMu.Unlock() log.G(ctx).Info("connected to backend CRI service") return } log.G(ctx).WithError(err).Warnf("failed to connect to CRI") time.Sleep(10 * time.Second) } log.G(ctx).Warnf("no connection is available to CRI") }() return server.credentials, server } type instrumentedService struct { runtime_alpha.UnimplementedImageServiceServer cri runtime_alpha.ImageServiceClient criMu sync.Mutex config map[string]*runtime_alpha.AuthConfig configMu sync.Mutex } func (in *instrumentedService) credentials(host string, refspec reference.Spec) (string, string, error) { if host == "docker.io" || host == "registry-1.docker.io" { // Creds of "docker.io" is stored keyed by "https://index.docker.io/v1/". 
host = "index.docker.io" } in.configMu.Lock() defer in.configMu.Unlock() if cfg, ok := in.config[refspec.String()]; ok { return resolver.ParseAuth(cfg, host) } return "", "", nil } func (in *instrumentedService) getCRI() (c runtime_alpha.ImageServiceClient) { in.criMu.Lock() c = in.cri in.criMu.Unlock() return } func (in *instrumentedService) ListImages(ctx context.Context, r *runtime_alpha.ListImagesRequest) (res *runtime_alpha.ListImagesResponse, err error) { cri := in.getCRI() if cri == nil { return nil, errors.New("server is not initialized yet") } return cri.ListImages(ctx, r) } func (in *instrumentedService) ImageStatus(ctx context.Context, r *runtime_alpha.ImageStatusRequest) (res *runtime_alpha.ImageStatusResponse, err error) { cri := in.getCRI() if cri == nil { return nil, errors.New("server is not initialized yet") } return cri.ImageStatus(ctx, r) } func (in *instrumentedService) PullImage(ctx context.Context, r *runtime_alpha.PullImageRequest) (res *runtime_alpha.PullImageResponse, err error) { cri := in.getCRI() if cri == nil { return nil, errors.New("server is not initialized yet") } refspec, err := parseReference(r.GetImage().GetImage()) if err != nil { return nil, err } in.configMu.Lock() in.config[refspec.String()] = r.GetAuth() in.configMu.Unlock() return cri.PullImage(ctx, r) } func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime_alpha.RemoveImageRequest) (_ *runtime_alpha.RemoveImageResponse, err error) { cri := in.getCRI() if cri == nil { return nil, errors.New("server is not initialized yet") } refspec, err := parseReference(r.GetImage().GetImage()) if err != nil { return nil, err } in.configMu.Lock() delete(in.config, refspec.String()) in.configMu.Unlock() return cri.RemoveImage(ctx, r) } func (in *instrumentedService) ImageFsInfo(ctx context.Context, r *runtime_alpha.ImageFsInfoRequest) (res *runtime_alpha.ImageFsInfoResponse, err error) { cri := in.getCRI() if cri == nil { return nil, errors.New("server is not 
initialized yet") } return cri.ImageFsInfo(ctx, r) } func parseReference(ref string) (reference.Spec, error) { namedRef, err := distribution.ParseDockerRef(ref) if err != nil { return reference.Spec{}, fmt.Errorf("failed to parse image reference %q: %w", ref, err) } return reference.Parse(namedRef.String()) } soci-snapshotter-0.4.1/service/keychain/dockerconfig/000077500000000000000000000000001454010642300227105ustar00rootroot00000000000000soci-snapshotter-0.4.1/service/keychain/dockerconfig/dockerconfig.go000066400000000000000000000040001454010642300256660ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package dockerconfig import ( "context" "github.com/awslabs/soci-snapshotter/service/resolver" "github.com/containerd/containerd/reference" "github.com/docker/cli/cli/config" ) func DockerCreds(host string) (string, string, error) { cf, err := config.Load("") if err != nil { return "", "", nil } if host == "docker.io" || host == "registry-1.docker.io" { // Creds of docker.io is stored keyed by "https://index.docker.io/v1/". host = "https://index.docker.io/v1/" } ac, err := cf.GetAuthConfig(host) if err != nil { return "", "", err } if ac.IdentityToken != "" { return "", ac.IdentityToken, nil } return ac.Username, ac.Password, nil } func NewDockerConfigKeychain(ctx context.Context) resolver.Credential { return func(host string, refspec reference.Spec) (string, string, error) { return DockerCreds(host) } } soci-snapshotter-0.4.1/service/keychain/kubeconfig/000077500000000000000000000000001454010642300223675ustar00rootroot00000000000000soci-snapshotter-0.4.1/service/keychain/kubeconfig/kubeconfig.go000066400000000000000000000206261454010642300250400ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubeconfig import ( "bytes" "context" "fmt" "os" "sync" "time" "github.com/awslabs/soci-snapshotter/service/resolver" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" dcfile "github.com/docker/cli/cli/config/configfile" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/workqueue" ) const dockerconfigSelector = "type=" + string(corev1.SecretTypeDockerConfigJson) type options struct { kubeconfigPath string } type Option func(*options) func WithKubeconfigPath(path string) Option { return func(opts *options) { opts.kubeconfigPath = path } } // NewKubeconfigKeychain provides a keychain which can sync its contents with // kubernetes API server by fetching all `kubernetes.io/dockerconfigjson` // secrets in the cluster with provided kubeconfig. It's OK that config provides // kubeconfig path but the file doesn't exist at that moment. In this case, this // keychain keeps on trying to read the specified path periodically and when the // file is actually provided, this keychain tries to access API server using the // file. This is useful for some environments (e.g. single node cluster with // containerized apiserver) where soci snapshotter needs to start before // everything, including booting containerd/kubelet/apiserver and configuring // users/roles. 
// TODO: support update of kubeconfig file func NewKubeconfigKeychain(ctx context.Context, opts ...Option) resolver.Credential { var kcOpts options for _, o := range opts { o(&kcOpts) } kc := newKeychain(ctx, kcOpts.kubeconfigPath) return kc.credentials } func newKeychain(ctx context.Context, kubeconfigPath string) *keychain { kc := &keychain{ config: make(map[string]*dcfile.ConfigFile), } ctx = log.WithLogger(ctx, log.G(ctx).WithField("kubeconfig", kubeconfigPath)) go func() { if kubeconfigPath != "" { log.G(ctx).Debugf("Waiting for kubeconfig being installed...") for { if _, err := os.Stat(kubeconfigPath); err == nil { break } else if !os.IsNotExist(err) { log.G(ctx).WithError(err). Warnf("failed to read; Disabling syncing") return } time.Sleep(10 * time.Second) } } // default loader for KUBECONFIG or `~/.kube/config` // if no explicit path provided, KUBECONFIG will be used. // if KUBECONFIG doesn't contain paths, `~/.kube/config` will be used. loadingRule := clientcmd.NewDefaultClientConfigLoadingRules() // explicitly provide path for kubeconfig. // if path isn't "", this path will be respected. loadingRule.ExplicitPath = kubeconfigPath // load and merge config files clientcfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( loadingRule, // loader for config files &clientcmd.ConfigOverrides{}, // no overrides for config ).ClientConfig() if err != nil { log.G(ctx).WithError(err).Warnf("failed to load config; Disabling syncing") return } client, err := kubernetes.NewForConfig(clientcfg) if err != nil { log.G(ctx).WithError(err).Warnf("failed to prepare client; Disabling syncing") return } if err := kc.startSyncSecrets(ctx, client); err != nil { log.G(ctx).WithError(err).Warnf("failed to sync secrets") } }() return kc } type keychain struct { config map[string]*dcfile.ConfigFile configMu sync.Mutex // the following entries are used for syncing secrets with API server. // these fields are lazily filled after kubeconfig file is provided. 
queue *workqueue.Type informer cache.SharedIndexInformer } func (kc *keychain) credentials(host string, refspec reference.Spec) (string, string, error) { if host == "docker.io" || host == "registry-1.docker.io" { // Creds of "docker.io" is stored keyed by "https://index.docker.io/v1/". host = "https://index.docker.io/v1/" } kc.configMu.Lock() defer kc.configMu.Unlock() for _, cfg := range kc.config { if acfg, err := cfg.GetAuthConfig(host); err == nil { if acfg.IdentityToken != "" { return "", acfg.IdentityToken, nil } else if !(acfg.Username == "" && acfg.Password == "") { return acfg.Username, acfg.Password, nil } } } return "", "", nil } func (kc *keychain) startSyncSecrets(ctx context.Context, client kubernetes.Interface) error { // don't let panics crash the process defer utilruntime.HandleCrash() // get informed on `kubernetes.io/dockerconfigjson` secrets in all namespaces informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { // TODO: support legacy image secret `kubernetes.io/dockercfg` options.FieldSelector = dockerconfigSelector return client.CoreV1().Secrets(metav1.NamespaceAll).List(ctx, options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // TODO: support legacy image secret `kubernetes.io/dockercfg` options.FieldSelector = dockerconfigSelector return client.CoreV1().Secrets(metav1.NamespaceAll).Watch(ctx, options) }, }, &corev1.Secret{}, 0, cache.Indexers{}, ) // use workqueue because each task possibly takes long for parsing config, // wating for lock, etc... 
queue := workqueue.New() defer queue.ShutDown() informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) if err == nil { queue.Add(key) } }, UpdateFunc: func(old, new interface{}) { key, err := cache.MetaNamespaceKeyFunc(new) if err == nil { queue.Add(key) } }, DeleteFunc: func(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err == nil { queue.Add(key) } }, }) go informer.Run(ctx.Done()) if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { return fmt.Errorf("Timed out for syncing cache") } // get informer and queue kc.informer = informer kc.queue = queue // keep on syncing secrets wait.Until(kc.runWorker, time.Second, ctx.Done()) return nil } func (kc *keychain) runWorker() { for kc.processNextItem() { // continue looping } } // TODO: consider retrying? func (kc *keychain) processNextItem() bool { key, quit := kc.queue.Get() if quit { return false } defer kc.queue.Done(key) obj, exists, err := kc.informer.GetIndexer().GetByKey(key.(string)) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to get object; don't sync %q: %v", key, err)) return true } if !exists { kc.configMu.Lock() delete(kc.config, key.(string)) kc.configMu.Unlock() return true } // TODO: support legacy image secret `kubernetes.io/dockercfg` data, ok := obj.(*corev1.Secret).Data[corev1.DockerConfigJsonKey] if !ok { utilruntime.HandleError(fmt.Errorf("no secret is provided; don't sync %q", key)) return true } configFile := dcfile.New("") if err := configFile.LoadFromReader(bytes.NewReader(data)); err != nil { utilruntime.HandleError(fmt.Errorf("broken data; don't sync %q: %v", key, err)) return true } kc.configMu.Lock() kc.config[key.(string)] = configFile kc.configMu.Unlock() return true } 
soci-snapshotter-0.4.1/service/plugin/000077500000000000000000000000001454010642300177565ustar00rootroot00000000000000soci-snapshotter-0.4.1/service/plugin/plugin.go000066400000000000000000000136001454010642300216030ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

package plugin

import (
	"errors"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"time"

	"github.com/awslabs/soci-snapshotter/config"
	"github.com/awslabs/soci-snapshotter/service"
	"github.com/awslabs/soci-snapshotter/service/keychain/cri"
	"github.com/awslabs/soci-snapshotter/service/keychain/dockerconfig"
	"github.com/awslabs/soci-snapshotter/service/keychain/kubeconfig"
	"github.com/awslabs/soci-snapshotter/service/resolver"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/pkg/dialer"
	"github.com/containerd/containerd/platforms"
	ctdplugin "github.com/containerd/containerd/plugin"
	runtime_alpha "github.com/containerd/containerd/third_party/k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
	grpc "google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

// Config represents configuration for the soci snapshotter plugin.
// It embeds the shared service configuration and adds plugin-only knobs.
type Config struct {
	config.ServiceConfig

	// RootPath is the directory for the plugin. When empty, the containerd
	// plugin root (ic.Root) is used instead.
	RootPath string `toml:"root_path"`

	// CRIKeychainImageServicePath is the path to expose CRI service wrapped by CRI keychain.
	CRIKeychainImageServicePath string `toml:"cri_keychain_image_service_path"`

	// Registry is CRI-plugin-compatible registry configuration.
	Registry resolver.Registry `toml:"registry"`
}

// init registers the soci snapshotter as a containerd SnapshotPlugin with ID
// "soci". The InitFn wires up credential keychains (docker config, optionally
// kubeconfig and a CRI-proxying keychain served over a unix socket) and then
// constructs the snapshotter service.
func init() {
	ctdplugin.Register(&ctdplugin.Registration{
		Type:   ctdplugin.SnapshotPlugin,
		ID:     "soci",
		Config: &Config{},
		InitFn: func(ic *ctdplugin.InitContext) (interface{}, error) {
			ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec())
			ctx := ic.Context

			config, ok := ic.Config.(*Config)
			if !ok {
				return nil, errors.New("invalid soci snapshotter configuration")
			}

			// RootPath from config overrides containerd's default plugin root.
			root := ic.Root
			if config.RootPath != "" {
				root = config.RootPath
			}
			ic.Meta.Exports["root"] = root

			// Configure keychain. The docker-config keychain is always present;
			// kubeconfig and CRI keychains are opt-in.
			credsFuncs := []resolver.Credential{dockerconfig.NewDockerConfigKeychain(ctx)}
			if config.KubeconfigKeychainConfig.EnableKeychain {
				var opts []kubeconfig.Option
				if kcp := config.KubeconfigKeychainConfig.KubeconfigPath; kcp != "" {
					opts = append(opts, kubeconfig.WithKubeconfigPath(kcp))
				}
				credsFuncs = append(credsFuncs, kubeconfig.NewKubeconfigKeychain(ctx, opts...))
			}
			if addr := config.CRIKeychainImageServicePath; config.CRIKeychainConfig.EnableKeychain && addr != "" {
				// connects to the backend CRI service (defaults to containerd socket)
				criAddr := ic.Address
				if cp := config.CRIKeychainConfig.ImageServicePath; cp != "" {
					criAddr = cp
				}
				if criAddr == "" {
					return nil, errors.New("backend CRI service address is not specified")
				}
				// connectCRI lazily dials the backend CRI image service; it is
				// invoked by the keychain when credentials are first needed.
				connectCRI := func() (runtime_alpha.ImageServiceClient, error) {
					// TODO: make gRPC options configurable from config.toml
					backoffConfig := backoff.DefaultConfig
					backoffConfig.MaxDelay = 3 * time.Second
					connParams := grpc.ConnectParams{
						Backoff: backoffConfig,
					}
					gopts := []grpc.DialOption{
						grpc.WithTransportCredentials(insecure.NewCredentials()),
						grpc.WithConnectParams(connParams),
						grpc.WithContextDialer(dialer.ContextDialer),
						grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
						grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
					}
					conn, err := grpc.Dial(dialer.DialAddress(criAddr), gopts...)
					if err != nil {
						return nil, err
					}
					return runtime_alpha.NewImageServiceClient(conn), nil
				}
				criCreds, criServer := cri.NewCRIKeychain(ctx, connectCRI)
				// Create a gRPC server that proxies CRI image-service calls,
				// letting the keychain observe credentials in flight.
				rpc := grpc.NewServer()
				runtime_alpha.RegisterImageServiceServer(rpc, criServer)
				// Prepare the directory for the socket
				if err := os.MkdirAll(filepath.Dir(addr), 0700); err != nil {
					return nil, fmt.Errorf("failed to create directory %q: %w", filepath.Dir(addr), err)
				}
				// Try to remove the socket file to avoid EADDRINUSE
				if err := os.RemoveAll(addr); err != nil {
					return nil, fmt.Errorf("failed to remove %q: %w", addr, err)
				}
				// Listen and serve. NOTE(review): the serve goroutine has no
				// shutdown hook; it lives for the process lifetime by design.
				l, err := net.Listen("unix", addr)
				if err != nil {
					return nil, fmt.Errorf("error on listen socket %q: %w", addr, err)
				}
				go func() {
					if err := rpc.Serve(l); err != nil {
						log.G(ctx).WithError(err).Warnf("error on serving via socket %q", addr)
					}
				}()
				credsFuncs = append(credsFuncs, criCreds)
			}

			// TODO(ktock): print warn if old configuration is specified.
			// TODO(ktock): should we respect old configuration?
			return service.NewSociSnapshotterService(ctx, root, &config.ServiceConfig,
				service.WithCustomRegistryHosts(resolver.RegistryHostsFromCRIConfig(ctx, config.Registry, credsFuncs...)))
		},
	})
}

/*
Copyright The Soci Snapshotter Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resolver

// =====
// This is CRI-plugin-compatible registry hosts configuration.
// Some functions are ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri as noted on each one.
// TODO: import them from CRI package once we drop support to continerd v1.4.x
// =====

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strings"

	"github.com/awslabs/soci-snapshotter/fs/source"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes/docker"
	dconfig "github.com/containerd/containerd/remotes/docker/config"
	runtime_alpha "github.com/containerd/containerd/third_party/k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
	rhttp "github.com/hashicorp/go-retryablehttp"
)

// Registry is registry settings configured
type Registry struct {
	// ConfigPath is a path to the root directory containing registry-specific
	// configurations.
	// If ConfigPath is set, the rest of the registry specific options are ignored.
	ConfigPath string `toml:"config_path" json:"configPath"`
	// Mirrors are namespace to mirror mapping for all namespaces.
	// This option will not be used when ConfigPath is provided.
	// DEPRECATED: Use ConfigPath instead. Remove in containerd 1.7.
	Mirrors map[string]Mirror `toml:"mirrors" json:"mirrors"`
	// Configs are configs for each registry.
	// The key is the domain name or IP of the registry.
	// This option will be fully deprecated for ConfigPath in the future.
	Configs map[string]RegistryConfig `toml:"configs" json:"configs"`
}

// Mirror contains the config related to the registry mirror
type Mirror struct {
	// Endpoints are endpoints for a namespace. CRI plugin will try the endpoints
	// one by one until a working one is found. The endpoint must be a valid url
	// with host specified.
	// The scheme, host and path from the endpoint URL will be used.
	Endpoints []string `toml:"endpoint" json:"endpoint"`
}

// RegistryConfig contains configuration used to communicate with the registry.
type RegistryConfig struct {
	// Auth contains information to authenticate to the registry.
	Auth *AuthConfig `toml:"auth" json:"auth"`
	// TLS is a pair of CA/Cert/Key which then are used when creating the transport
	// that communicates with the registry.
	// This field will not be used when ConfigPath is provided.
	// DEPRECATED: Use ConfigPath instead. Remove in containerd 1.7.
	TLS *TLSConfig `toml:"tls" json:"tls"`
}

// AuthConfig contains the config related to authentication to a specific registry
type AuthConfig struct {
	// Username is the username to login the registry.
	Username string `toml:"username" json:"username"`
	// Password is the password to login the registry.
	Password string `toml:"password" json:"password"`
	// Auth is a base64 encoded string from the concatenation of the username,
	// a colon, and the password.
	Auth string `toml:"auth" json:"auth"`
	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `toml:"identitytoken" json:"identitytoken"`
}

// TLSConfig contains the CA/Cert/Key used for a registry
type TLSConfig struct {
	InsecureSkipVerify bool   `toml:"insecure_skip_verify" json:"insecure_skip_verify"`
	CAFile             string `toml:"ca_file" json:"caFile"`
	CertFile           string `toml:"cert_file" json:"certFile"`
	KeyFile            string `toml:"key_file" json:"keyFile"`
}

// RegistryHostsFromCRIConfig creates RegistryHosts (a set of registry configuration) from CRI-plugin-compatible config.
// When config.ConfigPath is set, hosts are resolved from per-host directories
// via containerd's docker/config package and all other registry options are
// ignored; otherwise hosts are built from Mirrors/Configs with a retryable
// HTTP client and optional per-host TLS.
// NOTE: ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L332-L405
// TODO: import this from CRI package once we drop support to continerd v1.4.x
func RegistryHostsFromCRIConfig(ctx context.Context, config Registry, credsFuncs ...Credential) source.RegistryHosts {
	paths := filepath.SplitList(config.ConfigPath)
	if len(paths) > 0 {
		return func(ref reference.Spec) ([]docker.RegistryHost, error) {
			hostOptions := dconfig.HostOptions{}
			// The CRI auth config acts as a last-resort credential source,
			// consulted after all supplied credsFuncs.
			hostOptions.Credentials = multiCredsFuncs(ref, append(credsFuncs, func(host string, ref reference.Spec) (string, string, error) {
				// NOTE: `config` here shadows the outer Registry with the
				// host-specific RegistryConfig looked up from it.
				config := config.Configs[host]
				if config.Auth != nil {
					return ParseAuth(toRuntimeAuthConfig(*config.Auth), host)
				}
				return "", "", nil
			})...)
			hostOptions.HostDir = hostDirFromRoots(paths)
			return dconfig.ConfigureHosts(ctx, hostOptions)(ref.Hostname())
		}
	}
	return func(ref reference.Spec) ([]docker.RegistryHost, error) {
		host := ref.Hostname()
		var registries []docker.RegistryHost
		endpoints, err := registryEndpoints(config, host)
		if err != nil {
			return nil, fmt.Errorf("get registry endpoints: %w", err)
		}
		for _, e := range endpoints {
			u, err := url.Parse(e)
			if err != nil {
				return nil, fmt.Errorf("parse registry endpoint %q from mirrors: %w", e, err)
			}

			var (
				rclient = rhttp.NewClient()
				// NOTE: this `config` shadows the outer Registry with the
				// endpoint-host-specific RegistryConfig.
				config = config.Configs[u.Host]
			)

			rclient.Logger = nil // disable logging every request

			if config.TLS != nil {
				if tr, ok := rclient.HTTPClient.Transport.(*http.Transport); ok {
					tr.TLSClientConfig, err = getTLSConfig(*config.TLS)
					if err != nil {
						return nil, fmt.Errorf("get TLSConfig for registry %q: %w", e, err)
					}
				} else {
					return nil, errors.New("TLS config cannot be applied; Client.Transport is not *http.Transport")
				}
			}

			client := rclient.StandardClient()

			authorizer := docker.NewDockerAuthorizer(
				docker.WithAuthClient(client),
				docker.WithAuthCreds(multiCredsFuncs(ref, credsFuncs...)))

			if u.Path == "" {
				u.Path = "/v2"
			}

			registries = append(registries, docker.RegistryHost{
				Client:       client,
				Authorizer:   authorizer,
				Host:         u.Host,
				Scheme:       u.Scheme,
				Path:         u.Path,
				Capabilities: docker.HostCapabilityResolve | docker.HostCapabilityPull,
			})
		}
		return registries, nil
	}
}

// hostDirFromRoots returns a lookup function that tries each root directory in
// order and returns the first host directory found (non-NotFound errors and
// non-empty results both stop the search).
// Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330
// TODO: import this from CRI package once we drop support to continerd v1.4.x
func hostDirFromRoots(roots []string) func(string) (string, error) {
	rootfn := make([]func(string) (string, error), len(roots))
	for i := range roots {
		rootfn[i] = dconfig.HostDirFromRoot(roots[i])
	}
	return func(host string) (dir string, err error) {
		for _, fn := range rootfn {
			dir, err = fn(host)
			if (err != nil && !errdefs.IsNotFound(err)) || (dir != "") {
				break
			}
		}
		return
	}
}

//
toRuntimeAuthConfig converts cri plugin auth config to runtime auth config. // Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/helpers.go#L295-L303 // TODO: import this from CRI package once we drop support to continerd v1.4.x func toRuntimeAuthConfig(a AuthConfig) *runtime_alpha.AuthConfig { return &runtime_alpha.AuthConfig{ Username: a.Username, Password: a.Password, Auth: a.Auth, IdentityToken: a.IdentityToken, } } // getTLSConfig returns a TLSConfig configured with a CA/Cert/Key specified by registryTLSConfig // Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330 // TODO: import this from CRI package once we drop support to continerd v1.4.x func getTLSConfig(registryTLSConfig TLSConfig) (*tls.Config, error) { var ( tlsConfig = &tls.Config{} cert tls.Certificate err error ) if registryTLSConfig.CertFile != "" && registryTLSConfig.KeyFile == "" { return nil, fmt.Errorf("cert file %q was specified, but no corresponding key file was specified", registryTLSConfig.CertFile) } if registryTLSConfig.CertFile == "" && registryTLSConfig.KeyFile != "" { return nil, fmt.Errorf("key file %q was specified, but no corresponding cert file was specified", registryTLSConfig.KeyFile) } if registryTLSConfig.CertFile != "" && registryTLSConfig.KeyFile != "" { cert, err = tls.LoadX509KeyPair(registryTLSConfig.CertFile, registryTLSConfig.KeyFile) if err != nil { return nil, fmt.Errorf("failed to load cert file: %w", err) } if len(cert.Certificate) != 0 { tlsConfig.Certificates = []tls.Certificate{cert} } tlsConfig.BuildNameToCertificate() // nolint:staticcheck } if registryTLSConfig.CAFile != "" { caCertPool, err := x509.SystemCertPool() if err != nil { return nil, fmt.Errorf("failed to get system cert pool: %w", err) } caCert, err := os.ReadFile(registryTLSConfig.CAFile) if err != nil { return nil, fmt.Errorf("failed to load CA file: %w", err) } caCertPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs 
= caCertPool } tlsConfig.InsecureSkipVerify = registryTLSConfig.InsecureSkipVerify return tlsConfig, nil } // defaultScheme returns the default scheme for a registry host. // Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330 // TODO: import this from CRI package once we drop support to continerd v1.4.x func defaultScheme(host string) string { if h, _, err := net.SplitHostPort(host); err == nil { host = h } if host == "localhost" || host == "127.0.0.1" || host == "::1" { return "http" } return "https" } // addDefaultScheme returns the endpoint with default scheme // Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330 // TODO: import this from CRI package once we drop support to continerd v1.4.x func addDefaultScheme(endpoint string) (string, error) { if strings.Contains(endpoint, "://") { return endpoint, nil } ue := "dummy://" + endpoint u, err := url.Parse(ue) if err != nil { return "", err } return fmt.Sprintf("%s://%s", defaultScheme(u.Host), endpoint), nil } // registryEndpoints returns endpoints for a given host. // It adds default registry endpoint if it does not exist in the passed-in endpoint list. // It also supports wildcard host matching with `*`. 
// Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L431-L464 // TODO: import this from CRI package once we drop support to continerd v1.4.x func registryEndpoints(config Registry, host string) ([]string, error) { var endpoints []string _, ok := config.Mirrors[host] if ok { endpoints = config.Mirrors[host].Endpoints } else { endpoints = config.Mirrors["*"].Endpoints } defaultHost, err := docker.DefaultHost(host) if err != nil { return nil, fmt.Errorf("get default host: %w", err) } for i := range endpoints { en, err := addDefaultScheme(endpoints[i]) if err != nil { return nil, fmt.Errorf("parse endpoint url: %w", err) } endpoints[i] = en } for _, e := range endpoints { u, err := url.Parse(e) if err != nil { return nil, fmt.Errorf("parse endpoint url: %w", err) } if u.Host == host { // Do not add default if the endpoint already exists. return endpoints, nil } } return append(endpoints, defaultScheme(defaultHost)+"://"+defaultHost), nil } // ParseAuth parses AuthConfig and returns username and password/secret required by containerd. // Ported from https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L176-L214 // TODO: import this from CRI package once we drop support to continerd v1.4.x func ParseAuth(auth *runtime_alpha.AuthConfig, host string) (string, string, error) { if auth == nil { return "", "", nil } if auth.ServerAddress != "" { // Do not return the auth info when server address doesn't match. 
u, err := url.Parse(auth.ServerAddress) if err != nil { return "", "", fmt.Errorf("parse server address: %w", err) } if host != u.Host { return "", "", nil } } if auth.Username != "" { return auth.Username, auth.Password, nil } if auth.IdentityToken != "" { return "", auth.IdentityToken, nil } if auth.Auth != "" { decLen := base64.StdEncoding.DecodedLen(len(auth.Auth)) decoded := make([]byte, decLen) _, err := base64.StdEncoding.Decode(decoded, []byte(auth.Auth)) if err != nil { return "", "", err } fields := strings.SplitN(string(decoded), ":", 2) if len(fields) != 2 { return "", "", fmt.Errorf("invalid decoded auth: %q", decoded) } user, passwd := fields[0], fields[1] return user, strings.Trim(passwd, "\x00"), nil } // TODO(random-liu): Support RegistryToken. // An empty auth config is valid for anonymous registry return "", "", nil } soci-snapshotter-0.4.1/service/resolver/registry.go000066400000000000000000000062471454010642300225310ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and
limitations under the License.
*/

package resolver

import (
	"github.com/awslabs/soci-snapshotter/config"
	"github.com/awslabs/soci-snapshotter/fs/source"
	socihttp "github.com/awslabs/soci-snapshotter/util/http"
	"github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes/docker"
)

// Credential resolves a (username, secret) pair for a host and image
// reference. Empty username and secret mean "no credentials from this source".
type Credential func(string, reference.Spec) (string, string, error)

// RegistryHostsFromConfig creates RegistryHosts (a set of registry configuration) from Config.
// For each reference it yields the configured mirrors for the host followed by
// the host itself, each backed by a retryable HTTP client and the combined
// credential helpers.
func RegistryHostsFromConfig(registryConfig config.ResolverConfig, httpConfig config.RetryableHTTPClientConfig, credsFuncs ...Credential) source.RegistryHosts {
	return func(ref reference.Spec) (hosts []docker.RegistryHost, _ error) {
		host := ref.Hostname()
		for _, h := range append(registryConfig.Host[host].Mirrors, config.MirrorConfig{
			Host: host,
		}) {
			// Per-mirror request timeout override: negative disables the
			// timeout, positive replaces it (seconds -> milliseconds).
			// NOTE(review): httpConfig is mutated across loop iterations, so a
			// mirror's override carries over to later mirrors that specify no
			// timeout of their own — confirm whether this is intended.
			if h.RequestTimeoutSec < 0 {
				httpConfig.RequestTimeoutMsec = 0
			}
			if h.RequestTimeoutSec > 0 {
				httpConfig.RequestTimeoutMsec = h.RequestTimeoutSec * 1000
			}

			client := socihttp.NewRetryableClient(httpConfig)
			config := docker.RegistryHost{
				Client: client,
				Host:   h.Host,
				Scheme: "https",
				Path:   "/v2",
				Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve,
				Authorizer: docker.NewDockerAuthorizer(
					docker.WithAuthClient(client),
					docker.WithAuthCreds(multiCredsFuncs(ref, credsFuncs...))),
			}
			// Plain HTTP for localhost-style hosts or explicitly-insecure mirrors.
			if localhost, _ := docker.MatchLocalhost(config.Host); localhost || h.Insecure {
				config.Scheme = "http"
			}
			// docker.io is an alias; the real registry endpoint differs.
			if config.Host == "docker.io" {
				config.Host = "registry-1.docker.io"
			}
			hosts = append(hosts, config)
		}
		return
	}
}

// multiCredsFuncs folds several Credential helpers into the single-host
// credential function containerd's authorizer expects: the first helper that
// errors or returns non-empty credentials wins.
func multiCredsFuncs(ref reference.Spec, credsFuncs ...Credential) func(string) (string, string, error) {
	return func(host string) (string, string, error) {
		for _, f := range credsFuncs {
			if username, secret, err := f(host, ref); err != nil {
				return "", "", err
			} else if !(username == "" && secret == "") {
				return username, secret, nil
			}
		}
		return "", "", nil
	}
}
soci-snapshotter-0.4.1/service/service.go000066400000000000000000000112601454010642300204470ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package service import ( "context" "path/filepath" "github.com/awslabs/soci-snapshotter/config" socifs "github.com/awslabs/soci-snapshotter/fs" "github.com/awslabs/soci-snapshotter/fs/layer" "github.com/awslabs/soci-snapshotter/fs/source" "github.com/awslabs/soci-snapshotter/service/resolver" snbase "github.com/awslabs/soci-snapshotter/snapshot" "github.com/containerd/containerd/log" "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/overlay/overlayutils" ) type Option func(*options) type options struct { credsFuncs []resolver.Credential registryHosts source.RegistryHosts fsOpts []socifs.Option } // WithCredsFuncs specifies credsFuncs to be used for connecting to the registries. 
func WithCredsFuncs(creds ...resolver.Credential) Option { return func(o *options) { o.credsFuncs = append(o.credsFuncs, creds...) } } // WithCustomRegistryHosts is registry hosts to use instead. func WithCustomRegistryHosts(hosts source.RegistryHosts) Option { return func(o *options) { o.registryHosts = hosts } } // WithFilesystemOptions allowes to pass filesystem-related configuration. func WithFilesystemOptions(opts ...socifs.Option) Option { return func(o *options) { o.fsOpts = opts } } // NewSociSnapshotterService returns soci snapshotter. func NewSociSnapshotterService(ctx context.Context, root string, serviceCfg *config.ServiceConfig, opts ...Option) (snapshots.Snapshotter, error) { var sOpts options for _, o := range opts { o(&sOpts) } hosts := sOpts.registryHosts if hosts == nil { // Use RegistryHosts based on ResolverConfig and keychain hosts = resolver.RegistryHostsFromConfig(serviceCfg.ResolverConfig, serviceCfg.FSConfig.RetryableHTTPClientConfig, sOpts.credsFuncs...) } userxattr, err := overlayutils.NeedsUserXAttr(snapshotterRoot(root)) if err != nil { log.G(ctx).WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr) } opq := layer.OverlayOpaqueTrusted if userxattr { opq = layer.OverlayOpaqueUser } // Configure filesystem and snapshotter fsOpts := append(sOpts.fsOpts, socifs.WithGetSources( source.FromDefaultLabels(hosts), // provides source info based on default labels ), socifs.WithOverlayOpaqueType(opq)) fs, err := socifs.NewFilesystem(ctx, fsRoot(root), serviceCfg.FSConfig, fsOpts...) 
if err != nil { log.G(ctx).WithError(err).Fatalf("failed to configure filesystem") } var snapshotter snapshots.Snapshotter snOpts := []snbase.Opt{snbase.WithAsynchronousRemove} if serviceCfg.MinLayerSize > -1 { snOpts = append(snOpts, snbase.WithMinLayerSize(serviceCfg.MinLayerSize)) } if serviceCfg.SnapshotterConfig.AllowInvalidMountsOnRestart { snOpts = append(snOpts, snbase.AllowInvalidMountsOnRestart) } snapshotter, err = snbase.NewSnapshotter(ctx, snapshotterRoot(root), fs, snOpts...) if err != nil { log.G(ctx).WithError(err).Fatalf("failed to create new snapshotter") } return snapshotter, err } func snapshotterRoot(root string) string { return filepath.Join(root, "snapshotter") } func fsRoot(root string) string { return filepath.Join(root, "soci") } // Supported returns nil when the remote snapshotter is functional on the system with the root directory. // Supported is not called during plugin initialization, but exposed for downstream projects which uses // this snapshotter as a library. func Supported(root string) error { // Remote snapshotter is implemented based on overlayfs snapshotter. return overlayutils.Supported(snapshotterRoot(root)) } soci-snapshotter-0.4.1/snapshot/000077500000000000000000000000001454010642300166575ustar00rootroot00000000000000soci-snapshotter-0.4.1/snapshot/snapshot.go000066400000000000000000000654021454010642300210540ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snapshot

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"

	commonmetrics "github.com/awslabs/soci-snapshotter/fs/metrics/common"
	"github.com/awslabs/soci-snapshotter/fs/source"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/mount"
	ctdsnapshotters "github.com/containerd/containerd/pkg/snapshotters"
	"github.com/containerd/containerd/snapshots"
	"github.com/containerd/containerd/snapshots/overlay/overlayutils"
	"github.com/containerd/containerd/snapshots/storage"
	"github.com/containerd/continuity/fs"
	"github.com/moby/sys/mountinfo"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
)

const (
	// targetSnapshotLabel marks a Prepare call as targeting a remote snapshot.
	targetSnapshotLabel = "containerd.io/snapshot.ref"
	// remoteLabel/remoteLabelVal tag snapshots that were mounted remotely.
	remoteLabel    = "containerd.io/snapshot/remote"
	remoteLabelVal = "remote snapshot"

	// remoteSnapshotLogKey is a key for log line, which indicates whether
	// `Prepare` method successfully prepared targeting remote snapshot or not, as
	// defined in the following:
	// - "true"  : indicates the snapshot has been successfully prepared as a
	//             remote snapshot
	// - "false" : indicates the snapshot failed to be prepared as a remote
	//             snapshot
	// - null    : undetermined
	remoteSnapshotLogKey = "remote-snapshot-prepared"
	prepareSucceeded     = "true"
	prepareFailed        = "false"
)

var (
	// Error returned by `fs.Mount` when there is no ztoc for a particular layer.
	ErrNoZtoc = errors.New("no ztoc for layer")
)

// FileSystem is a backing filesystem abstraction.
//
// Mount() tries to mount a remote snapshot to the specified mount point
// directory. If succeed, the mountpoint directory will be treated as a layer
// snapshot. If Mount() fails, the mountpoint directory MUST be cleaned up.
// Check() is called to check the connectibity of the existing layer snapshot
// every time the layer is used by containerd.
// Unmount() is called to unmount a remote snapshot from the specified mount point
// directory.
// MountLocal() is called to download and decompress a layer to a mount point
// directory. After that it applies the difference to the parent layers if there are any.
// If succeeded, the mountpoint directory will be treated as a regular layer snapshot.
// If MountLocal() fails, the mountpoint directory MUST be cleaned up.
type FileSystem interface {
	Mount(ctx context.Context, mountpoint string, labels map[string]string) error
	Check(ctx context.Context, mountpoint string, labels map[string]string) error
	Unmount(ctx context.Context, mountpoint string) error
	MountLocal(ctx context.Context, mountpoint string, labels map[string]string, mounts []mount.Mount) error
}

// SnapshotterConfig is used to configure the remote snapshotter instance
type SnapshotterConfig struct {
	// asyncRemove defers snapshot removal to Cleanup (see WithAsynchronousRemove).
	asyncRemove bool

	// minLayerSize skips remote mounting of smaller layers
	minLayerSize int64

	// allowInvalidMountsOnRestart keeps the snapshotter running even when
	// restored remote mounts cannot be validated on restart.
	allowInvalidMountsOnRestart bool
}

// Opt is an option to configure the remote snapshotter
type Opt func(config *SnapshotterConfig) error

// WithAsynchronousRemove defers removal of filesystem content until
// the Cleanup method is called. Removals will make the snapshot
// referred to by the key unavailable and make the key immediately
// available for re-use.
func WithAsynchronousRemove(config *SnapshotterConfig) error {
	config.asyncRemove = true
	return nil
}

// WithMinLayerSize sets the smallest layer that will be mounted remotely.
func WithMinLayerSize(minLayerSize int64) Opt {
	return func(config *SnapshotterConfig) error {
		config.minLayerSize = minLayerSize
		return nil
	}
}

// AllowInvalidMountsOnRestart is an Opt that tolerates invalid restored
// mounts when the snapshotter restarts.
func AllowInvalidMountsOnRestart(config *SnapshotterConfig) error {
	config.allowInvalidMountsOnRestart = true
	return nil
}

type snapshotter struct {
	root        string
	ms          *storage.MetaStore
	asyncRemove bool

	// fs is a filesystem that this snapshotter recognizes.
	fs                          FileSystem
	userxattr                   bool  // whether to enable "userxattr" mount option
	minLayerSize                int64 // minimum layer size for remote mounting
	allowInvalidMountsOnRestart bool
}

// NewSnapshotter returns a Snapshotter which can use unpacked remote layers
// as snapshots. This is implemented based on the overlayfs snapshotter, so
// diffs are stored under the provided root and a metadata file is stored under
// the root as same as overlayfs snapshotter.
func NewSnapshotter(ctx context.Context, root string, targetFs FileSystem, opts ...Opt) (snapshots.Snapshotter, error) {
	if targetFs == nil {
		return nil, fmt.Errorf("specify filesystem to use")
	}

	var config SnapshotterConfig
	for _, opt := range opts {
		if err := opt(&config); err != nil {
			return nil, err
		}
	}

	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	// overlayfs requires d_type support in the backing filesystem.
	supportsDType, err := fs.SupportsDType(root)
	if err != nil {
		return nil, err
	}
	if !supportsDType {
		return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root)
	}
	ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db"))
	if err != nil {
		return nil, err
	}

	if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) {
		return nil, err
	}

	userxattr, err := overlayutils.NeedsUserXAttr(root)
	if err != nil {
		logrus.WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr)
	}

	o := &snapshotter{
		root:                        root,
		ms:                          ms,
		asyncRemove:                 config.asyncRemove,
		fs:                          targetFs,
		userxattr:                   userxattr,
		minLayerSize:                config.minLayerSize,
		allowInvalidMountsOnRestart: config.allowInvalidMountsOnRestart,
	}

	// Re-validate remote snapshots persisted from a previous run.
	if err := o.restoreRemoteSnapshot(ctx); err != nil {
		return nil, fmt.Errorf("failed to restore remote snapshot: %w", err)
	}

	return o, nil
}

// Stat returns the info for an active or committed snapshot by name or
// key.
//
// Should be used for parent resolution, existence checks and to discern
// the kind of snapshot.
func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
	// Read-only transaction; always rolled back.
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return snapshots.Info{}, err
	}
	defer t.Rollback()
	_, info, _, err := storage.GetInfo(ctx, key)
	if err != nil {
		return snapshots.Info{}, err
	}

	return info, nil
}

// Update applies fieldpaths changes to a snapshot's info inside a writable
// metadata transaction.
func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return snapshots.Info{}, err
	}

	info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
	if err != nil {
		t.Rollback()
		return snapshots.Info{}, err
	}

	if err := t.Commit(); err != nil {
		return snapshots.Info{}, err
	}

	return info, nil
}

// Usage returns the resources taken by the snapshot identified by key.
//
// For active snapshots, this will scan the usage of the overlay "diff" (aka
// "upper") directory and may take some time.
// For remote snapshots, no scan is performed and the inode count and sizes
// are reported as "zero".
//
// For committed snapshots, the value is returned from the metadata database.
func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return snapshots.Usage{}, err
	}
	id, info, usage, err := storage.GetInfo(ctx, key)
	t.Rollback() // transaction no longer needed at this point.
	if err != nil {
		return snapshots.Usage{}, err
	}
	upperPath := o.upperPath(id)
	if info.Kind == snapshots.KindActive {
		du, err := fs.DiskUsage(ctx, upperPath)
		if err != nil {
			// TODO(stevvooe): Consider not reporting an error in this case.
			return snapshots.Usage{}, err
		}
		usage = snapshots.Usage(du)
	}
	return usage, nil
}

// Prepare creates an active snapshot. When the labels identify a remote
// snapshot target, it first attempts a remote (lazy) mount, then a local
// mount, committing internally and returning ErrAlreadyExists on success so
// that containerd skips unpacking. Otherwise it behaves like the plain
// overlayfs snapshotter and returns the mounts.
func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
	s, err := o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
	if err != nil {
		return nil, err
	}

	// Try to prepare the remote snapshot. If succeeded, we commit the snapshot now
	// and return ErrAlreadyExists.
	var base snapshots.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			return nil, err
		}
	}
	target, ok := base.Labels[targetSnapshotLabel]
	if !ok {
		// No remote target label: behave as a normal overlayfs snapshot.
		return o.mounts(ctx, s, parent)
	}

	// NOTE: If passed labels include a target of the remote snapshot, `Prepare`
	// must log whether this method succeeded to prepare that remote snapshot
	// or not, using the key `remoteSnapshotLogKey` defined in the above. This
	// log is used by tests in this project.
	lCtx := log.WithLogger(ctx, log.G(ctx).WithField("key", key).WithField("parent", parent))

	// remote snapshot prepare
	if !o.skipRemoteSnapshotPrepare(lCtx, base.Labels) {
		err := o.prepareRemoteSnapshot(lCtx, key, base.Labels)
		if err == nil {
			base.Labels[remoteLabel] = remoteLabelVal // Mark this snapshot as remote
			err := o.commit(ctx, true, target, key, append(opts, snapshots.WithLabels(base.Labels))...)
			if err == nil || errdefs.IsAlreadyExists(err) {
				// count also AlreadyExists as "success"
				log.G(lCtx).WithField(remoteSnapshotLogKey, prepareSucceeded).Info("remote snapshot successfully prepared.")
				return nil, fmt.Errorf("target snapshot %q: %w", target, errdefs.ErrAlreadyExists)
			}
			log.G(lCtx).WithField(remoteSnapshotLogKey, prepareFailed).WithError(err).Warn("failed to internally commit remote snapshot")
			// Don't fallback here (= prohibit to use this key again) because the FileSystem
			// possibly has done some work on this "upper" directory.
			return nil, err
		}
		log.G(lCtx).WithField(remoteSnapshotLogKey, prepareFailed).WithError(err).Warn("failed to prepare remote snapshot")
		// Missing ztoc is an expected fallback path, not a FUSE mount failure.
		if !errors.Is(err, ErrNoZtoc) {
			commonmetrics.IncOperationCount(commonmetrics.FuseMountFailureCount, digest.Digest(""))
		}
	}

	// fall back to local snapshot
	mounts, err := o.mounts(ctx, s, parent)
	if err != nil {
		// don't fallback here, since there was an error getting mounts
		return nil, err
	}
	log.G(ctx).WithField("layerDigest", base.Labels[ctdsnapshotters.TargetLayerDigestLabel]).Info("preparing snapshot as local snapshot")
	err = o.prepareLocalSnapshot(lCtx, key, base.Labels, mounts)
	if err == nil {
		err := o.commit(ctx, false, target, key, append(opts, snapshots.WithLabels(base.Labels))...)
		if err == nil || errdefs.IsAlreadyExists(err) {
			// count also AlreadyExists as "success"
			// there's no need to provide any details on []mount.Mount because mounting is already taken care of
			// by snapshotter
			log.G(lCtx).Info("local snapshot successfully prepared")
			return nil, fmt.Errorf("target snapshot %q: %w", target, errdefs.ErrAlreadyExists)
		}
		log.G(lCtx).WithError(err).Warn("failed to internally commit local snapshot")
		// Don't fallback here (= prohibit to use this key again) because the FileSystem
		// possibly has done some work on this "upper" directory.
		return nil, err
	}
	// Both remote and local preparation failed: hand the plain mounts back to
	// the container runtime and let it unpack the layer itself.
	log.G(lCtx).WithError(err).Warn("failed to prepare snapshot; deferring to container runtime")
	return mounts, nil
}

// skipRemoteSnapshotPrepare reports whether remote preparation should be
// skipped for this layer, based on the configured minimum layer size and the
// layer-size label supplied by the client.
func (o *snapshotter) skipRemoteSnapshotPrepare(ctx context.Context, labels map[string]string) bool {
	if o.minLayerSize > 0 {
		if strVal, ok := labels[source.TargetSizeLabel]; ok {
			if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
				if intVal < o.minLayerSize {
					log.G(ctx).Info("layer size less than runtime min_layer_size, skipping remote snapshot preparation")
					return true
				}
			} else {
				log.G(ctx).WithError(err).Errorf("config min_layer_size cannot be converted to int: %s", strVal)
			}
		}
	}
	return false
}

// View creates a read-only snapshot and returns its mounts.
func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
	s, err := o.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
	if err != nil {
		return nil, err
	}
	return o.mounts(ctx, s, parent)
}

// Mounts returns the mounts for the transaction identified by key. Can be
// called on an read-write or readonly transaction.
//
// This can be used to recover mounts after calling View or Prepare.
func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { ctx, t, err := o.ms.TransactionContext(ctx, false) if err != nil { return nil, err } s, err := storage.GetSnapshot(ctx, key) t.Rollback() if err != nil { return nil, fmt.Errorf("failed to get active mount: %w", err) } return o.mounts(ctx, s, key) } func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { return o.commit(ctx, false, name, key, opts...) } func (o *snapshotter) commit(ctx context.Context, isRemote bool, name, key string, opts ...snapshots.Opt) error { ctx, t, err := o.ms.TransactionContext(ctx, true) if err != nil { return err } defer func() { if err != nil { if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") } } }() // grab the existing id id, _, usage, err := storage.GetInfo(ctx, key) if err != nil { return err } if !isRemote { // skip diskusage for remote snapshots for allowing lazy preparation of nodes du, err := fs.DiskUsage(ctx, o.upperPath(id)) if err != nil { return err } usage = snapshots.Usage(du) } if _, err = storage.CommitActive(ctx, key, name, usage, opts...); err != nil { return fmt.Errorf("failed to commit snapshot: %w", err) } return t.Commit() } // Remove abandons the snapshot identified by key. The snapshot will // immediately become unavailable and unrecoverable. Disk space will // be freed up on the next call to `Cleanup`. 
func (o *snapshotter) Remove(ctx context.Context, key string) (err error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
		}
	}()

	_, _, err = storage.Remove(ctx, key)
	if err != nil {
		return fmt.Errorf("failed to remove: %w", err)
	}

	if !o.asyncRemove {
		var removals []string
		const cleanupCommitted = false
		removals, err = o.getCleanupDirectories(ctx, t, cleanupCommitted)
		if err != nil {
			return fmt.Errorf("unable to get directories for removal: %w", err)
		}

		// Remove directories after the transaction is closed, failures must not
		// return error since the transaction is committed with the removal
		// key no longer available.
		defer func() {
			if err == nil {
				for _, dir := range removals {
					if err := o.cleanupSnapshotDirectory(ctx, dir); err != nil {
						log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
					}
				}
			}
		}()
	}

	return t.Commit()
}

// Walk the snapshots.
func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return err
	}
	defer t.Rollback()
	return storage.WalkInfo(ctx, fn, fs...)
}

// Cleanup cleans up disk resources from removed or abandoned snapshots
func (o *snapshotter) Cleanup(ctx context.Context) error {
	const cleanupCommitted = false
	return o.cleanup(ctx, cleanupCommitted)
}

// cleanup unmounts and removes every stale snapshot directory; with
// cleanupCommitted it removes all directories (used on Close).
func (o *snapshotter) cleanup(ctx context.Context, cleanupCommitted bool) error {
	cleanup, err := o.cleanupDirectories(ctx, cleanupCommitted)
	if err != nil {
		return err
	}
	log.G(ctx).Debugf("cleanup: dirs=%v", cleanup)
	for _, dir := range cleanup {
		if err := o.cleanupSnapshotDirectory(ctx, dir); err != nil {
			log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
		}
	}
	return nil
}

func (o *snapshotter) cleanupDirectories(ctx context.Context, cleanupCommitted bool) ([]string, error) {
	// Get a write transaction to ensure no other write transaction can be entered
	// while the cleanup is scanning.
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return nil, err
	}
	defer t.Rollback()
	return o.getCleanupDirectories(ctx, t, cleanupCommitted)
}

// getCleanupDirectories lists snapshot directories on disk that are no longer
// referenced by the metadata store (or all of them when cleanupCommitted).
func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Transactor, cleanupCommitted bool) ([]string, error) {
	ids, err := storage.IDMap(ctx)
	if err != nil {
		return nil, err
	}
	snapshotDir := filepath.Join(o.root, "snapshots")
	fd, err := os.Open(snapshotDir)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	dirs, err := fd.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	cleanup := []string{}
	for _, d := range dirs {
		if !cleanupCommitted {
			if _, ok := ids[d]; ok {
				continue // still referenced by the metadata store
			}
		}
		cleanup = append(cleanup, filepath.Join(snapshotDir, d))
	}
	return cleanup, nil
}

func (o *snapshotter) cleanupSnapshotDirectory(ctx context.Context, dir string) error {
	// On a remote snapshot, the layer is mounted on the "fs" directory.
	// We use Filesystem's Unmount API so that it can do necessary finalization
	// before/after the unmount.
	mp := filepath.Join(dir, "fs")
	if err := o.fs.Unmount(ctx, mp); err != nil {
		// Best-effort: local snapshots have nothing mounted here.
		log.G(ctx).WithError(err).WithField("dir", mp).Debug("failed to unmount")
	}
	if err := os.RemoveAll(dir); err != nil {
		return fmt.Errorf("failed to remove directory %q: %w", dir, err)
	}
	return nil
}

// createSnapshot allocates the on-disk directory and metadata record for a new
// snapshot. The directory is built in a temp dir and atomically renamed into
// place; the deferred cleanup removes whichever of td/path is left behind on
// error.
func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ storage.Snapshot, err error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return storage.Snapshot{}, err
	}

	var td, path string
	defer func() {
		if err != nil {
			if td != "" {
				if err1 := o.cleanupSnapshotDirectory(ctx, td); err1 != nil {
					log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory")
				}
			}
			if path != "" {
				if err1 := o.cleanupSnapshotDirectory(ctx, path); err1 != nil {
					log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal")
					err = fmt.Errorf("failed to remove path: %v: %w", err1, err)
				}
			}
		}
	}()

	snapshotDir := filepath.Join(o.root, "snapshots")
	td, err = o.prepareDirectory(ctx, snapshotDir, kind)
	if err != nil {
		if rerr := t.Rollback(); rerr != nil {
			log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
		}
		return storage.Snapshot{}, fmt.Errorf("failed to create prepare snapshot dir: %w", err)
	}
	rollback := true
	defer func() {
		if rollback {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
		}
	}()

	s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...)
	if err != nil {
		return storage.Snapshot{}, fmt.Errorf("failed to create snapshot: %w", err)
	}

	if len(s.ParentIDs) > 0 {
		// Match ownership of the new upper dir to the immediate parent's.
		st, err := os.Stat(o.upperPath(s.ParentIDs[0]))
		if err != nil {
			return storage.Snapshot{}, fmt.Errorf("failed to stat parent: %w", err)
		}
		stat := st.Sys().(*syscall.Stat_t)
		if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
			return storage.Snapshot{}, fmt.Errorf("failed to chown: %w", err)
		}
	}

	path = filepath.Join(snapshotDir, s.ID)
	if err = os.Rename(td, path); err != nil {
		return storage.Snapshot{}, fmt.Errorf("failed to rename: %w", err)
	}
	td = ""
	rollback = false
	if err = t.Commit(); err != nil {
		return storage.Snapshot{}, fmt.Errorf("commit failed: %w", err)
	}
	return s, nil
}

// prepareDirectory creates a temporary snapshot directory containing "fs"
// (and "work" for active snapshots, as required by overlayfs).
func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) {
	td, err := os.MkdirTemp(snapshotDir, "new-")
	if err != nil {
		return "", fmt.Errorf("failed to create temp dir: %w", err)
	}
	if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil {
		return td, err
	}
	if kind == snapshots.KindActive {
		if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil {
			return td, err
		}
	}
	return td, nil
}

// mounts builds the mount list for a snapshot: a bind mount when there is no
// parent (or a read-only view of a single parent), otherwise an overlay mount
// over the parent chain.
func (o *snapshotter) mounts(ctx context.Context, s storage.Snapshot, checkKey string) ([]mount.Mount, error) {
	// Make sure that all layers lower than the target layer are available
	if checkKey != "" && !o.checkAvailability(ctx, checkKey) {
		return nil, fmt.Errorf("layer %q unavailable: %w", s.ID, errdefs.ErrUnavailable)
	}
	if len(s.ParentIDs) == 0 {
		// if we only have one layer/no parents then just return a bind mount as overlay
		// will not work
		roFlag := "rw"
		if s.Kind == snapshots.KindView {
			roFlag = "ro"
		}
		return []mount.Mount{
			{
				Source: o.upperPath(s.ID),
				Type:   "bind",
				Options: []string{
					roFlag,
					"rbind",
				},
			},
		}, nil
	}
	var options []string
	if s.Kind == snapshots.KindActive {
		options = append(options,
			fmt.Sprintf("workdir=%s", o.workPath(s.ID)),
			fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)),
		)
	} else if len(s.ParentIDs) == 1 {
		// Read-only view over a single parent: a bind mount suffices.
		return []mount.Mount{
			{
				Source: o.upperPath(s.ParentIDs[0]),
				Type:   "bind",
				Options: []string{
					"ro",
					"rbind",
				},
			},
		}, nil
	}
	parentPaths := make([]string, len(s.ParentIDs))
	for i := range s.ParentIDs {
		parentPaths[i] = o.upperPath(s.ParentIDs[i])
	}
	options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":")))
	if o.userxattr {
		options = append(options, "userxattr")
	}
	return []mount.Mount{
		{
			Type:    "overlay",
			Source:  "overlay",
			Options: options,
		},
	}, nil
}

// upperPath produces a file path like "{snapshotter.root}/snapshots/{id}/fs"
func (o *snapshotter) upperPath(id string) string {
	return filepath.Join(o.root, "snapshots", id, "fs")
}

// workPath produces a file path like "{snapshotter.root}/snapshots/{id}/work"
func (o *snapshotter) workPath(id string) string {
	return filepath.Join(o.root, "snapshots", id, "work")
}

// Close closes the snapshotter
func (o *snapshotter) Close() error {
	// unmount all mounts including Committed
	const cleanupCommitted = true
	ctx := context.Background()
	if err := o.cleanup(ctx, cleanupCommitted); err != nil {
		log.G(ctx).WithError(err).Warn("failed to cleanup")
	}
	return o.ms.Close()
}

// prepareLocalSnapshot tries to prepare the snapshot as a local snapshot.
func (o *snapshotter) prepareLocalSnapshot(ctx context.Context, key string, labels map[string]string, mounts []mount.Mount) error {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return err
	}
	defer t.Rollback()
	id, _, _, err := storage.GetInfo(ctx, key)
	if err != nil {
		return err
	}
	mountpoint := o.upperPath(id)
	log.G(ctx).Infof("preparing local filesystem at mountpoint=%v", mountpoint)
	return o.fs.MountLocal(ctx, mountpoint, labels, mounts)
}

// prepareRemoteSnapshot tries to prepare the snapshot as a remote snapshot
// using filesystems registered in this snapshotter.
func (o *snapshotter) prepareRemoteSnapshot(ctx context.Context, key string, labels map[string]string) error {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return err
	}
	defer t.Rollback()
	id, _, _, err := storage.GetInfo(ctx, key)
	if err != nil {
		return err
	}
	mountpoint := o.upperPath(id)
	log.G(ctx).Infof("preparing filesystem mount at mountpoint=%v", mountpoint)

	return o.fs.Mount(ctx, mountpoint, labels)
}

// checkAvailability checks availability of the specified layer and all lower
// layers using filesystem's checking functionality.
func (o *snapshotter) checkAvailability(ctx context.Context, key string) bool {
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("key", key))
	log.G(ctx).Debug("checking layer availability")

	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		log.G(ctx).WithError(err).Warn("failed to get transaction")
		return false
	}
	defer t.Rollback()

	// Checks of remote layers in the parent chain run concurrently; any single
	// failure marks the whole chain unavailable.
	eg, egCtx := errgroup.WithContext(ctx)
	for cKey := key; cKey != ""; {
		id, info, _, err := storage.GetInfo(ctx, cKey)
		if err != nil {
			log.G(ctx).WithError(err).Warnf("failed to get info of %q", cKey)
			return false
		}
		mp := o.upperPath(id)
		lCtx := log.WithLogger(ctx, log.G(ctx).WithField("mount-point", mp))
		if _, ok := info.Labels[remoteLabel]; ok {
			eg.Go(func() error {
				log.G(lCtx).Debug("checking mount point")
				if err := o.fs.Check(egCtx, mp, info.Labels); err != nil {
					log.G(lCtx).WithError(err).Warn("layer is unavailable")
					return err
				}
				return nil
			})
		} else {
			log.G(lCtx).Debug("layer is normal snapshot(overlayfs)")
		}
		cKey = info.Parent
	}
	if err := eg.Wait(); err != nil {
		return false
	}
	return true
}

// restoreRemoteSnapshot force-unmounts any stale mounts under the snapshots
// directory left by a previous run, then re-prepares every snapshot recorded
// as remote in the metadata store.
func (o *snapshotter) restoreRemoteSnapshot(ctx context.Context) error {
	mounts, err := mountinfo.GetMounts(nil)
	if err != nil {
		return err
	}
	for _, m := range mounts {
		if strings.HasPrefix(m.Mountpoint, filepath.Join(o.root, "snapshots")) {
			if err := syscall.Unmount(m.Mountpoint, syscall.MNT_FORCE); err != nil {
				return fmt.Errorf("failed to unmount %s: %w", m.Mountpoint, err)
			}
		}
	}

	var task []snapshots.Info
	if err := o.Walk(ctx, func(ctx context.Context, info snapshots.Info) error {
		if _, ok := info.Labels[remoteLabel]; ok {
			task = append(task, info)
		}
		return nil
	}); err != nil && !errdefs.IsNotFound(err) {
		return err
	}
	for _, info := range task {
		if err := o.prepareRemoteSnapshot(ctx, info.Name, info.Labels); err != nil {
			if o.allowInvalidMountsOnRestart {
				logrus.WithError(err).Warnf("failed to restore remote snapshot %s; remove this snapshot manually", info.Name)
				// This snapshot mount is invalid but allow this.
				// NOTE: snapshotter.Mount() will fail to return the mountpoint of these invalid snapshots so
				// containerd cannot use them anymore. User needs to manually remove the snapshots from
				// containerd's metadata store using ctr (e.g. `ctr snapshot rm`).
				continue
			}
			return fmt.Errorf("failed to prepare remote snapshot: %s: %w", info.Name, err)
		}
	}

	return nil
}
soci-snapshotter-0.4.1/snapshot/snapshot_test.go000066400000000000000000000442161454010642300221130ustar00rootroot00000000000000/*
   Copyright The Soci Snapshotter Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package snapshot import ( "context" _ "crypto/sha256" "fmt" "os" "path/filepath" "syscall" "testing" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/pkg/testutil" "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/storage" "github.com/containerd/containerd/snapshots/testsuite" ) const ( remoteSampleFile = "foo" remoteSampleFileContents = "remote layer" brokenLabel = "containerd.io/snapshot/broken" ) func prepareWithTarget(t *testing.T, sn snapshots.Snapshotter, target, key, parent string, labels map[string]string) string { ctx := context.TODO() if labels == nil { labels = make(map[string]string) } labels[targetSnapshotLabel] = target if _, err := sn.Prepare(ctx, key, parent, snapshots.WithLabels(labels)); !errdefs.IsAlreadyExists(err) { t.Fatalf("failed to prepare remote snapshot: %v", err) } return target } func TestRemotePrepare(t *testing.T) { testutil.RequiresRoot(t) ctx := context.TODO() root := t.TempDir() sn, err := NewSnapshotter(context.TODO(), root, bindFileSystem(t)) if err != nil { t.Fatalf("failed to make new remote snapshotter: %q", err) } // Prepare a remote snapshot. target := prepareWithTarget(t, sn, "testTarget", "/tmp/prepareTarget", "", nil) defer sn.Remove(ctx, target) // Get internally committed remote snapshot. 
var tinfo *snapshots.Info if err := sn.Walk(ctx, func(ctx context.Context, i snapshots.Info) error { if tinfo == nil && i.Kind == snapshots.KindCommitted { if i.Labels[targetSnapshotLabel] != target { return nil } if i.Parent != "" { return nil } tinfo = &i } return nil }); err != nil { t.Fatalf("failed to get remote snapshot: %v", err) } if tinfo == nil { t.Fatalf("prepared remote snapshot %q not found", target) } // Stat and validate the remote snapshot. info, err := sn.Stat(ctx, tinfo.Name) if err != nil { t.Fatal("failed to stat remote snapshot") } if info.Kind != snapshots.KindCommitted { t.Errorf("snapshot Kind is %q; want %q", info.Kind, snapshots.KindCommitted) } if label, ok := info.Labels[targetSnapshotLabel]; !ok || label != target { t.Errorf("remote snapshot hasn't valid remote label: %q", label) } } func TestRemoteOverlay(t *testing.T) { testutil.RequiresRoot(t) ctx := context.TODO() root := t.TempDir() sn, err := NewSnapshotter(context.TODO(), root, bindFileSystem(t)) if err != nil { t.Fatalf("failed to make new remote snapshotter: %q", err) } // Prepare a remote snapshot. target := prepareWithTarget(t, sn, "testTarget", "/tmp/prepareTarget", "", nil) defer sn.Remove(ctx, target) // Prepare a new layer based on the remote snapshot. 
pKey := "/tmp/test" mounts, err := sn.Prepare(ctx, pKey, target) if err != nil { t.Fatalf("faild to prepare using lower remote layer: %v", err) } if len(mounts) != 1 { t.Errorf("should only have 1 mount but received %d", len(mounts)) } m := mounts[0] if m.Type != "overlay" { t.Errorf("mount type should be overlay but received %q", m.Type) } if m.Source != "overlay" { t.Errorf("expected source %q but received %q", "overlay", m.Source) } var ( bp = getBasePath(ctx, sn, root, pKey) work = "workdir=" + filepath.Join(bp, "work") upper = "upperdir=" + filepath.Join(bp, "fs") lower = "lowerdir=" + getParents(ctx, sn, root, pKey)[0] ) for i, v := range []string{ work, upper, lower, } { if m.Options[i] != v { t.Errorf("expected %q but received %q", v, m.Options[i]) } } // Validate the contents of the snapshot data, err := os.ReadFile(filepath.Join(getParents(ctx, sn, root, pKey)[0], remoteSampleFile)) if err != nil { t.Fatalf("failed to read a file in the remote snapshot: %v", err) } if e := string(data); e != remoteSampleFileContents { t.Fatalf("expected file contents %q but got %q", remoteSampleFileContents, e) } } func TestRemoteCommit(t *testing.T) { testutil.RequiresRoot(t) ctx := context.TODO() root := t.TempDir() sn, err := NewSnapshotter(context.TODO(), root, bindFileSystem(t)) if err != nil { t.Fatalf("failed to make new remote snapshotter: %q", err) } // Prepare a remote snapshot. target := prepareWithTarget(t, sn, "testTarget", "/tmp/prepareTarget", "", nil) defer sn.Remove(ctx, target) // Prepare a new snapshot based on the remote snapshot pKey := "/tmp/test" mounts, err := sn.Prepare(ctx, pKey, target) if err != nil { t.Fatal(err) } // Make a new active snapshot based on the remote snapshot. 
snapshot := t.TempDir() m := mounts[0] if err := m.Mount(snapshot); err != nil { t.Fatal(err) } defer mount.Unmount(snapshot, 0) if err := os.WriteFile(filepath.Join(snapshot, "bar"), []byte("hi"), 0660); err != nil { t.Fatal(err) } mount.Unmount(snapshot, 0) // Commit the active snapshot cKey := "/tmp/layer" if err := sn.Commit(ctx, cKey, pKey); err != nil { t.Fatal(err) } // Validate the committed snapshot check := t.TempDir() mounts, err = sn.Prepare(ctx, "/tmp/test2", cKey) if err != nil { t.Fatal(err) } m = mounts[0] if err := m.Mount(check); err != nil { t.Fatal(err) } defer mount.Unmount(check, 0) data, err := os.ReadFile(filepath.Join(check, "bar")) if err != nil { t.Fatal(err) } if e := string(data); e != "hi" { t.Fatalf("expected file contents %q but got %q", "hi", e) } } func TestFailureDetection(t *testing.T) { testutil.RequiresRoot(t) tests := []struct { name string broken []bool // top element is the lowest layer overlay bool // whether appending the topmost normal overlay snapshot or not wantOK bool }{ { name: "flat_ok", broken: []bool{false}, wantOK: true, }, { name: "deep_ok", broken: []bool{false, false, false}, wantOK: true, }, { name: "flat_overlay_ok", broken: []bool{false}, overlay: true, wantOK: true, }, { name: "deep_overlay_ok", broken: []bool{false, false, false}, overlay: true, wantOK: true, }, { name: "flat_ng", broken: []bool{true}, wantOK: false, }, { name: "deep_ng", broken: []bool{false, true, false}, wantOK: false, }, { name: "flat_overlay_ng", broken: []bool{true}, overlay: true, wantOK: false, }, { name: "deep_overlay_ng", broken: []bool{false, true, false}, overlay: true, wantOK: false, }, } check := func(t *testing.T, ok bool, err error) bool { if err == nil { if !ok { t.Error("check all passed but wanted to be failed") return false } } else if errdefs.IsUnavailable(err) { if ok { t.Error("got Unavailable but wanted to be non-error") return false } } else { t.Errorf("got unexpected error %q", err) return false } return true } 
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.TODO()
			root := t.TempDir()
			fi := bindFileSystem(t)
			sn, err := NewSnapshotter(context.TODO(), root, fi)
			if err != nil {
				t.Fatalf("failed to make new Snapshotter: %q", err)
			}
			fs, ok := fi.(*bindFs)
			if !ok {
				t.Fatalf("Invalid filesystem type(not *filesystem)")
			}

			// Prepare snapshots
			fs.checkFailure = false
			pKey := ""
			for i, broken := range tt.broken {
				var (
					targetName = fmt.Sprintf("/tmp/testTarget%d", i)
					key        = fmt.Sprintf("/tmp/testKey%d", i)
					labels     = make(map[string]string)
				)
				if broken {
					labels[brokenLabel] = "true"
				}
				target := prepareWithTarget(t, sn, targetName, key, pKey, labels)
				defer sn.Remove(ctx, target)
				pKey = target
			}
			if tt.overlay {
				// Stack a normal (non-remote) overlay snapshot on top.
				key := "/tmp/test"
				_, err := sn.Prepare(ctx, key, pKey)
				if err != nil {
					t.Fatal(err)
				}
				cKey := "/tmp/layer"
				if err := sn.Commit(ctx, cKey, key); err != nil {
					t.Fatal(err)
				}
				defer sn.Remove(ctx, cKey)
				pKey = cKey
			}

			// Tests if we can detect layer unavailability
			key := "/tmp/snapshot.test"
			fs.checkFailure = true
			defer sn.Remove(ctx, key)
			if _, err := sn.Prepare(ctx, key, pKey); !check(t, tt.wantOK, err) {
				return
			}
			fs.checkFailure = false
			key2 := "/tmp/test2"
			if _, err = sn.Prepare(ctx, key2, pKey); err != nil {
				t.Fatal(err)
			}
			defer sn.Remove(ctx, key2)
			fs.checkFailure = true
			if _, err := sn.Mounts(ctx, key2); !check(t, tt.wantOK, err) {
				return
			}
		})
	}
}

// bindFileSystem returns a FileSystem test double that bind-mounts a shared
// directory (seeded with remoteSampleFile) as every "remote" layer.
func bindFileSystem(t *testing.T) FileSystem {
	root := t.TempDir()
	if err := os.WriteFile(filepath.Join(root, remoteSampleFile), []byte(remoteSampleFileContents), 0660); err != nil {
		t.Fatalf("failed to write sample file of bind filesystem: %q", err)
	}
	return &bindFs{
		root:   root,
		t:      t,
		broken: make(map[string]bool),
	}
}

// bindFs implements FileSystem using bind mounts; mountpoints labeled broken
// fail Check only while checkFailure is set.
type bindFs struct {
	t            *testing.T
	root         string
	checkFailure bool            // when true, Check fails for broken mountpoints
	broken       map[string]bool // mountpoints mounted with the broken label
}

func (fs *bindFs) Mount(ctx context.Context, mountpoint string, labels map[string]string) error {
	if _, ok := labels[brokenLabel]; ok {
		fs.broken[mountpoint] = true
	}
	if err := syscall.Mount(fs.root, mountpoint, "none", syscall.MS_BIND, ""); err != nil {
		fs.t.Fatalf("failed to bind mount %q to %q: %v", fs.root, mountpoint, err)
	}
	return nil
}

func (fs *bindFs) Check(ctx context.Context, mountpoint string, labels map[string]string) error {
	if fs.checkFailure {
		if broken, ok := fs.broken[mountpoint]; ok && broken {
			return fmt.Errorf("broken")
		}
	}
	return nil
}

func (fs *bindFs) Unmount(ctx context.Context, mountpoint string) error {
	return syscall.Unmount(mountpoint, 0)
}

func (fs *bindFs) MountLocal(ctx context.Context, mountpoint string, labels map[string]string, mounts []mount.Mount) error {
	if _, ok := labels[brokenLabel]; ok {
		fs.broken[mountpoint] = true
	}
	if err := syscall.Mount(fs.root, mountpoint, "none", syscall.MS_BIND, ""); err != nil {
		fs.t.Fatalf("failed to bind mount %q to %q: %v", fs.root, mountpoint, err)
	}
	return nil
}

// dummyFileSystem returns a FileSystem whose every operation fails, forcing
// the snapshotter down the plain overlayfs code paths.
func dummyFileSystem() FileSystem { return &dummyFs{} }

type dummyFs struct{}

func (fs *dummyFs) Mount(ctx context.Context, mountpoint string, labels map[string]string) error {
	return fmt.Errorf("dummy")
}

func (fs *dummyFs) Check(ctx context.Context, mountpoint string, labels map[string]string) error {
	return fmt.Errorf("dummy")
}

func (fs *dummyFs) Unmount(ctx context.Context, mountpoint string) error {
	return fmt.Errorf("dummy")
}

func (fs *dummyFs) MountLocal(ctx context.Context, mountpoint string, labels map[string]string, mounts []mount.Mount) error {
	return fmt.Errorf("dummy")
}

// =============================================================================
// Tests backward-compatibility of overlayfs snapshotter.
func newSnapshotter(ctx context.Context, root string) (snapshots.Snapshotter, func() error, error) { snapshotter, err := NewSnapshotter(context.TODO(), root, dummyFileSystem()) if err != nil { return nil, nil, err } return snapshotter, func() error { return snapshotter.Close() }, nil } func TestOverlay(t *testing.T) { testutil.RequiresRoot(t) testsuite.SnapshotterSuite(t, "Overlay", newSnapshotter) } func TestOverlayMounts(t *testing.T) { ctx := context.TODO() root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) } mounts, err := o.Prepare(ctx, "/tmp/test", "") if err != nil { t.Fatal(err) } if len(mounts) != 1 { t.Errorf("should only have 1 mount but received %d", len(mounts)) } m := mounts[0] if m.Type != "bind" { t.Errorf("mount type should be bind but received %q", m.Type) } expected := filepath.Join(root, "snapshots", "1", "fs") if m.Source != expected { t.Errorf("expected source %q but received %q", expected, m.Source) } if m.Options[0] != "rw" { t.Errorf("expected mount option rw but received %q", m.Options[0]) } if m.Options[1] != "rbind" { t.Errorf("expected mount option rbind but received %q", m.Options[1]) } } func TestOverlayCommit(t *testing.T) { ctx := context.TODO() root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) } key := "/tmp/test" mounts, err := o.Prepare(ctx, key, "") if err != nil { t.Fatal(err) } m := mounts[0] if err := os.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { t.Fatal(err) } } func TestOverlayOverlayMount(t *testing.T) { ctx := context.TODO() root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) } key := "/tmp/test" if _, err = o.Prepare(ctx, key, ""); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { t.Fatal(err) } var mounts []mount.Mount if mounts, err = o.Prepare(ctx, "/tmp/layer2", "base"); err != nil 
{ t.Fatal(err) } if len(mounts) != 1 { t.Errorf("should only have 1 mount but received %d", len(mounts)) } m := mounts[0] if m.Type != "overlay" { t.Errorf("mount type should be overlay but received %q", m.Type) } if m.Source != "overlay" { t.Errorf("expected source %q but received %q", "overlay", m.Source) } var ( bp = getBasePath(ctx, o, root, "/tmp/layer2") work = "workdir=" + filepath.Join(bp, "work") upper = "upperdir=" + filepath.Join(bp, "fs") lower = "lowerdir=" + getParents(ctx, o, root, "/tmp/layer2")[0] ) for i, v := range []string{ work, upper, lower, } { if m.Options[i] != v { t.Errorf("expected %q but received %q", v, m.Options[i]) } } } func getBasePath(ctx context.Context, sn snapshots.Snapshotter, root, key string) string { o := sn.(*snapshotter) ctx, t, err := o.ms.TransactionContext(ctx, false) if err != nil { panic(err) } defer t.Rollback() s, err := storage.GetSnapshot(ctx, key) if err != nil { panic(err) } return filepath.Join(root, "snapshots", s.ID) } func getParents(ctx context.Context, sn snapshots.Snapshotter, root, key string) []string { o := sn.(*snapshotter) ctx, t, err := o.ms.TransactionContext(ctx, false) if err != nil { panic(err) } defer t.Rollback() s, err := storage.GetSnapshot(ctx, key) if err != nil { panic(err) } parents := make([]string, len(s.ParentIDs)) for i := range s.ParentIDs { parents[i] = filepath.Join(root, "snapshots", s.ParentIDs[i], "fs") } return parents } func TestOverlayOverlayRead(t *testing.T) { testutil.RequiresRoot(t) ctx := context.TODO() root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) } key := "/tmp/test" mounts, err := o.Prepare(ctx, key, "") if err != nil { t.Fatal(err) } m := mounts[0] if err := os.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { t.Fatal(err) } if mounts, err = o.Prepare(ctx, "/tmp/layer2", "base"); err != nil { t.Fatal(err) } dest := filepath.Join(root, 
"dest") if err := os.Mkdir(dest, 0700); err != nil { t.Fatal(err) } if err := mount.All(mounts, dest); err != nil { t.Fatal(err) } defer syscall.Unmount(dest, 0) data, err := os.ReadFile(filepath.Join(dest, "foo")) if err != nil { t.Fatal(err) } if e := string(data); e != "hi" { t.Fatalf("expected file contents hi but got %q", e) } } func TestOverlayView(t *testing.T) { ctx := context.TODO() root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) } key := "/tmp/base" mounts, err := o.Prepare(ctx, key, "") if err != nil { t.Fatal(err) } m := mounts[0] if err := os.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { t.Fatal(err) } key = "/tmp/top" _, err = o.Prepare(ctx, key, "base") if err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(getParents(ctx, o, root, "/tmp/top")[0], "foo"), []byte("hi, again"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "top", key); err != nil { t.Fatal(err) } mounts, err = o.View(ctx, "/tmp/view1", "base") if err != nil { t.Fatal(err) } if len(mounts) != 1 { t.Fatalf("should only have 1 mount but received %d", len(mounts)) } m = mounts[0] if m.Type != "bind" { t.Errorf("mount type should be bind but received %q", m.Type) } expected := getParents(ctx, o, root, "/tmp/view1")[0] if m.Source != expected { t.Errorf("expected source %q but received %q", expected, m.Source) } if m.Options[0] != "ro" { t.Errorf("expected mount option ro but received %q", m.Options[0]) } if m.Options[1] != "rbind" { t.Errorf("expected mount option rbind but received %q", m.Options[1]) } mounts, err = o.View(ctx, "/tmp/view2", "top") if err != nil { t.Fatal(err) } if len(mounts) != 1 { t.Fatalf("should only have 1 mount but received %d", len(mounts)) } m = mounts[0] if m.Type != "overlay" { t.Errorf("mount type should be overlay but received %q", m.Type) } if m.Source != "overlay" { t.Errorf("mount source should be 
overlay but received %q", m.Source) } if len(m.Options) != 1 { t.Errorf("expected 1 mount option but got %d", len(m.Options)) } lowers := getParents(ctx, o, root, "/tmp/view2") expected = fmt.Sprintf("lowerdir=%s:%s", lowers[0], lowers[1]) if m.Options[0] != expected { t.Errorf("expected option %q but received %q", expected, m.Options[0]) } } soci-snapshotter-0.4.1/soci-snapshotter.service000066400000000000000000000026721454010642300217160ustar00rootroot00000000000000# Copyright The Soci Snapshotter Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright The containerd Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
[Unit] Description=soci snapshotter containerd plugin Documentation=https://github.com/awslabs/soci-snapshotter After=network.target Before=containerd.service [Service] Type=notify ExecStart=/usr/local/bin/soci-snapshotter-grpc Restart=always RestartSec=5 [Install] WantedBy=multi-user.targetsoci-snapshotter-0.4.1/soci/000077500000000000000000000000001454010642300157555ustar00rootroot00000000000000soci-snapshotter-0.4.1/soci/artifacts.go000066400000000000000000000354501454010642300202730ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package soci import ( "context" "errors" "fmt" "io/fs" "os" "path" "path/filepath" "sync" "time" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/util/dbutil" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" bolt "go.etcd.io/bbolt" ) // Artifacts package stores SOCI artifacts info in the following schema. // // - soci_artifacts // - *soci_artifact_digest* : bucket for each soci layer keyed by a unique string. // - size : : size of the artifact. 
// - originalDigest : : the digest for the image manifest or layer // - imageDigest: : the digest of the image index // - platform: : the platform for the index // - location: : the location of the artifact // - type: : the type of the artifact (can be either "soci_index" or "soci_layer") // ArtifactsDB is a store for SOCI artifact metadata type ArtifactsDb struct { db *bolt.DB } // ArtifactEntryType is the type of SOCI artifact represented by the ArtifactEntry type ArtifactEntryType string const ( artifactsDbName = "artifacts.db" ) var ( bucketKeySociArtifacts = []byte("soci_artifacts") bucketKeySize = []byte("size") bucketKeyOriginalDigest = []byte("oci_digest") bucketKeyImageDigest = []byte("image_digest") bucketKeyPlatform = []byte("platform") bucketKeyLocation = []byte("location") bucketKeyType = []byte("type") bucketKeyMediaType = []byte("media_type") bucketKeyCreatedAt = []byte("created_at") // ArtifactEntryTypeIndex indicates that an ArtifactEntry is a SOCI index artifact ArtifactEntryTypeIndex ArtifactEntryType = "soci_index" // ArtifactEntryTypeLayer indicates that an ArtifactEntry is a SOCI layer artifact ArtifactEntryTypeLayer ArtifactEntryType = "soci_layer" db *ArtifactsDb once sync.Once ) var ( ErrArtifactBucketNotFound = errors.New("soci_artifacts not found") ) // Get the default artifacts db path func ArtifactsDbPath() string { return path.Join(config.SociSnapshotterRootPath, artifactsDbName) } // ArtifactEntry is a metadata object for a SOCI artifact. type ArtifactEntry struct { // Size is the SOCI artifact's size in bytes. Size int64 // Digest is the SOCI artifact's digest. Digest string // OriginalDigest is the digest of the content for which the SOCI artifact was created. OriginalDigest string // ImageDigest is the digest of the container image that was used to generate the artifact // ImageDigest refers to the image, OriginalDigest refers to the specific content within that // image that was used to generate the Artifact. 
ImageDigest string // Platform is the platform for which the artifact was generated. Platform string // Location is the file path for the SOCI artifact. Location string // Type is the type of SOCI artifact. Type ArtifactEntryType // Media Type of the stored artifact. MediaType string // Creation time of SOCI artifact. CreatedAt time.Time } // NewDB returns an instance of an ArtifactsDB func NewDB(path string) (*ArtifactsDb, error) { once.Do(func() { f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { log.G(context.Background()).Errorf("can't create or open the file %s", path) return } defer f.Close() database, err := bolt.Open(f.Name(), 0600, nil) if err != nil { log.G(context.Background()).Errorf("can't open the db") return } db = &ArtifactsDb{db: database} }) if db == nil { return nil, fmt.Errorf("artifacts.db is not available") } return db, nil } func (db *ArtifactsDb) getIndexArtifactEntries(indexDigest string) ([]ArtifactEntry, error) { artifactEntries := []ArtifactEntry{} err := db.Walk(func(ae *ArtifactEntry) error { if ae.Type == ArtifactEntryTypeIndex && ae.OriginalDigest == indexDigest { artifactEntries = append(artifactEntries, *ae) } return nil }) return artifactEntries, err } // Walk applys a function to all ArtifactEntries in the ArtifactsDB func (db *ArtifactsDb) Walk(f func(*ArtifactEntry) error) error { err := db.db.View(func(tx *bolt.Tx) error { bucket, err := getArtifactsBucket(tx) if err != nil { return nil } bucket.ForEachBucket(func(k []byte) error { artifactBkt := bucket.Bucket(k) ae, err := loadArtifact(artifactBkt, string(k)) if err != nil { return err } return f(ae) }) return nil }) return err } // SyncWithLocalStore will sync the artifacts databse with SOCIs local content store, either adding new or removing old artifacts. 
func (db *ArtifactsDb) SyncWithLocalStore(ctx context.Context, blobStore store.Store, blobStorePath string, cs content.Store) error { if err := db.RemoveOldArtifacts(blobStore); err != nil { return fmt.Errorf("failed to remove old artifacts from db: %w", err) } if err := db.addNewArtifacts(ctx, blobStorePath, cs); err != nil { return fmt.Errorf("failed to add new artifacts to db: %w", err) } return nil } // RemoveOldArtifacts will remove any artifacts from the artifacts database that // no longer exist in SOCIs local content store. NOTE: Removing buckets while iterating // (bucket.ForEach) causes unexpected behavior (see: https://github.com/boltdb/bolt/issues/426). // This implementation works around this issue by appending buckets to a slice when // iterating and removing them after. func (db *ArtifactsDb) RemoveOldArtifacts(blobStore store.Store) error { err := db.db.Update(func(tx *bolt.Tx) error { bucket, err := getArtifactsBucket(tx) if err != nil { return nil } var bucketsToRemove [][]byte bucket.ForEachBucket(func(k []byte) error { artifactBucket := bucket.Bucket(k) ae, err := loadArtifact(artifactBucket, string(k)) if err != nil { return err } existsInContentStore, err := blobStore.Exists(context.Background(), ocispec.Descriptor{MediaType: ae.MediaType, Digest: digest.Digest(ae.Digest)}) if err != nil { return err } if !existsInContentStore { bucketsToRemove = append(bucketsToRemove, k) } return nil }) // remove the buckets for _, k := range bucketsToRemove { if err := bucket.DeleteBucket(k); err != nil { return err } } return nil }) return err } // addNewArtifacts will add any new artifacts discovered in SOCIs local content store to the artifacts database. 
func (db *ArtifactsDb) addNewArtifacts(ctx context.Context, blobStorePath string, cs content.Store) error { addHashPrefix := func(name string) string { if len(name) == 64 { return fmt.Sprintf("sha256:%s", name) } return fmt.Sprintf("sha512:%s", name) } return filepath.WalkDir(blobStorePath, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } if d.IsDir() { return nil } info, err := d.Info() if err != nil { return err } // skip: entry is an empty config if info.Size() < 10 { return nil } f, err := os.Open(path) if err != nil { return err } defer f.Close() var sociIndex Index // tests to ensure artifact is really an index if err = DecodeIndex(f, &sociIndex); err != nil { return nil } if sociIndex.MediaType != ocispec.MediaTypeImageManifest { return nil } if sociIndex.ArtifactType != SociIndexArtifactType { return nil } if sociIndex.Subject == nil { return nil } // entry is an index indexDigest := addHashPrefix(d.Name()) ae, err := db.GetArtifactEntry(indexDigest) if err != nil && !errors.Is(err, ErrArtifactBucketNotFound) && !errors.Is(err, errdefs.ErrNotFound) { return err } if ae == nil { manifestDigest := sociIndex.Subject.Digest.String() platform, err := images.Platforms(ctx, cs, ocispec.Descriptor{ MediaType: ocispec.MediaTypeImageManifest, Digest: digest.Digest(manifestDigest)}) if err != nil { return err } indexEntry := &ArtifactEntry{ Size: info.Size(), Digest: indexDigest, OriginalDigest: manifestDigest, ImageDigest: manifestDigest, Platform: platforms.Format(platform[0]), Type: ArtifactEntryTypeIndex, Location: manifestDigest, MediaType: sociIndex.MediaType, CreatedAt: time.Now(), } if err = db.WriteArtifactEntry(indexEntry); err != nil { return err } for _, zt := range sociIndex.Blobs { ztocEntry := &ArtifactEntry{ Size: zt.Size, Digest: zt.Digest.String(), OriginalDigest: zt.Annotations[IndexAnnotationImageLayerDigest], Type: ArtifactEntryTypeLayer, Location: zt.Annotations[IndexAnnotationImageLayerDigest], MediaType: 
SociLayerMediaType, CreatedAt: time.Now(), } if err := db.WriteArtifactEntry(ztocEntry); err != nil { return err } } } return nil }) } // GetArtifactEntry loads a single ArtifactEntry from the ArtifactsDB by digest func (db *ArtifactsDb) GetArtifactEntry(digest string) (*ArtifactEntry, error) { entry := ArtifactEntry{} err := db.db.View(func(tx *bolt.Tx) error { bucket, err := getArtifactsBucket(tx) if err != nil { return err } e, err := getArtifactEntryByDigest(bucket, digest) if err != nil { return err } entry = *e return nil }) if err != nil { return nil, err } return &entry, nil } // GetArtifactType gets Type of an ArtifactEntry from the ArtifactsDB by digest func (db *ArtifactsDb) GetArtifactType(digest string) (ArtifactEntryType, error) { ae, err := db.GetArtifactEntry(digest) if err != nil { return "", err } return ae.Type, nil } // RemoveArtifactEntryByIndexDigest removes an index's artifact entry using its digest func (db *ArtifactsDb) RemoveArtifactEntryByIndexDigest(digest []byte) error { return db.db.Update(func(tx *bolt.Tx) error { bucket, err := getArtifactsBucket(tx) if err != nil { return err } dgstBucket := bucket.Bucket(digest) if dgstBucket == nil { return fmt.Errorf("the index of the digest %v doesn't exist", digest) } if indexBucket(dgstBucket) { return bucket.DeleteBucket(digest) } return fmt.Errorf("the digest %v does not correspond to an index", digest) }) } // GetArtifactEntriesByImageDigest returns all index digests greated from a given image digest func (db *ArtifactsDb) GetArtifactEntriesByImageDigest(digest string) ([][]byte, error) { entries := make([][]byte, 0) return entries, db.db.View(func(tx *bolt.Tx) error { bucket, err := getArtifactsBucket(tx) if err != nil { return err } c := bucket.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { artifactBucket := bucket.Bucket(k) if indexBucket(artifactBucket) && hasImageDigest(artifactBucket, digest) { entries = append(entries, k) } } return nil }) } // Determines whether a 
bucket represents an index, as opposed to a zTOC func indexBucket(b *bolt.Bucket) bool { mt := string(b.Get(bucketKeyMediaType)) return mt == ocispec.MediaTypeImageManifest } // Determines whether a bucket's image digest is the same as digest func hasImageDigest(b *bolt.Bucket, digest string) bool { imgDigest := string(b.Get(bucketKeyImageDigest)) return digest == imgDigest } // WriteArtifactEntry stores a single ArtifactEntry into the ArtifactsDB. // If there is already an artifact in the ArtifactsDB with the same Digest, // the old data is overwritten. func (db *ArtifactsDb) WriteArtifactEntry(entry *ArtifactEntry) error { if entry == nil { return fmt.Errorf("no entry to write") } err := db.db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists(bucketKeySociArtifacts) if err != nil { return err } err = putArtifactEntry(bucket, entry) return err }) return err } func getArtifactsBucket(tx *bolt.Tx) (*bolt.Bucket, error) { artifacts := tx.Bucket(bucketKeySociArtifacts) if artifacts == nil { return nil, ErrArtifactBucketNotFound } return artifacts, nil } func getArtifactEntryByDigest(artifacts *bolt.Bucket, digest string) (*ArtifactEntry, error) { artifactBkt := artifacts.Bucket([]byte(digest)) if artifactBkt == nil { return nil, fmt.Errorf("couldn't retrieve artifact for %s, %w", digest, errdefs.ErrNotFound) } return loadArtifact(artifactBkt, digest) } func loadArtifact(artifactBkt *bolt.Bucket, digest string) (*ArtifactEntry, error) { ae := ArtifactEntry{Digest: digest} encodedSize := artifactBkt.Get(bucketKeySize) size, err := dbutil.DecodeInt(encodedSize) if err != nil { return nil, err } createdAt := time.Time{} createdAtBytes := artifactBkt.Get(bucketKeyCreatedAt) if createdAtBytes != nil { err := createdAt.UnmarshalBinary(createdAtBytes) if err != nil { return nil, fmt.Errorf("cannot unmarshal CreatedAt time: %w", err) } } ae.Size = size ae.Location = string(artifactBkt.Get(bucketKeyLocation)) ae.Type = 
ArtifactEntryType(artifactBkt.Get(bucketKeyType)) ae.OriginalDigest = string(artifactBkt.Get(bucketKeyOriginalDigest)) ae.ImageDigest = string(artifactBkt.Get(bucketKeyImageDigest)) ae.Platform = string(artifactBkt.Get(bucketKeyPlatform)) ae.MediaType = string(artifactBkt.Get(bucketKeyMediaType)) ae.CreatedAt = createdAt return &ae, nil } func putArtifactEntry(artifacts *bolt.Bucket, ae *ArtifactEntry) error { if artifacts == nil { return fmt.Errorf("can't write ArtifactEntry: the bucket does not exist") } artifactBkt, err := artifacts.CreateBucketIfNotExists([]byte(ae.Digest)) if err != nil { return err } sizeInBytes, err := dbutil.EncodeInt(ae.Size) if err != nil { return err } createdAt, err := ae.CreatedAt.MarshalBinary() if err != nil { return err } updates := []struct { key []byte val []byte }{ {bucketKeySize, sizeInBytes}, {bucketKeyLocation, []byte(ae.Location)}, {bucketKeyOriginalDigest, []byte(ae.OriginalDigest)}, {bucketKeyImageDigest, []byte(ae.ImageDigest)}, {bucketKeyPlatform, []byte(ae.Platform)}, {bucketKeyType, []byte(ae.Type)}, {bucketKeyMediaType, []byte(ae.MediaType)}, {bucketKeyCreatedAt, createdAt}, } for _, update := range updates { if err := artifactBkt.Put(update.key, update.val); err != nil { return err } } return nil } soci-snapshotter-0.4.1/soci/artifacts_test.go000066400000000000000000000140341454010642300213250ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package soci import ( "os" "testing" bolt "go.etcd.io/bbolt" ) func TestGetIndexArtifactEntries(t *testing.T) { db, err := newTestableDb() if err != nil { t.Fatalf("can't create a test db") } const ( dgst1 = "sha256:10d6aec48c0a74635a5f3dc555528c1673afaa21ed6e1270a9a44de66e8ffa55" originalDgst1 = "sha256:1236aec48c0a74635a5f3dc666628c1673afaa21ed6e1270a9a44de66e811111" dgst2 = "sha256:20d6a9c48c0a74635a5f3dc555528c1673afaa21ed6e1270a9a44de66e8ffa55" dgst3 = "sha256:80d6aec48caaaaaaaa5f3dc555528c1673afaa21ed6e1270a9a44de66e8ffa55" originalDgst3 = "sha256:bbbbbbb48c0a74635a5f3dc666628c1673afaa21ed6e1270a9a44de66e811111" dgst4 = "sha256:99d6aec48caaaaaaaa5f3dc555528c1673afaa21ed6e1270a9a44de66e8ffa55" imageDigest = "sha256:0000000000000000000000000000000000000000000000000000000000000000" platform = "linux/amd64" ) entries := []ArtifactEntry{ { Size: 10, Digest: dgst1, OriginalDigest: originalDgst1, Location: "/var/soci-snapshotter/test1", Type: ArtifactEntryTypeIndex, ImageDigest: imageDigest, Platform: platform, }, { Size: 20, Digest: dgst2, OriginalDigest: originalDgst1, Location: "/var/soci-snapshotter/test2", Type: ArtifactEntryTypeIndex, ImageDigest: imageDigest, Platform: platform, }, { Size: 15, Digest: dgst3, OriginalDigest: originalDgst3, Location: "/var/soci-snapshotter/test3", Type: ArtifactEntryTypeIndex, ImageDigest: imageDigest, Platform: platform, }, { Size: 10, Digest: dgst4, OriginalDigest: originalDgst1, Location: "/var/soci-snapshotter/test1", Type: ArtifactEntryTypeLayer, ImageDigest: imageDigest, Platform: platform, }, } for _, entry := range entries { err = db.WriteArtifactEntry(&entry) if err != nil { t.Fatalf("can't put ArtifactEntry to a bucket") } } retrievedEntries, err := db.getIndexArtifactEntries(originalDgst1) if err != nil { t.Fatalf("could not retrieve artifact entries for original digest %s", originalDgst1) } if len(retrievedEntries) != 2 { t.Fatalf("the length of retrieved entries should be equal to 2, but equals to %d", 
len(retrievedEntries)) } if retrievedEntries[0] != entries[0] || retrievedEntries[1] != entries[1] { t.Fatalf("the retrieved content should match to the original content") } } func TestArtifactDB_DoesNotExist(t *testing.T) { _, err := NewDB(ArtifactsDbPath()) if err == nil { t.Fatalf("getArtifactEntry should fail since artifacts.db doesn't exist") } } func TestArtifactEntry_ReadWrite_Using_ArtifactsDb(t *testing.T) { db, err := newTestableDb() if err != nil { t.Fatalf("can't create a test db") } var ( dgst = "sha256:80d6aec48c0a74635a5f3dc555528c1673afaa21ed6e1270a9a44de66e8ffa55" originalDgst = "sha256:1236aec48c0a74635a5f3dc666628c1673afaa21ed6e1270a9a44de66e811111" imageDigest = "sha256:0000000000000000000000000000000000000000000000000000000000000000" platform = "linux/amd64" ) ae := &ArtifactEntry{ Size: 10, Digest: dgst, OriginalDigest: originalDgst, Location: "/var/soci-snapshotter/test", Type: ArtifactEntryTypeIndex, ImageDigest: imageDigest, Platform: platform, } err = db.WriteArtifactEntry(ae) if err != nil { t.Fatalf("can't put ArtifactEntry to a bucket") } readArtifactEntry, err := db.GetArtifactEntry(dgst) if err != nil { t.Fatalf("cannot get artifact entry with the digest=%s", dgst) } if *ae != *readArtifactEntry { t.Fatalf("the retrieved artifact entry is not valid") } } func TestArtifactEntry_ReadWrite_AtomicDbOperations(t *testing.T) { db, err := newTestableDb() if err != nil { t.Fatalf("can't create a test db") } var ( dgst = "sha256:80d6aec48c0a74635a5f3dc106328c1673afaa21ed6e1270a9a44de66e8ffa55" originalDgst = "sha256:1236aec48c0a74635a5f3dc106328c1673afaa21ed6e1270a9a44de66e811111" imageDigest = "sha256:0000000000000000000000000000000000000000000000000000000000000000" platform = "linux/amd64" ) ae := ArtifactEntry{ Size: 10, Digest: dgst, OriginalDigest: originalDgst, Location: "/var/soci-snapshotter/test", ImageDigest: imageDigest, Platform: platform, } err = db.db.Update(func(tx *bolt.Tx) error { root, err := getArtifactsBucket(tx) if err != 
nil { return err } err = putArtifactEntry(root, &ae) return err }) if err != nil { t.Fatalf("can't put ArtifactEntry to a bucket") } db.db.View(func(tx *bolt.Tx) error { root, err := getArtifactsBucket(tx) if err != nil { return err } readArtifactEntry, err := getArtifactEntryByDigest(root, dgst) if err != nil { t.Fatalf("cannot get artifact entry with the digest=%s", dgst) return err } if ae != *readArtifactEntry { t.Fatalf("the retrieved artifact entry is not valid") } return nil }) } func newTestableDb() (*ArtifactsDb, error) { f, err := os.CreateTemp("", "readertestdb") if err != nil { return nil, err } defer f.Close() defer os.Remove(f.Name()) db, err := bolt.Open(f.Name(), 0600, nil) if err != nil { return nil, err } err = db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(bucketKeySociArtifacts) if err != nil { return err } return nil }) if err != nil { return nil, err } return &ArtifactsDb{db: db}, nil } soci-snapshotter-0.4.1/soci/soci_index.go000066400000000000000000000447471454010642300204500ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package soci import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "os" "strconv" "sync" "time" "github.com/awslabs/soci-snapshotter/soci/store" "github.com/awslabs/soci-snapshotter/ztoc" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" orascontent "oras.land/oras-go/v2/content" "oras.land/oras-go/v2/errdef" ) const ( // SociIndexArtifactType is the artifactType of index SOCI index SociIndexArtifactType = "application/vnd.amazon.soci.index.v1+json" // SociLayerMediaType is the mediaType of ztoc SociLayerMediaType = "application/octet-stream" // IndexAnnotationImageLayerMediaType is the index annotation for image layer media type IndexAnnotationImageLayerMediaType = "com.amazon.soci.image-layer-mediaType" // IndexAnnotationImageLayerDigest is the index annotation for image layer digest IndexAnnotationImageLayerDigest = "com.amazon.soci.image-layer-digest" // IndexAnnotationBuildToolIdentifier is the index annotation for build tool identifier IndexAnnotationBuildToolIdentifier = "com.amazon.soci.build-tool-identifier" defaultSpanSize = int64(1 << 22) // 4MiB defaultMinLayerSize = 10 << 20 // 10MiB defaultBuildToolIdentifier = "AWS SOCI CLI v0.1" // emptyJSONObjectDigest is the digest of the content "{}". emptyJSONObjectDigest = "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" ) var ( errNotLayerType = errors.New("not a layer mediaType") errUnsupportedLayerFormat = errors.New("unsupported layer format") // defaultConfigContent is the content of the config object used when serializing // a SOCI index as an OCI 1.0 Manifest for fallback compatibility. OCI 1.0 Manifests // require a non-empty config object, so we use the empty JSON object. 
The content of // the config is never used by SOCI, but it is validated by registries. defaultConfigContent = []byte("{}") // defaultConfigDescriptor is the descriptor of the of the config object used when // serializing a SOCI index as an OCI 1.0 Manifest for fallback compatibility. defaultConfigDescriptor = ocispec.Descriptor{ // The Config's media type is set to `SociIndexArtifactType` so that the oras-go // library can use it to filter artifacts. MediaType: SociIndexArtifactType, Digest: emptyJSONObjectDigest, Size: 2, } ) // Index represents a SOCI index manifest. type Index struct { // MediaType represents the type of document into which the SOCI index manifest will be serialized MediaType string `json:"mediaType"` // Artifact type is the media type of the SOCI index itself. ArtifactType string `json:"artifactType"` // Blobs are descriptors for the zTOCs in the index. Blobs []ocispec.Descriptor `json:"blobs,omitempty"` // Subject is the descriptor for the resource to which the index applies. Subject *ocispec.Descriptor `json:"subject,omitempty"` // Annotations are optional additional metadata for the index. Annotations map[string]string `json:"annotations,omitempty"` } // IndexWithMetadata has a soci `Index` and its metadata. type IndexWithMetadata struct { Index *Index Platform *ocispec.Platform ImageDigest digest.Digest CreatedAt time.Time } // IndexDescriptorInfo has a soci index descriptor and additional metadata. type IndexDescriptorInfo struct { ocispec.Descriptor CreatedAt time.Time } // DecodeIndex deserializes a JSON blob in an io.Reader // into a SOCI index. The blob is an OCI 1.0 Manifest func DecodeIndex(r io.Reader, index *Index) error { b, err := io.ReadAll(r) if err != nil { return err } return UnmarshalIndex(b, index) } // UnmarshalIndex deserializes a JSON blob in a byte array // into a SOCI index. 
The blob is an OCI 1.0 Manifest func UnmarshalIndex(b []byte, index *Index) error { if err := json.Unmarshal(b, index); err != nil { return err } var manifest ocispec.Manifest if err := json.Unmarshal(b, &manifest); err != nil { return err } fromManifest(manifest, index) return nil } // fromManifest converts an OCI 1.0 Manifest to a SOCI Index func fromManifest(manifest ocispec.Manifest, index *Index) { index.MediaType = manifest.MediaType index.ArtifactType = SociIndexArtifactType index.Blobs = manifest.Layers index.Subject = manifest.Subject index.Annotations = manifest.Annotations } // MarshalIndex serializes a SOCI index into a JSON blob. // The JSON blob is an OCI 1.0 Manifest func MarshalIndex(i *Index) ([]byte, error) { var manifest ocispec.Manifest manifest.SchemaVersion = 2 manifest.MediaType = ocispec.MediaTypeImageManifest manifest.Config = defaultConfigDescriptor manifest.Layers = i.Blobs manifest.Subject = i.Subject manifest.Annotations = i.Annotations return json.Marshal(manifest) } // GetIndexDescriptorCollection returns all `IndexDescriptorInfo` of the given image and platforms. func GetIndexDescriptorCollection(ctx context.Context, cs content.Store, artifactsDb *ArtifactsDb, img images.Image, ps []ocispec.Platform) ([]IndexDescriptorInfo, *ocispec.Descriptor, error) { var ( descriptors []IndexDescriptorInfo entries []ArtifactEntry indexDesc *ocispec.Descriptor err error ) for _, platform := range ps { indexDesc, err = GetImageManifestDescriptor(ctx, cs, img.Target, platforms.OnlyStrict(platform)) if err != nil { return nil, nil, err } e, err := artifactsDb.getIndexArtifactEntries(indexDesc.Digest.String()) if err != nil { return nil, nil, err } entries = append(entries, e...) 
} for _, entry := range entries { dgst, err := digest.Parse(entry.Digest) if err != nil { continue } desc := ocispec.Descriptor{ MediaType: entry.MediaType, Digest: dgst, Size: entry.Size, } descriptors = append(descriptors, IndexDescriptorInfo{ Descriptor: desc, CreatedAt: entry.CreatedAt, }) } return descriptors, indexDesc, nil } type buildConfig struct { spanSize int64 minLayerSize int64 buildToolIdentifier string artifactsDb *ArtifactsDb platform ocispec.Platform } // BuildOption specifies a config change to build soci indices. type BuildOption func(c *buildConfig) error // WithSpanSize specifies span size. func WithSpanSize(spanSize int64) BuildOption { return func(c *buildConfig) error { c.spanSize = spanSize return nil } } // WithMinLayerSize specifies min layer size to build a ztoc for a layer. func WithMinLayerSize(minLayerSize int64) BuildOption { return func(c *buildConfig) error { c.minLayerSize = minLayerSize return nil } } // WithBuildToolIdentifier specifies the build tool annotation value. func WithBuildToolIdentifier(tool string) BuildOption { return func(c *buildConfig) error { c.buildToolIdentifier = tool return nil } } // WithPlatform specifies platform used to build soci indices. func WithPlatform(platform ocispec.Platform) BuildOption { return func(c *buildConfig) error { c.platform = platform return nil } } // WithArtifactsDb speicifies the artifacts database func WithArtifactsDb(db *ArtifactsDb) BuildOption { return func(c *buildConfig) error { c.artifactsDb = db return nil } } // IndexBuilder creates soci indices. type IndexBuilder struct { contentStore content.Store blobStore orascontent.Storage ArtifactsDb *ArtifactsDb config *buildConfig ztocBuilder *ztoc.Builder } // NewIndexBuilder returns an `IndexBuilder` that is used to create soci indices. 
func NewIndexBuilder(contentStore content.Store, blobStore orascontent.Storage, artifactsDb *ArtifactsDb, opts ...BuildOption) (*IndexBuilder, error) { defaultPlatform := platforms.DefaultSpec() config := &buildConfig{ spanSize: defaultSpanSize, minLayerSize: defaultMinLayerSize, buildToolIdentifier: defaultBuildToolIdentifier, platform: defaultPlatform, } for _, opt := range opts { if err := opt(config); err != nil { return nil, err } } return &IndexBuilder{ contentStore: contentStore, blobStore: blobStore, ArtifactsDb: artifactsDb, config: config, ztocBuilder: ztoc.NewBuilder(config.buildToolIdentifier), }, nil } // Build builds a soci index for `img` and return the index with metadata. func (b *IndexBuilder) Build(ctx context.Context, img images.Image) (*IndexWithMetadata, error) { // we get manifest descriptor before calling images.Manifest, since after calling // images.Manifest, images.Children will error out when reading the manifest blob (this happens on containerd side) imgManifestDesc, err := GetImageManifestDescriptor(ctx, b.contentStore, img.Target, platforms.OnlyStrict(b.config.platform)) if err != nil { return nil, err } manifest, err := images.Manifest(ctx, b.contentStore, img.Target, platforms.OnlyStrict(b.config.platform)) if err != nil { return nil, err } // attempt to build a ztoc for each layer sociLayersDesc := make([]*ocispec.Descriptor, len(manifest.Layers)) errChan := make(chan error) go func() { var wg sync.WaitGroup for i, l := range manifest.Layers { wg.Add(1) go func(i int, l ocispec.Descriptor) { defer wg.Done() desc, err := b.buildSociLayer(ctx, l) if err != nil { if err != errUnsupportedLayerFormat { errChan <- err } return } if desc != nil { // index layers must be in some deterministic order // actual layer order used for historic consistency sociLayersDesc[i] = desc } }(i, l) } wg.Wait() close(errChan) }() errs := make([]error, 0, len(manifest.Layers)) for err := range errChan { errs = append(errs, err) } if len(errs) > 0 { errWrap 
:= errors.New("errors encountered while building soci layers") for _, err := range errs { errWrap = fmt.Errorf("%w; %v", errWrap, err) } return nil, errWrap } ztocsDesc := make([]ocispec.Descriptor, 0, len(sociLayersDesc)) for _, desc := range sociLayersDesc { if desc != nil { ztocsDesc = append(ztocsDesc, *desc) } } if len(ztocsDesc) == 0 { return nil, errors.New("no ztocs created, all layers either skipped or produced errors") } annotations := map[string]string{ IndexAnnotationBuildToolIdentifier: b.config.buildToolIdentifier, } refers := &ocispec.Descriptor{ MediaType: imgManifestDesc.MediaType, Digest: imgManifestDesc.Digest, Size: imgManifestDesc.Size, } index := NewIndex(ztocsDesc, refers, annotations) return &IndexWithMetadata{ Index: index, Platform: &b.config.platform, ImageDigest: img.Target.Digest, CreatedAt: time.Now(), }, nil } // buildSociLayer builds a ztoc for an image layer (`desc`) and returns ztoc descriptor. // It may skip building ztoc (e.g., if layer size < `minLayerSize`) and return nil. func (b *IndexBuilder) buildSociLayer(ctx context.Context, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { if !images.IsLayerType(desc.MediaType) { return nil, errNotLayerType } // check if we need to skip building the zTOC if skip, reason := skipBuildingZtoc(desc, b.config); skip { fmt.Printf("ztoc skipped - layer %s (%s) %s\n", desc.Digest, desc.MediaType, reason) return nil, nil } compressionAlgo, err := images.DiffCompression(ctx, desc.MediaType) if err != nil { return nil, fmt.Errorf("could not determine layer compression: %w", err) } if compressionAlgo == "" { switch desc.MediaType { case ocispec.MediaTypeImageLayer: // for OCI image layers, empty is returned for an uncompressed layer. compressionAlgo = compression.Uncompressed } } if !b.ztocBuilder.CheckCompressionAlgorithm(compressionAlgo) { fmt.Printf("ztoc skipped - layer %s (%s) is compressed in an unsupported format. 
expect: [tar, gzip, unknown] but got %q\n", desc.Digest, desc.MediaType, compressionAlgo) return nil, errUnsupportedLayerFormat } ra, err := b.contentStore.ReaderAt(ctx, desc) if err != nil { return nil, err } defer ra.Close() sr := io.NewSectionReader(ra, 0, desc.Size) tmpFile, err := os.CreateTemp("", "tmp.*") if err != nil { return nil, err } defer os.Remove(tmpFile.Name()) n, err := io.Copy(tmpFile, sr) if err != nil { return nil, err } if n != desc.Size { return nil, errors.New("the size of the temp file doesn't match that of the layer") } toc, err := b.ztocBuilder.BuildZtoc(tmpFile.Name(), b.config.spanSize, ztoc.WithCompression(compressionAlgo)) if err != nil { return nil, err } ztocReader, ztocDesc, err := ztoc.Marshal(toc) if err != nil { return nil, err } err = b.blobStore.Push(ctx, ztocDesc, ztocReader) if err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { return nil, fmt.Errorf("cannot push ztoc to local store: %w", err) } // write the artifact entry for soci layer // this part is needed for local store only entry := &ArtifactEntry{ Size: ztocDesc.Size, Digest: ztocDesc.Digest.String(), OriginalDigest: desc.Digest.String(), Type: ArtifactEntryTypeLayer, Location: desc.Digest.String(), MediaType: SociLayerMediaType, CreatedAt: time.Now(), } err = b.ArtifactsDb.WriteArtifactEntry(entry) if err != nil { return nil, err } fmt.Printf("layer %s -> ztoc %s\n", desc.Digest, ztocDesc.Digest) ztocDesc.MediaType = SociLayerMediaType ztocDesc.Annotations = map[string]string{ IndexAnnotationImageLayerMediaType: desc.MediaType, IndexAnnotationImageLayerDigest: desc.Digest.String(), } return &ztocDesc, err } // NewIndex returns a new index. func NewIndex(blobs []ocispec.Descriptor, subject *ocispec.Descriptor, annotations map[string]string) *Index { return &Index{ Blobs: blobs, ArtifactType: SociIndexArtifactType, Annotations: annotations, Subject: subject, MediaType: ocispec.MediaTypeImageManifest, } } // NewIndexFromReader returns a new index from a Reader. 
func NewIndexFromReader(reader io.Reader) (*Index, error) { index := new(Index) if err := json.NewDecoder(reader).Decode(index); err != nil { return nil, fmt.Errorf("unable to decode reader into index: %v", err) } return index, nil } func skipBuildingZtoc(desc ocispec.Descriptor, cfg *buildConfig) (bool, string) { if cfg == nil { return false, "" } // avoid the file access if the layer size is below threshold if desc.Size < cfg.minLayerSize { return true, fmt.Sprintf("size %d is less than min-layer-size %d", desc.Size, cfg.minLayerSize) } return false, "" } // GetImageManifestDescriptor gets the descriptor of image manifest func GetImageManifestDescriptor(ctx context.Context, cs content.Store, imageTarget ocispec.Descriptor, platform platforms.MatchComparer) (*ocispec.Descriptor, error) { if images.IsIndexType(imageTarget.MediaType) { manifests, err := images.Children(ctx, cs, imageTarget) if err != nil { return nil, err } for _, manifest := range manifests { if manifest.Platform == nil { return nil, errors.New("manifest should have proper platform") } if platform.Match(*manifest.Platform) { return &manifest, nil } } return nil, errors.New("image manifest not found") } else if images.IsManifestType(imageTarget.MediaType) { return &imageTarget, nil } return nil, nil } // WriteSociIndex writes the SociIndex manifest to oras `store`. func WriteSociIndex(ctx context.Context, indexWithMetadata *IndexWithMetadata, contentStore store.Store, artifactsDb *ArtifactsDb) error { // batch will prevent content from being garbage collected in the middle of the following operations ctx, batchDone, err := contentStore.BatchOpen(ctx) if err != nil { return err } defer batchDone(ctx) manifest, err := MarshalIndex(indexWithMetadata.Index) if err != nil { return err } // If we're serializing the SOCI index as an OCI 1.0 Manifest, create an // empty config objct in the store as well. We will need to push this to the // registry later. 
if indexWithMetadata.Index.MediaType == ocispec.MediaTypeImageManifest { err = contentStore.Push(ctx, defaultConfigDescriptor, bytes.NewReader(defaultConfigContent)) if err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { return fmt.Errorf("error creating OCI 1.0 empty config: %w", err) } } dgst := digest.FromBytes(manifest) size := int64(len(manifest)) desc := ocispec.Descriptor{ Digest: dgst, Size: size, } err = contentStore.Push(ctx, desc, bytes.NewReader(manifest)) if err != nil && !errors.Is(err, errdef.ErrAlreadyExists) { return fmt.Errorf("cannot write SOCI index to local store: %w", err) } log.G(ctx).WithField("digest", dgst.String()).Debugf("soci index has been written") err = store.LabelGCRoot(ctx, contentStore, desc) if err != nil { return fmt.Errorf("cannot apply garbage collection label to index %s: %w", desc.Digest.String(), err) } err = store.LabelGCRefContent(ctx, contentStore, desc, "config", defaultConfigDescriptor.Digest.String()) if err != nil { return fmt.Errorf("cannot apply garbage collection label to index %s referencing default config: %w", desc.Digest.String(), err) } var allErr error for i, blob := range indexWithMetadata.Index.Blobs { err = store.LabelGCRefContent(ctx, contentStore, desc, "ztoc."+strconv.Itoa(i), blob.Digest.String()) if err != nil { errors.Join(allErr, err) } } if allErr != nil { return fmt.Errorf("cannot apply one or more garbage collection labels to index %s: %w", desc.Digest.String(), allErr) } refers := indexWithMetadata.Index.Subject if refers == nil { return errors.New("cannot write soci index: the Refers field is nil") } // this entry is persisted to be used by cli push entry := &ArtifactEntry{ Digest: dgst.String(), OriginalDigest: refers.Digest.String(), ImageDigest: indexWithMetadata.ImageDigest.String(), Platform: platforms.Format(*indexWithMetadata.Platform), Type: ArtifactEntryTypeIndex, Location: refers.Digest.String(), Size: size, MediaType: indexWithMetadata.Index.MediaType, CreatedAt: 
indexWithMetadata.CreatedAt, } return artifactsDb.WriteArtifactEntry(entry) } soci-snapshotter-0.4.1/soci/soci_index_test.go000066400000000000000000000223011454010642300214650ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package soci import ( "bytes" "context" "encoding/json" "errors" "testing" "github.com/containerd/containerd/images" "github.com/google/go-cmp/cmp" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2/content/memory" ) func TestSkipBuildingZtoc(t *testing.T) { testcases := []struct { name string desc ocispec.Descriptor buildConfig buildConfig skip bool }{ { name: "skip, sizeminLayerSize", desc: ocispec.Descriptor{ MediaType: SociLayerMediaType, Digest: parseDigest("sha256:88a7002d88ed7b174259637a08a2ef9b7f4f2a314dfb51fa1a4a6a1d7e05dd01"), Size: 5000, }, buildConfig: buildConfig{ minLayerSize: 500, }, skip: false, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { if skip, _ := skipBuildingZtoc(tc.desc, &tc.buildConfig); skip != tc.skip { t.Fatalf("%v: the value returned does not equal actual value %v", tc.name, tc.skip) } }) } } func TestBuildSociIndexNotLayer(t *testing.T) { testcases := []struct { name string mediaType string err error }{ { name: "empty media type", mediaType: "", err: errNotLayerType, }, { name: "soci index manifest", mediaType: ocispec.MediaTypeImageManifest, err: errNotLayerType, }, { name: "soci layer", 
mediaType: SociLayerMediaType, err: errNotLayerType, }, { name: "index manifest", mediaType: "application/vnd.oci.image.manifest.v1+json", err: errNotLayerType, }, { name: "layer as tar", mediaType: "application/vnd.oci.image.layer.v1.tar", }, { name: "docker", mediaType: images.MediaTypeDockerSchema2Layer, }, { name: "layer as tar+gzip", mediaType: "application/vnd.oci.image.layer.v1.tar+gzip", }, { name: "layer as tar+zstd", mediaType: "application/vnd.oci.image.layer.v1.tar+zstd", }, { name: "layer prefix", mediaType: "application/vnd.oci.image.layer.", }, } spanSize := int64(65535) ctx := context.Background() cs := newFakeContentStore() blobStore := memory.New() artifactsDb, err := newTestableDb() if err != nil { t.Fatalf("can't create a test db") } builder, err := NewIndexBuilder(cs, blobStore, artifactsDb, WithSpanSize(spanSize), WithMinLayerSize(0)) if err != nil { t.Fatalf("cannot create index builer: %v", err) } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { desc := ocispec.Descriptor{ MediaType: tc.mediaType, Digest: "layerdigest", } _, err := builder.buildSociLayer(ctx, desc) if tc.err != nil { if !errors.Is(err, tc.err) { t.Fatalf("%v: should error out as not a layer", tc.name) } } else { if err == errNotLayerType { t.Fatalf("%v: should not error out for any of the layer types", tc.name) } } }) } } func TestBuildSociIndexWithLimits(t *testing.T) { testcases := []struct { name string layerSize int64 minLayerSize int64 ztocGenerated bool }{ { name: "skip building ztoc: layer size 500 bytes, minimal layer size 32kB", layerSize: 500, minLayerSize: 32000, ztocGenerated: false, }, { name: "skip building ztoc: layer size 20kB, minimal layer size 32kB", layerSize: 20000, minLayerSize: 32000, ztocGenerated: false, }, { name: "build ztoc: layer size 500 bytes, minimal layer size 500 bytes", layerSize: 500, minLayerSize: 500, ztocGenerated: true, }, { name: "build ztoc: layer size 20kB, minimal layer size 500 bytes", layerSize: 20000, 
minLayerSize: 500, ztocGenerated: true, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() cs := newFakeContentStore() desc := ocispec.Descriptor{ MediaType: "application/vnd.oci.image.layer.", Size: tc.layerSize, } spanSize := int64(65535) blobStore := memory.New() artifactsDb, err := newTestableDb() if err != nil { t.Fatalf("can't create a test db") } builder, _ := NewIndexBuilder(cs, blobStore, artifactsDb, WithSpanSize(spanSize), WithMinLayerSize(tc.minLayerSize)) ztoc, err := builder.buildSociLayer(ctx, desc) if tc.ztocGenerated { // we check only for build skip, which is indicated as nil value for ztoc and nil value for error if ztoc == nil && err == nil { t.Fatalf("%v: ztoc should've been generated; error=%v", tc.name, err) } } else { if ztoc != nil { t.Fatalf("%v: ztoc should've skipped", tc.name) } } }) } } func TestNewIndex(t *testing.T) { testcases := []struct { name string blobs []ocispec.Descriptor subject ocispec.Descriptor annotations map[string]string }{ { name: "successfully build OCI ref type manifest", blobs: []ocispec.Descriptor{ { Size: 4, Digest: digest.FromBytes([]byte("test")), }, }, subject: ocispec.Descriptor{ Size: 4, Digest: digest.FromBytes([]byte("test")), }, annotations: map[string]string{ "foo": "bar", }, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { index := NewIndex(tc.blobs, &tc.subject, tc.annotations) if diff := cmp.Diff(index.Blobs, tc.blobs); diff != "" { t.Fatalf("unexpected blobs; diff = %v", diff) } if index.ArtifactType != SociIndexArtifactType { t.Fatalf("unexpected artifact type; expected = %s, got = %s", SociIndexArtifactType, index.ArtifactType) } if index.MediaType != ocispec.MediaTypeImageManifest { t.Fatalf("unexpected media type; expected = %v, got = %v", ocispec.MediaTypeImageManifest, index.MediaType) } if diff := cmp.Diff(index.Subject, &tc.subject); diff != "" { t.Fatalf("the subject field is not equal; diff = %v", diff) } }) } } func 
TestDecodeIndex(t *testing.T) { testcases := []struct { name string blobs []ocispec.Descriptor subject ocispec.Descriptor annotations map[string]string }{ { name: "successfully build OCI ref type manifest", blobs: []ocispec.Descriptor{ { Size: 4, Digest: digest.FromBytes([]byte("test")), }, }, subject: ocispec.Descriptor{ Size: 4, Digest: digest.FromBytes([]byte("test")), }, annotations: map[string]string{ "foo": "bar", }, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { index := NewIndex(tc.blobs, &tc.subject, tc.annotations) jsonBytes, err := MarshalIndex(index) if err != nil { t.Fatalf("cannot convert index to json byte data: %v", err) } var index2 Index err = DecodeIndex(bytes.NewReader(jsonBytes), &index2) if err != nil { t.Fatal(err) } if diff := cmp.Diff(index, &index2); diff != "" { t.Fatalf("unexpected index after deserialzing from byte data; diff = %v", diff) } }) } } func TestMarshalIndex(t *testing.T) { blobs := []ocispec.Descriptor{ { Size: 4, Digest: digest.FromBytes([]byte("test")), }, } subject := ocispec.Descriptor{ Size: 4, Digest: digest.FromBytes([]byte("test")), } annotations := map[string]string{ "foo": "bar", } testcases := []struct { name string index *Index ty interface{} }{ { name: "successfully roundtrip as Image Manifest", index: NewIndex(blobs, &subject, annotations), ty: ocispec.Manifest{}, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { b, err := MarshalIndex(tc.index) if err != nil { t.Fatalf("could not marshal index: %v", err) } err = json.Unmarshal(b, &tc.ty) if err != nil { t.Fatalf("could not unmarshal index as underlying type: %v", err) } var unmarshalled Index err = UnmarshalIndex(b, &unmarshalled) if err != nil { t.Fatalf("could not unmarshal index as index: %v", err) } diff := cmp.Diff(tc.index, &unmarshalled) if diff != "" { t.Fatalf("deserialized index does not match original index: %s", diff) } }) } } 
soci-snapshotter-0.4.1/soci/store/000077500000000000000000000000001454010642300171115ustar00rootroot00000000000000soci-snapshotter-0.4.1/soci/store/store.go000066400000000000000000000254551454010642300206070ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package store import ( "context" "errors" "fmt" "io" "time" "github.com/awslabs/soci-snapshotter/config" "github.com/containerd/containerd" "github.com/containerd/containerd/content" "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/namespaces" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2/content/oci" "oras.land/oras-go/v2/errdef" ) // BasicStore describes the functionality common to oras-go oci.Store, oras-go memory.Store, and containerd ContentStore. 
type BasicStore interface { Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) Push(ctx context.Context, expected ocispec.Descriptor, reader io.Reader) error } // Store extends BasicStore with functionality that in not present in some BasicStore // implementations and may be stubbed in some Store implementations type Store interface { BasicStore Label(ctx context.Context, target ocispec.Descriptor, label string, value string) error Delete(ctx context.Context, dgst digest.Digest) error // BatchOpen starts a series of operations that should not be interrupted by garbage collection. // It returns a cleanup function that ends the batch, which should be called after // all associated content operations are finished. BatchOpen(ctx context.Context) (context.Context, CleanupFunc, error) } type ContentStoreType string const ( ContainerdContentStoreType ContentStoreType = "containerd" SociContentStoreType ContentStoreType = "soci" ) // ContentStoreTypes returns a slice of all supported content store types. 
func ContentStoreTypes() []ContentStoreType { return []ContentStoreType{SociContentStoreType, ContainerdContentStoreType} } const ( // Default path to containerd content addressable storage DefaultContainerdContentStorePath = "/var/lib/containerd/io.containerd.content.v1.content" // Default path to soci content addressable storage DefaultSociContentStorePath = "/var/lib/soci-snapshotter-grpc/content" ) func NewStoreConfig(opts ...Option) config.ContentStoreConfig { storeConfig := config.ContentStoreConfig{ Type: config.DefaultContentStoreType, Namespace: namespaces.Default, } for _, o := range opts { o(&storeConfig) } return storeConfig } type Option func(*config.ContentStoreConfig) func WithNamespace(namespace string) Option { return func(sc *config.ContentStoreConfig) { sc.Namespace = namespace } } func WithType(contentStoreType ContentStoreType) Option { return func(sc *config.ContentStoreConfig) { sc.Type = string(contentStoreType) } } func ErrUnknownContentStoreType(contentStoreType ContentStoreType) error { return fmt.Errorf("unknown content store type: %s; must be one of %s or %s", contentStoreType, ContainerdContentStoreType, SociContentStoreType) } // CanonicalizeContentStoreType resolves the empty string to DefaultContentStoreType, // returns other types, or errors on unrecognized types. func CanonicalizeContentStoreType(contentStoreType ContentStoreType) (ContentStoreType, error) { switch contentStoreType { case "": return config.DefaultContentStoreType, nil case ContainerdContentStoreType, SociContentStoreType: return contentStoreType, nil default: return "", ErrUnknownContentStoreType(contentStoreType) } } // GetContentStorePath returns the top level directory for the content store. 
func GetContentStorePath(contentStoreType ContentStoreType) (string, error) {
	// Canonicalize first so "" maps to the default backend.
	contentStoreType, err := CanonicalizeContentStoreType(contentStoreType)
	if err != nil {
		return "", err
	}
	switch contentStoreType {
	case ContainerdContentStoreType:
		return DefaultContainerdContentStorePath, nil
	case SociContentStoreType:
		return DefaultSociContentStorePath, nil
	}
	// Unreachable if canonicalization succeeded; kept as a defensive fallback.
	return "", errors.New("unexpectedly reached end of GetContentStorePath")
}

// CleanupFunc ends a batch of content operations (see Store.BatchOpen).
type CleanupFunc func(context.Context) error

// nopCleanup is the CleanupFunc used by stores that do not support batching.
func nopCleanup(context.Context) error { return nil }

// NewContentStore constructs the Store selected by the options (containerd or
// soci), returning a possibly-modified context alongside the store.
func NewContentStore(ctx context.Context, opts ...Option) (context.Context, Store, error) {
	storeConfig := NewStoreConfig(opts...)
	contentStoreType, err := CanonicalizeContentStoreType(ContentStoreType(storeConfig.Type))
	if err != nil {
		return ctx, nil, err
	}
	switch contentStoreType {
	case ContainerdContentStoreType:
		return NewContainerdStore(ctx, storeConfig)
	case SociContentStoreType:
		return NewSociStore(ctx)
	}
	// Unreachable if canonicalization succeeded; kept as a defensive fallback.
	return ctx, nil, errors.New("unexpectedly reached end of NewContentStore")
}

// SociStore wraps oci.Store and adds or stubs additional functionality of the Store interface.
type SociStore struct {
	*oci.Store
}

// assert that SociStore implements Store
var _ Store = (*SociStore)(nil)

// NewSociStore creates a sociStore.
func NewSociStore(ctx context.Context) (context.Context, *SociStore, error) {
	store, err := oci.New(DefaultSociContentStorePath)
	return ctx, &SociStore{store}, err
}

// Label is a no-op for sociStore until sociStore and ArtifactsDb are better integrated.
func (s *SociStore) Label(_ context.Context, _ ocispec.Descriptor, _ string, _ string) error {
	return nil
}

// Delete is a no-op for sociStore until oci.Store provides this method.
func (s *SociStore) Delete(_ context.Context, _ digest.Digest) error {
	return nil
}

// BatchOpen is a no-op for sociStore; it does not support batching operations.
func (s *SociStore) BatchOpen(ctx context.Context) (context.Context, CleanupFunc, error) {
	return ctx, nopCleanup, nil
}

// ContainerdStore implements Store on top of containerd's content service.
type ContainerdStore struct {
	config.ContentStoreConfig
	client *containerd.Client
}

// assert that ContainerdStore implements Store
var _ Store = (*ContainerdStore)(nil)

// NewContainerdStore connects to the containerd socket and returns a store
// scoped to the configured namespace (also applied to the returned context).
func NewContainerdStore(ctx context.Context, storeConfig config.ContentStoreConfig) (context.Context, *ContainerdStore, error) {
	client, err := containerd.New(config.DefaultImageServiceAddress)
	if err != nil {
		return ctx, nil, fmt.Errorf("could not connect to containerd socket for content store access: %w", err)
	}
	ctx = namespaces.WithNamespace(ctx, storeConfig.Namespace)
	containerdStore := ContainerdStore{
		client: client,
	}
	containerdStore.ContentStoreConfig = storeConfig
	return ctx, &containerdStore, nil
}

// Exists returns true iff the described content exists.
func (s *ContainerdStore) Exists(ctx context.Context, target ocispec.Descriptor) (bool, error) {
	ctx = namespaces.WithNamespace(ctx, s.Namespace)
	cs := s.client.ContentStore()
	_, err := cs.Info(ctx, target.Digest)
	// ErrNotFound means "does not exist", not a failure.
	if errors.Is(err, errdefs.ErrNotFound) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

// sectionReaderAt pairs a content.ReaderAt (for Close/Size) with an
// io.SectionReader (for sequential Read) so Fetch can return an io.ReadCloser.
type sectionReaderAt struct {
	content.ReaderAt
	*io.SectionReader
}

// Fetch fetches the content identified by the descriptor.
func (s *ContainerdStore) Fetch(ctx context.Context, target ocispec.Descriptor) (io.ReadCloser, error) {
	ctx = namespaces.WithNamespace(ctx, s.Namespace)
	cs := s.client.ContentStore()
	ra, err := cs.ReaderAt(ctx, target)
	if err != nil {
		return nil, err
	}
	return sectionReaderAt{ra, io.NewSectionReader(ra, 0, ra.Size())}, nil
}

// Push pushes the content, matching the expected descriptor.
// This should be done within a Batch and followed by Label calls to prevent garbage collection.
func (s *ContainerdStore) Push(ctx context.Context, expected ocispec.Descriptor, reader io.Reader) error { ctx = namespaces.WithNamespace(ctx, s.Namespace) exists, err := s.Exists(ctx, expected) if err != nil { return err } if exists { // error format based on oras.land/oras-go/v2/content/oci.Storage.Push() return fmt.Errorf("%s: %s: %w", expected.Digest, expected.MediaType, errdef.ErrAlreadyExists) } cs := s.client.ContentStore() // gRPC message size limit includes some overhead that cannot be calculated from here buf := make([]byte, defaults.DefaultMaxRecvMsgSize/2) totalWritten := 0 writer, err := cs.Writer(ctx, content.WithRef(expected.Digest.String())) if err != nil { return err } defer writer.Close() for { n, err := reader.Read(buf) if n > 0 { written, err := writer.Write(buf[:n]) if err != nil { return err } totalWritten += written } if err != nil { if err != io.EOF { return err } break } if n == 0 { break } } if expected.Size > 0 && expected.Size != int64(totalWritten) { return fmt.Errorf("unexpected copy size %d, expected %d: %w", totalWritten, expected.Size, errdefs.ErrFailedPrecondition) } return writer.Commit(ctx, expected.Size, expected.Digest) } // LabelGCRoot labels the target resource to prevent garbage collection of itself. func LabelGCRoot(ctx context.Context, store Store, target ocispec.Descriptor) error { return store.Label(ctx, target, "containerd.io/gc.root", time.Now().Format(time.RFC3339)) } // LabelGCRefContent labels the target resource to prevent garbage collection of another resource identified by digest // with an optional ref to allow and disambiguate multiple content labels. func LabelGCRefContent(ctx context.Context, store Store, target ocispec.Descriptor, ref string, digest string) error { if len(ref) > 0 { ref = "." + ref } return store.Label(ctx, target, "containerd.io/gc.ref.content"+ref, digest) } // Label creates or updates the named label with the given value. 
func (s *ContainerdStore) Label(ctx context.Context, target ocispec.Descriptor, name string, value string) error { ctx = namespaces.WithNamespace(ctx, s.Namespace) cs := s.client.ContentStore() info := content.Info{ Digest: target.Digest, Labels: map[string]string{name: value}, } paths := []string{"labels." + name} _, err := cs.Update(ctx, info, paths...) if err != nil { return err } return nil } // Delete removes the described content. func (s *ContainerdStore) Delete(ctx context.Context, dgst digest.Digest) error { ctx = namespaces.WithNamespace(ctx, s.Namespace) cs := s.client.ContentStore() return cs.Delete(ctx, dgst) } // BatchOpen creates a lease, ensuring that no content created within the batch will be garbage collected. // It returns a cleanup function that ends the lease, which should be called after content is created and labeled. func (s *ContainerdStore) BatchOpen(ctx context.Context) (context.Context, CleanupFunc, error) { ctx, leaseDone, err := s.client.WithLease(ctx) if err != nil { return ctx, nopCleanup, fmt.Errorf("unable to open batch: %w", err) } return ctx, leaseDone, nil } soci-snapshotter-0.4.1/soci/store/store_test.go000066400000000000000000000131461454010642300216400ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package store import ( "context" "testing" "github.com/awslabs/soci-snapshotter/config" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2/content/memory" ) func TestStoreCanonicalizeContentStoreType(t *testing.T) { tests := []struct { input string output ContentStoreType fail bool }{ { input: "", output: config.DefaultContentStoreType, }, { input: "soci", output: SociContentStoreType, }, { input: "containerd", output: ContainerdContentStoreType, }, { input: "bad", fail: true, }, } for _, tt := range tests { t.Run(tt.input, func(t *testing.T) { output, err := CanonicalizeContentStoreType(ContentStoreType(tt.input)) if err != nil { if !tt.fail { t.Fatalf("content store type \"%s\" canonicalized to \"%s\" and produced unexpected error %v", tt.input, output, err) } } else { if tt.output != output { t.Fatalf("content store type \"%s\" canonicalized to \"%s\", expected %s", tt.input, output, tt.output) } } }) } } func TestStoreGetContentStorePath(t *testing.T) { var defaultContentStorePath string switch ContentStoreType(config.DefaultContentStoreType) { case SociContentStoreType: defaultContentStorePath = DefaultSociContentStorePath case ContainerdContentStoreType: defaultContentStorePath = DefaultContainerdContentStorePath default: t.Fatalf("test invalidated by unrecognized default content store type: %s", config.DefaultContentStoreType) } tests := []struct { input string output string fail bool }{ { input: "", output: defaultContentStorePath, }, { input: "soci", output: DefaultSociContentStorePath, }, { input: "containerd", output: DefaultContainerdContentStorePath, }, { input: "bad", fail: true, }, } for _, tt := range tests { t.Run(tt.input, func(t *testing.T) { output, err := GetContentStorePath(ContentStoreType(tt.input)) if err != nil { if !tt.fail { t.Fatalf("content store type \"%s\" produced path %s with unexpected error %v", tt.input, output, err) } } else { if tt.output != output { 
t.Fatalf("content store type \"%s\" produced path %s, expected %s", tt.input, output, tt.output) } } }) } } type fakeStore struct { *memory.Store Labels [][]string Deleted []string } // assert that FakeStore implements Store var _ Store = (*fakeStore)(nil) func newFakeStore() *fakeStore { fakeStore := fakeStore{} fakeStore.Store = memory.New() return &fakeStore } // TODO read and record namespace from context // Label fakes labeling resources by maintaining an array of labels that have been added func (s *fakeStore) Label(_ context.Context, desc ocispec.Descriptor, name string, value string) error { s.Labels = append(s.Labels, []string{desc.Digest.String(), name, value}) return nil } // Delete fakes deleting resources by maintaining an array of resources that have been "deleted" func (s *fakeStore) Delete(_ context.Context, dgst digest.Digest) error { s.Deleted = append(s.Deleted, dgst.String()) return nil } // BatchOpen is a TODO to mock and test func (s *fakeStore) BatchOpen(ctx context.Context) (context.Context, CleanupFunc, error) { return ctx, func(context.Context) error { return nil }, nil } func TestStoreLabelGCRoot(t *testing.T) { store := newFakeStore() testTarget, _ := digest.Parse("sha256:7b236f6c6ca259a4497e98c204bc1dcf3e653438e74af17bfe39da5329789f4a") LabelGCRoot(context.Background(), store, ocispec.Descriptor{Digest: testTarget}) if len(store.Labels) != 1 { t.Fatalf("wrong number of labels applied, expected 1, got %d", len(store.Labels)) } if store.Labels[0][0] != testTarget.String() { t.Fatalf("label applied to wrong digest, expected \"%s\", got \"%s\"", testTarget.String(), store.Labels[0][0]) } if store.Labels[0][1] != "containerd.io/gc.root" { t.Fatalf("label applied with wrong name, expected \"containerd.io/gc.root\", got \"%s\"", store.Labels[0][1]) } } func TestStoreLabelGCRefContent(t *testing.T) { store := newFakeStore() testTarget, _ := digest.Parse("sha256:7b236f6c6ca259a4497e98c204bc1dcf3e653438e74af17bfe39da5329789f4a") testRef := 
"testRef" testDigest, _ := digest.Parse("sha256:4452aadba3e99771ff3559735dab16279c5a352359d79f38737c6fdca941c6e5") LabelGCRefContent(context.Background(), store, ocispec.Descriptor{Digest: testTarget}, testRef, testDigest.String()) if len(store.Labels) != 1 { t.Fatalf("wrong number of labels applied, expected 1, got %d", len(store.Labels)) } if store.Labels[0][0] != testTarget.String() { t.Fatalf("label applied to wrong digest, expected \"%s\", got \"%s\"", testTarget.String(), store.Labels[0][0]) } if store.Labels[0][1] != "containerd.io/gc.ref.content."+testRef { t.Fatalf("label applied with wrong name, expected \"containerd.io/gc.ref.content."+testRef+"\", got \"%s\"", store.Labels[0][1]) } if store.Labels[0][2] != testDigest.String() { t.Fatalf("label references wrong digest, expected \"%s\", got \"%s\"", testDigest.String(), store.Labels[0][2]) } } soci-snapshotter-0.4.1/soci/util_test.go000066400000000000000000000066521454010642300203310ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package soci import ( "context" "io" "github.com/containerd/containerd/content" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) func parseDigest(digestString string) digest.Digest { dgst, _ := digest.Parse(digestString) return dgst } type fakeContentStore struct { } // Abort implements content.Store func (fakeContentStore) Abort(ctx context.Context, ref string) error { return nil } // ListStatuses implements content.Store func (fakeContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { panic("unimplemented") } // Status implements content.Store func (fakeContentStore) Status(ctx context.Context, ref string) (content.Status, error) { panic("unimplemented") } // Writer implements content.Store func (fakeContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { return fakeWriter{}, nil } // Delete implements content.Store func (fakeContentStore) Delete(ctx context.Context, dgst digest.Digest) error { return nil } // Info implements content.Store func (fakeContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { panic("unimplemented") } // Update implements content.Store func (fakeContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { panic("unimplemented") } // Walk implements content.Store func (fakeContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { return nil } // ReaderAt implements content.Store func (fakeContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { return newFakeReaderAt(desc), nil } func newFakeContentStore() content.Store { return fakeContentStore{} } type fakeReaderAt struct { size int64 } // Close implements content.ReaderAt func (fakeReaderAt) Close() error { return nil } // ReadAt implements content.ReaderAt func (r fakeReaderAt) ReadAt(p []byte, off int64) (n 
int, err error) { return int(r.size), nil } // Size implements content.ReaderAt func (r fakeReaderAt) Size() int64 { return r.size } func newFakeReaderAt(desc ocispec.Descriptor) content.ReaderAt { return fakeReaderAt{size: desc.Size} } type fakeWriter struct { io.Writer status content.Status commitFunc func() error } func (f fakeWriter) Write(p []byte) (n int, err error) { return len(p), nil } func (f fakeWriter) Close() error { return nil } func (f fakeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { if f.commitFunc == nil { return nil } return f.commitFunc() } func (f fakeWriter) Digest() digest.Digest { return digest.FromString("") } func (f fakeWriter) Status() (content.Status, error) { return f.status, nil } func (f fakeWriter) Truncate(size int64) error { return nil } soci-snapshotter-0.4.1/util/000077500000000000000000000000001454010642300157755ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/dbutil/000077500000000000000000000000001454010642300172605ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/dbutil/encoders.go000066400000000000000000000022301454010642300214060ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package dbutil import ( "encoding/binary" "errors" "fmt" ) func EncodeInt(i int64) ([]byte, error) { var ( buf [binary.MaxVarintLen64]byte iEncoded = buf[:] ) iEncoded = iEncoded[:binary.PutVarint(iEncoded, i)] if len(iEncoded) == 0 { return nil, fmt.Errorf("failed encoding integer = %v", i) } return iEncoded, nil } func DecodeInt(data []byte) (int64, error) { i, n := binary.Varint(data) if i == 0 { if n == 0 { return 0, errors.New("not enough data") } if n < 0 { return 0, errors.New("data overflows int64") } } return i, nil } soci-snapshotter-0.4.1/util/dockershell/000077500000000000000000000000001454010642300202745ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/dockershell/compose/000077500000000000000000000000001454010642300217415ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/dockershell/compose/compose.go000066400000000000000000000200601454010642300237330ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package compose import ( "bufio" "errors" "io" "os" "os/exec" "path/filepath" "strings" dexec "github.com/awslabs/soci-snapshotter/util/dockershell/exec" "github.com/rs/xid" ) // Supported checks if this pkg can run on the current system. func Supported() error { if err := exec.Command("docker", "version").Run(); err != nil { return err } return exec.Command("docker-compose", "--version").Run() } // Compose represents a set of container execution environment (i.e. a set of *dexec.Exec) that // is orchestrated as a docker compose project. // This can be created using docker compose yaml. Get method provides *dexec.Exec // of arbitrary service. type Compose struct { execs map[string]*dexec.Exec cleanups []func() error } type options struct { buildArgs []string addStdio func(c *exec.Cmd) addStderr func(c *exec.Cmd) } // Option is an option for creating compose. type Option func(o *options) // WithBuildArgs specifies the build args that will be used during build. func WithBuildArgs(buildArgs ...string) Option { return func(o *options) { o.buildArgs = buildArgs } } // WithStdio specifies stdio which docker-compose build command's stdio will be streamed into. func WithStdio(stdout, stderr io.Writer) Option { return func(o *options) { o.addStdio = func(c *exec.Cmd) { c.Stdout = stdout c.Stderr = stderr } o.addStderr = func(c *exec.Cmd) { c.Stderr = stderr } } } // Build builds all services defined in a docker-compose yaml. 
func Build(dockerComposeYaml string, opts ...Option) ([]func() error, error) { var cOpts options for _, o := range opts { o(&cOpts) } tmpContext, err := os.MkdirTemp("", "compose"+xid.New().String()) if err != nil { return nil, err } confFile := filepath.Join(tmpContext, "docker-compose.yml") if err := os.WriteFile(confFile, []byte(dockerComposeYaml), 0600); err != nil { return nil, err } var cleanups []func() error for i := 0; i < 3; i++ { rm := func() error { return exec.Command("docker-compose", "-f", confFile, "down", "--rmi", "all").Run() } cleanups = append(cleanups, rm) } cleanups = append(cleanups, func() error { return os.RemoveAll(tmpContext) }) var buildArgs []string for _, arg := range cOpts.buildArgs { buildArgs = append(buildArgs, "--build-arg", arg) } cmd := exec.Command("docker-compose", append([]string{"-f", confFile, "build", "-q"}, buildArgs...)...) if cOpts.addStdio != nil { cOpts.addStdio(cmd) } if err := cmd.Run(); err != nil { return nil, err } return cleanups, nil } // Up starts all services defined in a docker-compose yaml and returns the execution environment for each service. 
func Up(dockerComposeYaml string, opts ...Option) (*Compose, error) { var cOpts options for _, o := range opts { o(&cOpts) } tmpContext, err := os.MkdirTemp("", "compose"+xid.New().String()) if err != nil { return nil, err } confFile := filepath.Join(tmpContext, "docker-compose.yml") if err := os.WriteFile(confFile, []byte(dockerComposeYaml), 0600); err != nil { return nil, err } var cleanups []func() error cleanups = append(cleanups, func() error { return exec.Command("docker-compose", "-f", confFile, "down", "-v").Run() }) cleanups = append(cleanups, func() error { return os.RemoveAll(tmpContext) }) cmd := exec.Command("docker-compose", "-f", confFile, "up", "-d") if cOpts.addStdio != nil { cOpts.addStdio(cmd) } if err := cmd.Run(); err != nil { return nil, err } cmd = exec.Command("docker-compose", "-f", confFile, "ps", "--services") if cOpts.addStderr != nil { cOpts.addStderr(cmd) } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } var services []string scanner := bufio.NewScanner(stdout) for scanner.Scan() { services = append(services, strings.TrimSpace(scanner.Text())) } if err := cmd.Wait(); err != nil { return nil, err } execs := map[string]*dexec.Exec{} for _, s := range services { cmd = exec.Command("docker-compose", "-f", confFile, "ps", "-q", s) if cOpts.addStderr != nil { cOpts.addStderr(cmd) } cNameB, err := cmd.Output() if err != nil { return nil, err } de, err := dexec.New(strings.TrimSpace(string(cNameB))) if err != nil { return nil, err } execs[s] = de } return &Compose{execs, cleanups}, nil } // New creates a new Compose of the specified docker-compose yaml data. 
func New(dockerComposeYaml string, opts ...Option) (*Compose, error) { var cOpts options for _, o := range opts { o(&cOpts) } tmpContext, err := os.MkdirTemp("", "compose"+xid.New().String()) if err != nil { return nil, err } confFile := filepath.Join(tmpContext, "docker-compose.yml") if err := os.WriteFile(confFile, []byte(dockerComposeYaml), 0600); err != nil { return nil, err } var cleanups []func() error cleanups = append(cleanups, func() error { return exec.Command("docker-compose", "-f", confFile, "down", "-v").Run() }) cleanups = append(cleanups, func() error { return os.RemoveAll(tmpContext) }) var buildArgs []string for _, arg := range cOpts.buildArgs { buildArgs = append(buildArgs, "--build-arg", arg) } cmd := exec.Command("docker-compose", append([]string{"-f", confFile, "build", "-q"}, buildArgs...)...) if cOpts.addStdio != nil { cOpts.addStdio(cmd) } if err := cmd.Run(); err != nil { return nil, err } cmd = exec.Command("docker-compose", "-f", confFile, "up", "-d") if cOpts.addStdio != nil { cOpts.addStdio(cmd) } if err := cmd.Run(); err != nil { return nil, err } cmd = exec.Command("docker-compose", "-f", confFile, "ps", "--services") if cOpts.addStderr != nil { cOpts.addStderr(cmd) } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } var services []string scanner := bufio.NewScanner(stdout) for scanner.Scan() { services = append(services, strings.TrimSpace(scanner.Text())) } if err := cmd.Wait(); err != nil { return nil, err } execs := map[string]*dexec.Exec{} for _, s := range services { cmd = exec.Command("docker-compose", "-f", confFile, "ps", "-q", s) if cOpts.addStderr != nil { cOpts.addStderr(cmd) } cNameB, err := cmd.Output() if err != nil { return nil, err } de, err := dexec.New(strings.TrimSpace(string(cNameB))) if err != nil { return nil, err } execs[s] = de } return &Compose{execs, cleanups}, nil } // Get returns *dexec.Exec of an arbitrary service contained in this 
Compose. func (c *Compose) Get(serviceName string) (*dexec.Exec, bool) { v, ok := c.execs[serviceName] return v, ok } // List lists all service names contained in this Compose. func (c *Compose) List() (l []string) { for k := range c.execs { l = append(l, k) } return } // Cleanup teardowns this Compose and cleans up related resources. func (c *Compose) Cleanup() (retErr error) { for _, f := range c.cleanups { if err := f(); err != nil { retErr = errors.Join(retErr, err) } } return } soci-snapshotter-0.4.1/util/dockershell/exec/000077500000000000000000000000001454010642300212205ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/dockershell/exec/cmd.go000066400000000000000000000122501454010642300223120ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package exec import ( "fmt" "io" "os/exec" ) // Supported checks if this pkg can run on the current system. 
func Supported() error { return exec.Command("docker", "version").Run() } // Exec is an executing environment for a container. Commands can be executed in the // container using Command method. type Exec struct { // ContainerName is the name of the target container. ContainerName string } // New creates a new Exec for the specified container. func New(containerName string) (*Exec, error) { if err := exec.Command("docker", "inspect", containerName).Run(); err != nil { return nil, fmt.Errorf("container %v is unavailable: %w", containerName, err) } return &Exec{containerName}, nil } // Command creates a new Cmd for the specified commands. func (e Exec) Command(name string, arg ...string) *Cmd { cmd := &Cmd{ Path: name, Args: append([]string{name}, arg...), dockerExec: &exec.Cmd{}, containerName: e.ContainerName, } if lp, err := exec.LookPath("docker"); err != nil { cmd.lookPathErr = fmt.Errorf("docker command not found: %w", err) } else { cmd.dockerExec.Path = lp } return cmd } // Kill kills the underlying container. func (e Exec) Kill() error { return exec.Command("docker", "kill", e.ContainerName).Run() } // Cmd is exec.Cmd-like object which provides the way to execute commands in a container. type Cmd struct { // Path is the path of the command to run. Path string // Args holds the command line arguments. Args []string // Env holds the environment variables for the command. Env []string // Dir specifies the working direcotroy of the command. Dir string // Stdin specifies the stdin of the command. Stdin io.Reader // Stdout and Stderr specifies the stdout and stderr of the command. 
Stdout io.Writer Stderr io.Writer lookPathErr error dockerExec *exec.Cmd containerName string // TODO: support the following fields // ExtraFiles []*os.File // SysProcAttr *syscall.SysProcAttr // Process *os.Process // ProcessState *os.ProcessState } func (cmd *Cmd) toDocker() *exec.Cmd { var opts []string if cmd.Stdin != nil { opts = append(opts, "-i") } if cmd.Dir != "" { opts = append(opts, "-w", cmd.Dir) } for _, e := range cmd.Env { opts = append(opts, "-e", e) } base := append([]string{"docker", "exec"}, append(opts, cmd.containerName)...) cmd.dockerExec.Args = append(base, cmd.Args...) if cmd.dockerExec.Stdin == nil { cmd.dockerExec.Stdin = cmd.Stdin } if cmd.dockerExec.Stdout == nil { cmd.dockerExec.Stdout = cmd.Stdout } if cmd.dockerExec.Stderr == nil { cmd.dockerExec.Stderr = cmd.Stderr } return cmd.dockerExec } // CombinedOutput runs the specified commands and returns the combined output of stdout and stderr. func (cmd *Cmd) CombinedOutput() ([]byte, error) { if err := cmd.lookPathErr; err != nil { return nil, err } return cmd.toDocker().CombinedOutput() } // Output runs the specified commands and returns its stdout. func (cmd *Cmd) Output() ([]byte, error) { if err := cmd.lookPathErr; err != nil { return nil, err } return cmd.toDocker().Output() } // Run runs the specified commands. func (cmd *Cmd) Run() error { if err := cmd.lookPathErr; err != nil { return err } return cmd.toDocker().Run() } func (cmd *Cmd) Start() error { return cmd.toDocker().Start() } func (cmd *Cmd) Wait() error { return cmd.toDocker().Wait() } // StderrPipe returns the pipe that will be connected to stderr of the executed command. func (cmd *Cmd) StderrPipe() (io.ReadCloser, error) { return cmd.toDocker().StderrPipe() } // StdinPipe returns the pipe that will be connected to stdin of the executed command. 
func (cmd *Cmd) StdinPipe() (io.WriteCloser, error) { return cmd.toDocker().StdinPipe() } // StdoutPipe returns the pipe that will be connected to stdout of the executed command. func (cmd *Cmd) StdoutPipe() (io.ReadCloser, error) { return cmd.toDocker().StdoutPipe() } // String returns a human-readable description of this command. func (cmd *Cmd) String() string { return cmd.toDocker().String() } soci-snapshotter-0.4.1/util/dockershell/exec/util.go000066400000000000000000000123771454010642300225360ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package exec import ( "fmt" "io" "os" "os/exec" "path/filepath" "github.com/rs/xid" ) // NewTempNetwork creates a new network and returns its cleaner function. 
func NewTempNetwork(networkName string) (func() error, error) { cmd := exec.Command("docker", "network", "create", networkName) if err := cmd.Run(); err != nil { return nil, err } return func() error { return exec.Command("docker", "network", "rm", networkName).Run() }, nil } // Connect connects an Exec to the specified docker network. func Connect(de *Exec, networkName string) error { return exec.Command("docker", "network", "connect", networkName, de.ContainerName).Run() } type imageOptions struct { patchDockerfile string patchContextDir string buildArgs []string addStdio func(c *exec.Cmd) } type ImageOption func(o *imageOptions) // WithPatchDockerfile is a part of Dockerfile that will be built based on the // Dockerfile specified by the arguments of NewTempImage. func WithPatchDockerfile(patchDockerfile string) ImageOption { return func(o *imageOptions) { o.patchDockerfile = patchDockerfile } } // WithPatchContextDir is a context dir of a build which will be executed based on the // Dockerfile specified by the arguments of NewTempImage. When this option is used, // WithPatchDockerfile corresponding to this context dir must be specified as well. func WithPatchContextDir(patchContextDir string) ImageOption { return func(o *imageOptions) { o.patchContextDir = patchContextDir } } // WithTempImageBuildArgs specifies the build args that will be used during build. func WithTempImageBuildArgs(buildArgs ...string) ImageOption { return func(o *imageOptions) { o.buildArgs = buildArgs } } // WithTempImageStdio specifies stdio which docker build command's stdio will be streamed into. func WithTempImageStdio(stdout, stderr io.Writer) ImageOption { return func(o *imageOptions) { o.addStdio = func(c *exec.Cmd) { c.Stdout = stdout c.Stderr = stderr } } } // NewTempImage builds a new image of the specified context and stage then returns the tag and // cleaner function. 
func NewTempImage(contextDir, targetStage string, opts ...ImageOption) (string, func() error, error) { var iOpts imageOptions for _, o := range opts { o(&iOpts) } if iOpts.patchContextDir != "" { if iOpts.patchDockerfile == "" { return "", nil, fmt.Errorf("Dockerfile patch must be specified with context dir") } } if !filepath.IsAbs(contextDir) { return "", nil, fmt.Errorf("context dir %v must be an absolute path", contextDir) } tmpImage, tmpDone, err := newTempImage(contextDir, "", targetStage, &iOpts) if err != nil { return "", nil, err } if iOpts.patchDockerfile == "" { return tmpImage, tmpDone, err } defer tmpDone() patchContextDir := iOpts.patchContextDir if patchContextDir == "" { patchContextDir, err = os.MkdirTemp("", "tmpcontext") if err != nil { return "", nil, err } defer os.RemoveAll(patchContextDir) } dfData := fmt.Sprintf(` FROM %s %s `, tmpImage, iOpts.patchDockerfile) dfContextDir, err := os.MkdirTemp("", "tmpdfcontext") if err != nil { return "", nil, err } defer os.RemoveAll(dfContextDir) dockerfilePath := filepath.Join(dfContextDir, "Dockerfile") if err := os.WriteFile(dockerfilePath, []byte(dfData), 0666); err != nil { return "", nil, err } return newTempImage(patchContextDir, dockerfilePath, "", &iOpts) } func newTempImage(contextDir, dockerfilePath, targetStage string, opts *imageOptions) (string, func() error, error) { image := "tmpimage" + xid.New().String() c := []string{"build", "-q", "-t", image} if dockerfilePath != "" { c = append(c, "-f", dockerfilePath) } if targetStage != "" { c = append(c, "--target", targetStage) } for _, arg := range opts.buildArgs { c = append(c, "--build-arg", arg) } c = append(c, contextDir) cmd := exec.Command("docker", c...) 
if opts.addStdio != nil { opts.addStdio(cmd) } if err := cmd.Run(); err != nil { return "", nil, err } return image, func() error { cmd := exec.Command("docker", "image", "rm", image) if opts.addStdio != nil { opts.addStdio(cmd) } return cmd.Run() }, nil } soci-snapshotter-0.4.1/util/dockershell/shell.go000066400000000000000000000270061454010642300217370ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package dockershell import ( "bufio" "fmt" "io" "os" "sync" "time" dexec "github.com/awslabs/soci-snapshotter/util/dockershell/exec" ) // Supported checks if this pkg can run on the current system. func Supported() error { return dexec.Supported() } // Reporter is used by Shell pkg to report logs and errors during commands execution. type Reporter interface { // Errorf is called when Shell encounters unrecoverable error. 
Errorf(format string, v ...interface{}) // Logf is called to report some useful information (e.g. executing command) by Shell. Logf(format string, v ...interface{}) // Stdout is used as a stdout destination of executing commands. Stdout() io.Writer // Stdout is used as a stderr destination of executing commands. Stderr() io.Writer } // DefaultReporter is the default implementation of Reporter. type DefaultReporter struct{} // Errorf prints the occurred error. func (r DefaultReporter) Errorf(format string, v ...interface{}) { fmt.Printf("error: %v\n", fmt.Sprintf(format, v...)) } // Errorf prints the information reported. func (r DefaultReporter) Logf(format string, v ...interface{}) { fmt.Printf("log: %v\n", fmt.Sprintf(format, v...)) } // Stdout provides the writer to stdout. func (r DefaultReporter) Stdout() io.Writer { return os.Stdout } // Stdout provides the writer to stderr. func (r DefaultReporter) Stderr() io.Writer { return os.Stderr } // Shell provides provides means to execute commands inside a container, in // a shellscript-like experience. type Shell struct { *dexec.Exec r Reporter err error invalid bool invalidMu sync.Mutex } // New creates a new Shell for the provided execution environment created by packages including // dockershell/exec, dockershell/compose and dockershell/kind, etc. // // Most of methods of Shell don't return error but returns Shell itself. This allows the user to // run commands using methods chain like Shell.X(commandA).X(commandB).X(commandC). This provides // shellscript-like experience. Instead of reporting errors as return values, Shell reports errors // through Reporter. // When Shell encounters an unrecoverable error (e.g. failure of a command execution), this immediately // calls Reporter.Errorf and don't execute the remaining (chained) commands. Err() method returns the // last encountered error. Once Shell encounters an error, this is marked as "invalid" and doesn't // accept any further command execution. 
For continuing further execution, call Refresh for aquiering // a new instance of Shell. // // Some useful information are also reported via Reporter.Logf during commands execution and command // outputs to stdio are streamed into Reporter.Stdout and Reporter.Stderr. // // If no Reporter is specified (i.e. nil is provided), DefaultReporter is used by default. func New(de *dexec.Exec, r Reporter) *Shell { if r == nil { r = DefaultReporter{} } return &Shell{ Exec: de, r: r, } } func (s *Shell) fatal(format string, v ...interface{}) *Shell { s.r.Errorf(format, v...) s.err = fmt.Errorf(format, v...) s.invalidMu.Lock() s.invalid = true s.invalidMu.Unlock() return s } // Err returns an error encouterd at the last. func (s *Shell) Err() error { return s.err } // IsInvalid returns true when this Shell is marked as "invalid". For continuing further // command execution, call Refresh for aquiering a new instance of Shell. func (s *Shell) IsInvalid() bool { s.invalidMu.Lock() b := s.invalid s.invalidMu.Unlock() return b } // Refresh returns a new cloned instance of this Shell. func (s *Shell) Refresh() *Shell { return New(s.Exec, s.r) } // X executes a command. Stdio is streamed to Reporter. When the command fails, the error is reported // via Reporter.Errorf and this Shell is marked as "invalid" (i.e. doesn't accept further command // execution). func (s *Shell) X(args ...string) *Shell { if s.IsInvalid() { return s } if len(args) < 1 { return s.fatal("no command to run") } s.r.Logf(">>> Running: %v\n", args) cmd := s.Command(args[0], args[1:]...) cmd.Stdout = s.r.Stdout() cmd.Stderr = s.r.Stderr() if err := cmd.Run(); err != nil { return s.fatal("failed to run %v: %v", args, err) } return s } // XLog executes a command. Stdio is streamed to Reporter. When the command fails, different from X, // the error is reported via Reporter.Logf and this Shell still *accepts* further command execution. 
func (s *Shell) XLog(args ...string) *Shell { if s.IsInvalid() { return s } if len(args) < 1 { return s.fatal("no command to run") } s.r.Logf(">>> Running: %v\n", args) cmd := s.Command(args[0], args[1:]...) cmd.Stdout = s.r.Stdout() cmd.Stderr = s.r.Stderr() if err := cmd.Run(); err != nil { s.r.Logf("failed to run %v: %v", args, err) } return s } // Gox executes a command in an goroutine and doesn't wait for the command completion. Stdio is // streamed to Reporter. When the command fails, different from X, the error is reported via // Reporter.Logf and this Shell still *accepts* further command execution. func (s *Shell) Gox(args ...string) *Shell { if s.IsInvalid() { return s } if len(args) < 1 { return s.fatal("no command to run") } go func() { s.r.Logf(">>> Running: %v\n", args) cmd := s.Command(args[0], args[1:]...) cmd.Stdout = s.r.Stdout() cmd.Stderr = s.r.Stderr() if err := cmd.Run(); err != nil { s.r.Logf("command %v exit: %v", args, err) } }() return s } // C is an alias of []string which represents a command. func C(args ...string) []string { return args } // Pipe executes passed commands sequentially and stdout of a command is piped into the next command's // stdin. The stdout of the last command is streamed to the specified io.Writer. // When a command fails, the error is reported via Reporter.Errorf and this Shell is marked as // "invalid" (i.e. doesn't accept further command execution). func (s *Shell) Pipe(out io.Writer, commands ...[]string) *Shell { if s.IsInvalid() { return s } if out == nil { out = s.r.Stdout() } var lastStdout io.ReadCloser var err error var cmds []*dexec.Cmd for i, args := range commands { i, args := i, args if len(args) < 1 { return s.fatal("no command to run") } s.r.Logf(">>> Running: %v\n", args) cmd := s.Command(args[0], args[1:]...) 
cmd.Stdin = lastStdout if i == len(commands)-1 { cmd.Stdout = out } else { lastStdout, err = cmd.StdoutPipe() if err != nil { s.r.Errorf("failed to create stdout pipe for %v: %v", cmd.Args, err) break } } cmd.Stderr = s.r.Stderr() err = cmd.Start() if err != nil { s.r.Errorf("failed to start %v: %v", cmd.Args, err) break } cmds = append(cmds, cmd) } ok := true // The lifecycle of `exec.Cmd.StdoutPipe` requires that we don't wait // on a process until the reader end has completed reading or else we // could truncate the pipe. Therefore, we wait on processes in reverse // order so that we when we wait on a process, we can guarantee that we // do not depend on its output. // reference: https://pkg.go.dev/os/exec@go1.19#Cmd.StdoutPipe for i := len(cmds) - 1; i >= 0; i-- { cmd := cmds[i] if err = cmd.Wait(); err != nil { s.r.Errorf("error waiting on %v: %v", cmd.Args, err) ok = false } } if !ok { return s.fatal("could not run %v", commands) } return s } // Retry executes a command repeatedly until it succeeds, up to num times. Stdio is streamed to // Reporter. If all attemptions fail, the error is reported via Reporter.Errorf and this Shell is // marked as "invalid" (i.e. doesn't accept further command execution). func (s *Shell) Retry(num int, args ...string) *Shell { if s.IsInvalid() { return s } for i := 0; i < num; i++ { s.r.Logf(">>> Running(%d/%d): %v\n", i, num, args) cmd := s.Command(args[0], args[1:]...) cmd.Stdout = s.r.Stdout() cmd.Stderr = s.r.Stderr() err := cmd.Run() if err == nil { return s } s.r.Logf("failed to run (%d/%d) %v: %v", i, num, args, err) time.Sleep(time.Second) } return s.fatal("failed to run %v", args) } // O executes a command and return the stdout. Stderr is streamed to Reporter. When the command fails, // the error is reported via Reporter.Errorf and this Shell is marked as "invalid" (i.e. doesn't // accept further command execution). 
func (s *Shell) O(args ...string) []byte { if s.IsInvalid() { return nil } if len(args) < 1 { s.fatal("no command to run") return nil } s.r.Logf(">>> Getting output of: %v\n", args) cmd := s.Command(args[0], args[1:]...) cmd.Stderr = s.r.Stderr() out, err := cmd.Output() if err != nil { s.fatal("failed to run for getting output from %v: %v", args, err) return nil } return out } // OLog executes a command and return the stdout. Stdio is streamed to Reporter. When the command fails, different from O, // the error is reported via Reporter.Logf and this Shell still *accepts* further command execution. func (s *Shell) OLog(args ...string) ([]byte, error) { if s.IsInvalid() { return nil, fmt.Errorf("invalid shell") } if len(args) < 1 { s.fatal("no command to run") return nil, fmt.Errorf("no command to run") } s.r.Logf(">>> Getting output of: %v\n", args) cmd := s.Command(args[0], args[1:]...) cmd.Stderr = s.r.Stderr() out, err := cmd.Output() if err != nil { s.r.Logf("failed to run for getting output from %v: %v", args, err) return out, err } return out, nil } // R executes a command. Stdio is returned as io.Reader. streamed to Reporter. func (s *Shell) R(args ...string) (stdout, stderr io.Reader, err error) { if s.IsInvalid() { return nil, nil, fmt.Errorf("invalid shell") } if len(args) < 1 { return nil, nil, fmt.Errorf("no command to run") } s.r.Logf(">>> Running(returning reader): %v\n", args) cmd := s.Command(args[0], args[1:]...) outR, outW := io.Pipe() errR, errW := io.Pipe() cmd.Stdout, cmd.Stderr = outW, errW go func() { if err := cmd.Run(); err != nil { outW.CloseWithError(err) errW.CloseWithError(err) return } outW.Close() errW.Close() }() return outR, errR, nil } // ForEach executes a command. For each line of stdout, the callback function is called until it // returns false. Stderr is streamed to Reporter. The encountered erros are returned instead of // using Reporter. 
func (s *Shell) ForEach(args []string, f func(l string) bool) error { stdout, stderr, err := s.R(args...) if err != nil { return err } go io.Copy(s.r.Stderr(), stderr) scanner := bufio.NewScanner(stdout) for scanner.Scan() { if !f(scanner.Text()) { break } } return nil } soci-snapshotter-0.4.1/util/http/000077500000000000000000000000001454010642300167545ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/http/retry.go000066400000000000000000000063321454010642300204540ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package http import ( "context" "fmt" "math/rand" "net" "net/http" "time" "github.com/awslabs/soci-snapshotter/config" "github.com/awslabs/soci-snapshotter/version" "github.com/containerd/containerd/log" rhttp "github.com/hashicorp/go-retryablehttp" "github.com/sirupsen/logrus" ) var ( UserAgent = fmt.Sprintf("soci-snapshotter/%s", version.Version) ) // NewRetryableClient creates a go http.Client which will automatically // retry on non-fatal errors func NewRetryableClient(config config.RetryableHTTPClientConfig) *http.Client { rhttpClient := rhttp.NewClient() // Don't log every request rhttpClient.Logger = nil // set retry config rhttpClient.RetryMax = config.MaxRetries rhttpClient.RetryWaitMin = time.Duration(config.MinWaitMsec) * time.Millisecond rhttpClient.RetryWaitMax = time.Duration(config.MaxWaitMsec) * time.Millisecond rhttpClient.Backoff = BackoffStrategy rhttpClient.CheckRetry = RetryStrategy rhttpClient.HTTPClient.Timeout = time.Duration(config.RequestTimeoutMsec) * time.Millisecond // set timeouts innerTransport := rhttpClient.HTTPClient.Transport if t, ok := innerTransport.(*http.Transport); ok { t.DialContext = (&net.Dialer{ Timeout: time.Duration(config.DialTimeoutMsec) * time.Millisecond, }).DialContext t.ResponseHeaderTimeout = time.Duration(config.ResponseHeaderTimeoutMsec) * time.Millisecond } return rhttpClient.StandardClient() } // Jitter returns a number in the range duration to duration+(duration/divisor)-1, inclusive func Jitter(duration time.Duration, divisor int64) time.Duration { return time.Duration(rand.Int63n(int64(duration)/divisor) + int64(duration)) } // BackoffStrategy extends retryablehttp's DefaultBackoff to add a random jitter to avoid // overwhelming the repository when it comes back online // DefaultBackoff either tries to parse the 'Retry-After' header of the response; or, it uses an // exponential backoff 2 ^ numAttempts, limited by max func BackoffStrategy(min, max time.Duration, attemptNum int, resp *http.Response) 
time.Duration { delayTime := rhttp.DefaultBackoff(min, max, attemptNum, resp) return Jitter(delayTime, 8) } // RetryStrategy extends retryablehttp's DefaultRetryPolicy to log the error and response when retrying // DefaultRetryPolicy retries whenever err is non-nil (except for some url errors) or if returned // status code is 429 or 5xx (except 501) func RetryStrategy(ctx context.Context, resp *http.Response, err error) (bool, error) { retry, err2 := rhttp.DefaultRetryPolicy(ctx, resp, err) if retry { log.G(ctx).WithFields(logrus.Fields{ "error": err, "response": resp, }).Debugf("retrying request") } return retry, err2 } soci-snapshotter-0.4.1/util/ioutils/000077500000000000000000000000001454010642300174655ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/ioutils/countwriter.go000066400000000000000000000027401454010642300224040ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package ioutils import "sync" type CountWriter struct { n int64 mu sync.Mutex } func (c *CountWriter) Write(p []byte) (n int, err error) { c.mu.Lock() c.n += int64(len(p)) c.mu.Unlock() return len(p), nil } func (c *CountWriter) Size() (n int64) { c.mu.Lock() n = c.n c.mu.Unlock() return } soci-snapshotter-0.4.1/util/lrucache/000077500000000000000000000000001454010642300175635ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/lrucache/lrucache.go000066400000000000000000000111031454010642300216740ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package lrucache provides reference-count-aware lru cache. package lrucache import ( "sync" "github.com/golang/groupcache/lru" ) // Cache is "groupcache/lru"-like cache. 
The difference is that "groupcache/lru" immediately // finalizes theevicted contents using OnEvicted callback but our version strictly tracks the // reference counts of contents and calls OnEvicted when nobody refers to the evicted contents. type Cache struct { cache *lru.Cache mu sync.Mutex // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key string, value interface{}) } // New creates new cache. func New(maxEntries int) *Cache { inner := lru.New(maxEntries) inner.OnEvicted = func(key lru.Key, value interface{}) { // Decrease the ref count incremented in Add(). // When nobody refers to this value, this value will be finalized via refCounter. value.(*refCounter).finalize() } return &Cache{ cache: inner, } } // Get retrieves the specified object from the cache and increments the reference counter of the // target content. Client must call `done` callback to decrease the reference count when the value // will no longer be used. func (c *Cache) Get(key string) (value interface{}, done func(), ok bool) { c.mu.Lock() defer c.mu.Unlock() o, ok := c.cache.Get(key) if !ok { return nil, nil, false } rc := o.(*refCounter) rc.inc() return rc.v, c.decreaseOnceFunc(rc), true } // Add adds object to the cache and returns the cached contents with incrementing the reference count. // If the specified content already exists in the cache, this sets `added` to false and returns // "already cached" content (i.e. doesn't replace the content with the new one). Client must call // `done` callback to decrease the counter when the value will no longer be used. 
func (c *Cache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) { c.mu.Lock() defer c.mu.Unlock() if o, ok := c.cache.Get(key); ok { rc := o.(*refCounter) rc.inc() return rc.v, c.decreaseOnceFunc(rc), false } rc := &refCounter{ key: key, v: value, onEvicted: c.OnEvicted, } rc.initialize() // Keep this object having at least 1 ref count (will be decreased in OnEviction) rc.inc() // The client references this object (will be decreased on "done") c.cache.Add(key, rc) return rc.v, c.decreaseOnceFunc(rc), true } // Remove removes the specified contents from the cache. OnEvicted callback will be called when // nobody refers to the removed content. func (c *Cache) Remove(key string) { c.mu.Lock() defer c.mu.Unlock() c.cache.Remove(key) } func (c *Cache) decreaseOnceFunc(rc *refCounter) func() { var once sync.Once return func() { c.mu.Lock() defer c.mu.Unlock() once.Do(func() { rc.dec() }) } } type refCounter struct { onEvicted func(key string, value interface{}) key string v interface{} refCounts int64 mu sync.Mutex initializeOnce sync.Once finalizeOnce sync.Once } func (r *refCounter) inc() { r.mu.Lock() defer r.mu.Unlock() r.refCounts++ } func (r *refCounter) dec() { r.mu.Lock() defer r.mu.Unlock() r.refCounts-- if r.refCounts <= 0 && r.onEvicted != nil { // nobody will refer this object r.onEvicted(r.key, r.v) } } func (r *refCounter) initialize() { r.initializeOnce.Do(func() { r.inc() }) } func (r *refCounter) finalize() { r.finalizeOnce.Do(func() { r.dec() }) } soci-snapshotter-0.4.1/util/lrucache/lrucache_test.go000066400000000000000000000100301454010642300227310ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package lrucache import ( "fmt" "testing" ) func TestAdd(t *testing.T) { c := New(10) key, value := "key1", "abcd" v, _, added := c.Add(key, value) if !added { t.Errorf("failed to add %q", key) return } else if v.(string) != value { t.Errorf("returned different object for %q; want %q; got %q", key, value, v.(string)) return } key, newvalue := "key1", "dummy" v, _, added = c.Add(key, newvalue) if added || v.(string) != value { t.Errorf("%q must be originally stored one; want %q; got %q (added:%v)", key, value, v.(string), added) } } func TestGet(t *testing.T) { c := New(10) key, value := "key1", "abcd" v, _, added := c.Add(key, value) if !added { t.Errorf("failed to add %q", key) return } else if v.(string) != value { t.Errorf("returned different object for %q; want %q; got %q", key, value, v.(string)) return } v, _, ok := c.Get(key) if !ok { t.Errorf("failed to get obj %q (%q)", key, value) return } else if v.(string) != value { t.Errorf("unexpected object for %q; want %q; got %q", key, value, v.(string)) return } } func TestRemove(t 
*testing.T) { var evicted []string c := New(2) c.OnEvicted = func(key string, value interface{}) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1" _, done1, _ := c.Add(key1, value1) _, done12, _ := c.Get(key1) c.Remove(key1) if len(evicted) != 0 { t.Errorf("no content must be evicted after remove") return } done1() if len(evicted) != 0 { t.Errorf("no content must be evicted until all reference are discarded") return } done12() if len(evicted) != 1 { t.Errorf("content must be evicted") return } if evicted[0] != key1 { t.Errorf("1st content %q must be evicted but got %q", key1, evicted[0]) return } } func TestEviction(t *testing.T) { var evicted []string c := New(2) c.OnEvicted = func(key string, value interface{}) { evicted = append(evicted, key) } key1, value1 := "key1", "abcd1" key2, value2 := "key2", "abcd2" _, done1, _ := c.Add(key1, value1) _, done2, _ := c.Add(key2, value2) _, done22, _ := c.Get(key2) if len(evicted) != 0 { t.Errorf("no content must be evicted after addition") return } for i := 0; i < 2; i++ { c.Add(fmt.Sprintf("key-add-%d", i), fmt.Sprintf("abcd-add-%d", i)) } if len(evicted) != 0 { t.Errorf("no content must be evicted after overflow") return } done1() if len(evicted) != 1 { t.Errorf("1 content must be evicted") return } if evicted[0] != key1 { t.Errorf("1st content %q must be evicted but got %q", key1, evicted[0]) return } done2() // effective done2() // ignored done2() // ignored if len(evicted) != 1 { t.Errorf("only 1 content must be evicted") return } done22() if len(evicted) != 2 { t.Errorf("2 contents must be evicted") return } if evicted[1] != key2 { t.Errorf("2nd content %q must be evicted but got %q", key2, evicted[1]) return } } soci-snapshotter-0.4.1/util/namedmutex/000077500000000000000000000000001454010642300201445ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/namedmutex/namedmutex.go000066400000000000000000000041111454010642300226370ustar00rootroot00000000000000/* Copyright The Soci Snapshotter 
Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package namedmutex provides NamedMutex that wraps sync.Mutex // and provides namespaced mutex. package namedmutex import ( "sync" ) // NamedMutex wraps sync.Mutex and provides namespaced mutex. 
type NamedMutex struct { muMap map[string]*sync.Mutex refMap map[string]int mu sync.Mutex } // Lock locks the mutex of the given name func (nl *NamedMutex) Lock(name string) { nl.mu.Lock() if nl.muMap == nil { nl.muMap = make(map[string]*sync.Mutex) } if nl.refMap == nil { nl.refMap = make(map[string]int) } if _, ok := nl.muMap[name]; !ok { nl.muMap[name] = &sync.Mutex{} } mu := nl.muMap[name] nl.refMap[name]++ nl.mu.Unlock() mu.Lock() } // Unlock unlocks the mutex of the given name func (nl *NamedMutex) Unlock(name string) { nl.mu.Lock() mu := nl.muMap[name] nl.refMap[name]-- if nl.refMap[name] <= 0 { delete(nl.muMap, name) delete(nl.refMap, name) } nl.mu.Unlock() mu.Unlock() } soci-snapshotter-0.4.1/util/testutil/000077500000000000000000000000001454010642300176525ustar00rootroot00000000000000soci-snapshotter-0.4.1/util/testutil/ensurehello.go000066400000000000000000000061541454010642300225340ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "compress/gzip" "context" "fmt" "io" "net/http" "os" "github.com/containerd/containerd/content" "github.com/containerd/containerd/content/local" "github.com/containerd/containerd/images/archive" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) const ( // HelloArchiveURL points to an OCI archive of `hello-world`. // Exported from `docker.io/library/hello-world@sha256:1a523af650137b8accdaed439c17d684df61ee4d74feac151b5b337bd29e7eec` . // See https://github.com/AkihiroSuda/test-oci-archives/releases/tag/v20210101 HelloArchiveURL = "https://github.com/AkihiroSuda/test-oci-archives/releases/download/v20210101/hello-world.tar.gz" // HelloArchiveDigest is the digest of the archive. HelloArchiveDigest = "sha256:5aa022621c4de0e941ab2a30d4569c403e156b4ba2de2ec32e382ae8679f40e1" ) // EnsureHello creates a temp content store and ensures `hello-world` image from HelloArchiveURL into the store. func EnsureHello(ctx context.Context) (*ocispec.Descriptor, content.Store, error) { // Pulling an image without the daemon is a mess, so we use OCI archive here. 
resp, err := http.Get(HelloArchiveURL) if err != nil { return nil, nil, err } defer resp.Body.Close() sha256Digester := digest.SHA256.Digester() sha256Hasher := sha256Digester.Hash() tr := io.TeeReader(resp.Body, sha256Hasher) gzReader, err := gzip.NewReader(tr) if err != nil { return nil, nil, err } tempDir, err := os.MkdirTemp("", "test-estargz") if err != nil { return nil, nil, err } cs, err := local.NewStore(tempDir) if err != nil { return nil, nil, err } desc, err := archive.ImportIndex(ctx, cs, gzReader) if err != nil { return nil, nil, err } resp.Body.Close() if d := sha256Digester.Digest().String(); d != HelloArchiveDigest { err = fmt.Errorf("expected digest of %q to be %q, got %q", HelloArchiveURL, HelloArchiveDigest, d) return nil, nil, err } return &desc, cs, nil } soci-snapshotter-0.4.1/util/testutil/shell.go000066400000000000000000000306031454010642300213120ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package testutil // This file contains some utilities that supports to manipulate dockershell. import ( "bufio" "bytes" "encoding/json" "errors" "fmt" "io" "os/exec" "path/filepath" "regexp" "strconv" "strings" "sync/atomic" "testing" "time" "github.com/awslabs/soci-snapshotter/soci/store" shell "github.com/awslabs/soci-snapshotter/util/dockershell" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/rs/xid" "golang.org/x/sync/errgroup" ) // TestingReporter is an implementation of dockershell.Reporter backed by testing.T and TestingL. type TestingReporter struct { t *testing.T } // NewTestingReporter returns a new TestingReporter instance for the specified testing.T. func NewTestingReporter(t *testing.T) *TestingReporter { return &TestingReporter{t} } // Errorf prints the provided message to TestingL and stops the test using testing.T.Fatalf. func (r *TestingReporter) Errorf(format string, v ...interface{}) { TestingL.Printf(format, v...) r.t.Fatalf(format, v...) } // Logf prints the provided message to TestingL testing.T. func (r *TestingReporter) Logf(format string, v ...interface{}) { TestingL.Printf(format, v...) r.t.Logf(format, v...) } // Stdout returns the writer to TestingL as stdout. This enables to print command logs realtime. func (r *TestingReporter) Stdout() io.Writer { return TestingL.Writer() } // Stderr returns the writer to TestingL as stderr. This enables to print command logs realtime. 
func (r *TestingReporter) Stderr() io.Writer { return TestingL.Writer() } // LogMonitor manages a list of functions that should scan lines coming from stdout and stderr Readers type LogMonitor struct { monitorFuncs map[string]func(string) } // NewLogMonitor creates a LogMonitor for a given pair of stdout and stderr Readers func NewLogMonitor(r shell.Reporter, stdout, stderr io.Reader) *LogMonitor { m := &LogMonitor{} m.monitorFuncs = make(map[string]func(string)) go m.scanLog(io.TeeReader(stdout, r.Stdout())) go m.scanLog(io.TeeReader(stderr, r.Stderr())) return m } // Add registers a new log monitor function func (m *LogMonitor) Add(name string, monitorFunc func(string)) error { if _, ok := m.monitorFuncs[name]; ok { return fmt.Errorf("attempted to add log monitor with already existing name: %s", name) } m.monitorFuncs[name] = monitorFunc return nil } // Remove unregisters a log monitor function func (m *LogMonitor) Remove(name string) error { if _, ok := m.monitorFuncs[name]; ok { delete(m.monitorFuncs, name) return nil } return fmt.Errorf("attempted to remove nonexistent log monitor: %s", name) } // scanLog calls each registered log monitor function for each new line of the Reader func (m *LogMonitor) scanLog(inputR io.Reader) { scanner := bufio.NewScanner(inputR) for scanner.Scan() { rawL := scanner.Text() for _, monitorFunc := range m.monitorFuncs { monitorFunc(rawL) } } } // RemoteSnapshotMonitor scans log of soci snapshotter and provides the way to check // if all snapshots are prepared as remote snpashots. 
type RemoteSnapshotMonitor struct { remote uint64 local uint64 } // NewRemoteSnapshotMonitor creates a new instance of RemoteSnapshotMonitor and registers it // with the LogMonitor func NewRemoteSnapshotMonitor(m *LogMonitor) (*RemoteSnapshotMonitor, func()) { rsm := &RemoteSnapshotMonitor{} m.Add("remote snapshot", rsm.MonitorFunc) return rsm, func() { m.Remove("remote snapshot") } } type RemoteSnapshotPreparedLogLine struct { RemoteSnapshotPrepared string `json:"remote-snapshot-prepared"` } // MonitorFunc counts remote/local snapshot preparation totals func (m *RemoteSnapshotMonitor) MonitorFunc(rawL string) { var logline RemoteSnapshotPreparedLogLine if i := strings.Index(rawL, "{"); i > 0 { rawL = rawL[i:] // trim garbage chars; expects "{...}"-styled JSON log } if err := json.Unmarshal([]byte(rawL), &logline); err == nil { if logline.RemoteSnapshotPrepared == "true" { atomic.AddUint64(&m.remote, 1) } else if logline.RemoteSnapshotPrepared == "false" { atomic.AddUint64(&m.local, 1) } } } // CheckAllRemoteSnapshots checks if the scanned log reports that all snapshots are prepared // as remote snapshots. 
func (m *RemoteSnapshotMonitor) CheckAllRemoteSnapshots(t *testing.T) { remote := atomic.LoadUint64(&m.remote) local := atomic.LoadUint64(&m.local) result := fmt.Sprintf("(local:%d,remote:%d)", local, remote) if local > 0 { t.Fatalf("some local snapshots creation have been reported %v", result) } else if remote > 0 { t.Logf("all layers have been reported as remote snapshots %v", result) return } else { t.Fatalf("no log for checking remote snapshot was provided; Is the log-level = debug?") } } // LogConfirmStartup registers a LogMonitor function to scan until startup succeeds or fails func LogConfirmStartup(m *LogMonitor) error { errs := make(chan error, 1) m.Add("startup", monitorStartup(errs)) defer m.Remove("startup") select { case err := <-errs: if err != nil { return err } case <-time.After(10 * time.Second): // timeout return fmt.Errorf("log did not produce success or fatal error within 10 seconds") } return nil } type LevelLogLine struct { Level string `json:"level"` Msg string `json:"msg"` } // monitorStartup creates a LogMonitor function to pass success or failure back through the given channel func monitorStartup(errs chan error) func(string) { return func(rawL string) { if i := strings.Index(rawL, "{"); i > 0 { rawL = rawL[i:] // trim garbage chars; expects "{...}"-styled JSON log } var logline LevelLogLine if err := json.Unmarshal([]byte(rawL), &logline); err == nil { if logline.Level == "fatal" { errs <- errors.New("fatal snapshotter log entry encountered, snapshotter failed to start") return } if strings.Contains(logline.Msg, "background") { errs <- nil return } } } } // TempDir creates a temporary directory in the specified execution environment. 
func TempDir(sh *shell.Shell) (string, error) { out, err := sh.Command("mktemp", "-d").Output() if err != nil { return "", fmt.Errorf("failed to run mktemp: %v", err) } return strings.TrimSpace(string(out)), nil } func injectContainerdContentStoreContentFromReader(sh *shell.Shell, desc ocispec.Descriptor, content io.Reader) error { reference := desc.Digest.String() cmd := sh.Command("ctr", "content", "ingest", reference) cmd.Stdin = content if err := cmd.Run(); err != nil { return err } cmd = sh.Command("ctr", "content", "label", desc.Digest.String(), "") return cmd.Run() } func injectSociContentStoreContentFromReader(sh *shell.Shell, desc ocispec.Descriptor, content io.Reader) error { dir := filepath.Join(store.DefaultSociContentStorePath, "blobs", desc.Digest.Algorithm().String()) if err := sh.Command("mkdir", "-p", dir).Run(); err != nil { return err } path := filepath.Join(dir, desc.Digest.Encoded()) cmd := sh.Command("/bin/sh", "-c", fmt.Sprintf("cat > %s && chmod %#o %s", path, 0600, path)) cmd.Stdin = content return cmd.Run() } func InjectContentStoreContentFromReader(sh *shell.Shell, contentStoreType store.ContentStoreType, desc ocispec.Descriptor, content io.Reader) error { contentStoreType, err := store.CanonicalizeContentStoreType(contentStoreType) if err != nil { return err } switch contentStoreType { case store.SociContentStoreType: injectSociContentStoreContentFromReader(sh, desc, content) case store.ContainerdContentStoreType: injectContainerdContentStoreContentFromReader(sh, desc, content) default: return store.ErrUnknownContentStoreType(contentStoreType) } return nil } func InjectContentStoreContentFromBytes(sh *shell.Shell, contentStoreType store.ContentStoreType, desc ocispec.Descriptor, content []byte) error { return InjectContentStoreContentFromReader(sh, contentStoreType, desc, bytes.NewReader(content)) } func writeFileFromReader(sh *shell.Shell, name string, content io.Reader, mode uint32) error { if err := sh.Command("mkdir", "-p", 
filepath.Dir(name)).Run(); err != nil { return err } cmd := sh.Command("/bin/sh", "-c", fmt.Sprintf("cat > %s && chmod %#o %s", name, mode, name)) cmd.Stdin = content return cmd.Run() } // WriteFileContents creates a file at the specified location in the specified execution environment // and writes the specified contents to that file. func WriteFileContents(sh *shell.Shell, name string, content []byte, mode uint32) error { return writeFileFromReader(sh, name, bytes.NewReader(content), mode) } // CopyInDir copies a directory into the specified location in the specified execution environment. func CopyInDir(sh *shell.Shell, from, to string) error { if !filepath.IsAbs(from) || !filepath.IsAbs(to) { return fmt.Errorf("path %v and %v must be absolute path", from, to) } pr, pw := io.Pipe() cmdFrom := exec.Command("tar", "-zcf", "-", "-C", from, ".") cmdFrom.Stdout = pw var eg errgroup.Group eg.Go(func() error { if err := cmdFrom.Run(); err != nil { pw.CloseWithError(err) return err } pw.Close() return nil }) tmpTar := "/tmptar" + xid.New().String() if err := writeFileFromReader(sh, tmpTar, pr, 0755); err != nil { return fmt.Errorf("writeFileFromReader: %w", err) } if err := eg.Wait(); err != nil { return fmt.Errorf("taring: %w", err) } if err := sh.Command("mkdir", "-p", to).Run(); err != nil { return fmt.Errorf("mkdir -p %v: %w", to, err) } if err := sh.Command("tar", "zxf", tmpTar, "-C", to).Run(); err != nil { return fmt.Errorf("tar zxf %v -C %v: %w", tmpTar, to, err) } return sh.Command("rm", tmpTar).Run() } // KillMatchingProcess kills processes that "ps" line matches the specified pattern in the // specified execution environment. 
func KillMatchingProcess(sh *shell.Shell, psLinePattern string) error { data, err := sh.Command("ps", "auxww").Output() if err != nil { return fmt.Errorf("failed to run ps command : %v", err) } var targets []int scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { psline := scanner.Text() matched, err := regexp.Match(psLinePattern, []byte(psline)) if err != nil { return err } if matched { es := strings.Fields(psline) if len(es) < 2 { continue } pid, err := strconv.ParseInt(es[1], 10, 32) if err != nil { continue } targets = append(targets, int(pid)) } } var allErr error for _, pid := range targets { if err := sh.Command("kill", "-9", fmt.Sprintf("%d", pid)).Run(); err != nil { errors.Join(allErr, fmt.Errorf("failed to kill %v: %w", pid, err)) } } return allErr } func RemoveContentStoreContent(sh *shell.Shell, contentStoreType store.ContentStoreType, contentDigest string) error { contentStoreType, err := store.CanonicalizeContentStoreType(contentStoreType) if err != nil { return err } switch contentStoreType { case store.SociContentStoreType: removeSociContentStoreContent(sh, contentDigest) case store.ContainerdContentStoreType: removeContainerdContentStoreContent(sh, contentDigest) } return nil } func removeSociContentStoreContent(sh *shell.Shell, contentDigest string) { path, _ := GetContentStoreBlobPath(store.SociContentStoreType) dgst, err := digest.Parse(contentDigest) if err == nil { sh.X("rm", filepath.Join(path, dgst.Encoded())) } } func removeContainerdContentStoreContent(sh *shell.Shell, contentDigest string) { sh.X("ctr", "content", "rm", contentDigest) } soci-snapshotter-0.4.1/util/testutil/store.go000066400000000000000000000032001454010642300213300ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "path/filepath" "github.com/awslabs/soci-snapshotter/soci/store" ) // GetContentStoreBlobPath returns the bottom level directory for the content store, e.g. "/blobs/sha256". func GetContentStoreBlobPath(contentStoreType store.ContentStoreType) (string, error) { contentStorePath, err := store.GetContentStorePath(contentStoreType) if err != nil { return "", err } return filepath.Join(contentStorePath, "blobs", "sha256"), nil } soci-snapshotter-0.4.1/util/testutil/tar.go000066400000000000000000000277631454010642300210060ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil // This utility helps test codes to generate sample tar blobs. import ( "archive/tar" "bytes" "compress/gzip" "fmt" "io" "os" "strings" "time" "github.com/klauspost/compress/zstd" ) // TarEntry is an entry of tar. type TarEntry interface { AppendTar(tw *tar.Writer, opts BuildTarOptions) error } // BuildTarOptions is a set of options used during building blob. type BuildTarOptions struct { // Prefix is the prefix string need to be added to each file name (e.g. "./", "/", etc.) Prefix string GzipComment string GzipFilename string GzipExtra []byte } // BuildTarOption is an option used during building blob. type BuildTarOption func(o *BuildTarOptions) // WithPrefix is an option to add a prefix string to each file name (e.g. "./", "/", etc.) 
func WithPrefix(prefix string) BuildTarOption { return func(o *BuildTarOptions) { o.Prefix = prefix } } func WithGzipComment(comment string) BuildTarOption { return func(o *BuildTarOptions) { o.GzipComment = comment } } func WithGzipFilename(filename string) BuildTarOption { return func(o *BuildTarOptions) { o.GzipFilename = filename } } func WithGzipExtra(extra []byte) BuildTarOption { return func(o *BuildTarOptions) { o.GzipExtra = extra } } // BuildTar builds a tar given a list of tar entries and returns an io.Reader func BuildTar(ents []TarEntry, opts ...BuildTarOption) io.Reader { var bo BuildTarOptions for _, o := range opts { o(&bo) } pr, pw := io.Pipe() go func() { tw := tar.NewWriter(pw) for _, ent := range ents { if err := ent.AppendTar(tw, bo); err != nil { pw.CloseWithError(err) return } } if err := tw.Close(); err != nil { pw.CloseWithError(err) return } pw.Close() }() return pr } // BuildTarGz builds a tar.gz given a list of tar entries and returns an io.Reader func BuildTarGz(ents []TarEntry, compressionLevel int, opts ...BuildTarOption) io.Reader { var bo BuildTarOptions for _, o := range opts { o(&bo) } pr, pw := io.Pipe() go func() { gw, err := gzip.NewWriterLevel(pw, compressionLevel) if err != nil { pw.CloseWithError(err) return } gw.Comment = bo.GzipComment gw.Name = bo.GzipFilename gw.Extra = bo.GzipExtra tw := tar.NewWriter(gw) for _, ent := range ents { if err := ent.AppendTar(tw, bo); err != nil { pw.CloseWithError(err) return } } if err := tw.Close(); err != nil { pw.CloseWithError(err) return } if err := gw.Close(); err != nil { pw.CloseWithError(err) return } pw.Close() }() return pr } // BuildTarZstd builds a tar blob with zstd compression. 
func BuildTarZstd(ents []TarEntry, compressionLevel int, opts ...BuildTarOption) io.Reader { var bo BuildTarOptions for _, o := range opts { o(&bo) } pr, pw := io.Pipe() go func() { zw, err := zstd.NewWriter(pw, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(compressionLevel))) if err != nil { pw.CloseWithError(err) return } tw := tar.NewWriter(zw) for _, ent := range ents { if err := ent.AppendTar(tw, bo); err != nil { pw.CloseWithError(err) return } } if err := tw.Close(); err != nil { pw.CloseWithError(err) return } if err := zw.Close(); err != nil { pw.CloseWithError(err) return } pw.Close() }() return pr } // WriteTarToTempFile writes the contents of a tar archive to a specified path and // return the temp filename and the tar data (as []byte). // // It's the caller's responsibility to remove the genreated temp file. func WriteTarToTempFile(tarNamePattern string, tarReader io.Reader) (string, []byte, error) { tarFile, err := os.CreateTemp("", tarNamePattern) if err != nil { return "", nil, fmt.Errorf("failed to create temp file: %w", err) } defer tarFile.Close() tarBuf := new(bytes.Buffer) w := io.MultiWriter(tarFile, tarBuf) _, err = io.Copy(w, tarReader) if err != nil { return "", nil, fmt.Errorf("failed to write tar file: %w", err) } return tarFile.Name(), tarBuf.Bytes(), nil } // GetFilesAndContentsWithinTarGz takes a path to a targz archive and returns a list of its files and their contents func GetFilesAndContentsWithinTarGz(tarGz string) (map[string][]byte, []string, error) { f, err := os.Open(tarGz) if err != nil { return nil, nil, err } defer f.Close() gr, err := gzip.NewReader(f) if err != nil { return nil, nil, err } tr := tar.NewReader(gr) return getFilesAndContentsFromTarReader(tr) } // GetFilesAndContentsWithinTar takes a path to a tar archive and returns a list of its files and their contents func GetFilesAndContentsWithinTar(tarFile string) (map[string][]byte, []string, error) { f, err := os.Open(tarFile) if err != nil { return nil, nil, err } 
defer f.Close() tr := tar.NewReader(f) return getFilesAndContentsFromTarReader(tr) } func getFilesAndContentsFromTarReader(tr *tar.Reader) (map[string][]byte, []string, error) { m := make(map[string][]byte) var files []string for { header, err := tr.Next() if err == io.EOF { break } if header.Typeflag == tar.TypeReg { files = append(files, header.Name) contents, err := io.ReadAll(tr) if err != nil { return nil, nil, err } m[header.Name] = contents } } return m, files, nil } type tarEntryFunc func(*tar.Writer, BuildTarOptions) error // AppendTar appends a file to a tar writer func (f tarEntryFunc) AppendTar(tw *tar.Writer, opts BuildTarOptions) error { return f(tw, opts) } // DirectoryBuildTarOption is an option for a directory entry. type DirectoryBuildTarOption func(o *dirOpts) type dirOpts struct { uid int gid int xattrs map[string]string mode *os.FileMode modTime time.Time } // WithDirModTime specifies the modtime of the dir. func WithDirModTime(modTime time.Time) DirectoryBuildTarOption { return func(o *dirOpts) { o.modTime = modTime } } // WithDirOwner specifies the owner of the directory. func WithDirOwner(uid, gid int) DirectoryBuildTarOption { return func(o *dirOpts) { o.uid = uid o.gid = gid } } // WithDirXattrs specifies the extended attributes of the directory. func WithDirXattrs(xattrs map[string]string) DirectoryBuildTarOption { return func(o *dirOpts) { o.xattrs = xattrs } } // WithDirMode specifies the mode of the directory. 
func WithDirMode(mode os.FileMode) DirectoryBuildTarOption { return func(o *dirOpts) { o.mode = &mode } } // Dir is a directory entry func Dir(name string, opts ...DirectoryBuildTarOption) TarEntry { return tarEntryFunc(func(tw *tar.Writer, buildOpts BuildTarOptions) error { var dOpts dirOpts for _, o := range opts { o(&dOpts) } if !strings.HasSuffix(name, "/") { panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) } var mode int64 = 0755 if dOpts.mode != nil { mode = permAndExtraMode2TarMode(*dOpts.mode) } return tw.WriteHeader(&tar.Header{ Typeflag: tar.TypeDir, Name: buildOpts.Prefix + name, Mode: mode, ModTime: dOpts.modTime, Xattrs: dOpts.xattrs, Uid: dOpts.uid, Gid: dOpts.gid, }) }) } // FileBuildTarOption is an option for a file entry. type FileBuildTarOption func(o *fileOpts) type fileOpts struct { uid int gid int xattrs map[string]string mode *os.FileMode modTime time.Time } // WithFileOwner specifies the owner of the file. func WithFileOwner(uid, gid int) FileBuildTarOption { return func(o *fileOpts) { o.uid = uid o.gid = gid } } // WithFileXattrs specifies the extended attributes of the file. func WithFileXattrs(xattrs map[string]string) FileBuildTarOption { return func(o *fileOpts) { o.xattrs = xattrs } } // WithFileModTime specifies the modtime of the file. func WithFileModTime(modTime time.Time) FileBuildTarOption { return func(o *fileOpts) { o.modTime = modTime } } // WithFileMode specifies the mode of the file. 
func WithFileMode(mode os.FileMode) FileBuildTarOption { return func(o *fileOpts) { o.mode = &mode } } // File is a regular file entry func File(name, contents string, opts ...FileBuildTarOption) TarEntry { return tarEntryFunc(func(tw *tar.Writer, buildOpts BuildTarOptions) error { var fOpts fileOpts for _, o := range opts { o(&fOpts) } if strings.HasSuffix(name, "/") { return fmt.Errorf("bogus trailing slash in file %q", name) } var mode int64 = 0644 if fOpts.mode != nil { mode = permAndExtraMode2TarMode(*fOpts.mode) } if err := tw.WriteHeader(&tar.Header{ Typeflag: tar.TypeReg, Name: buildOpts.Prefix + name, Mode: mode, ModTime: fOpts.modTime, Xattrs: fOpts.xattrs, Size: int64(len(contents)), Uid: fOpts.uid, Gid: fOpts.gid, }); err != nil { return err } _, err := io.WriteString(tw, contents) return err }) } // Symlink is a symlink entry func Symlink(name, target string) TarEntry { return tarEntryFunc(func(tw *tar.Writer, buildOpts BuildTarOptions) error { return tw.WriteHeader(&tar.Header{ Typeflag: tar.TypeSymlink, Name: buildOpts.Prefix + name, Linkname: target, Mode: 0644, }) }) } // Link is a hard-link entry func Link(name, linkname string) TarEntry { now := time.Now() return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error { return w.WriteHeader(&tar.Header{ Typeflag: tar.TypeLink, Name: buildOpts.Prefix + name, Linkname: linkname, ModTime: now, AccessTime: now, ChangeTime: now, }) }) } // Chardev is a character device entry func Chardev(name string, major, minor int64) TarEntry { now := time.Now() return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error { return w.WriteHeader(&tar.Header{ Typeflag: tar.TypeChar, Name: buildOpts.Prefix + name, Devmajor: major, Devminor: minor, ModTime: now, AccessTime: now, ChangeTime: now, }) }) } // Blockdev is a block device entry func Blockdev(name string, major, minor int64) TarEntry { now := time.Now() return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error { return 
w.WriteHeader(&tar.Header{ Typeflag: tar.TypeBlock, Name: buildOpts.Prefix + name, Devmajor: major, Devminor: minor, ModTime: now, AccessTime: now, ChangeTime: now, }) }) } // Fifo is a fifo entry func Fifo(name string) TarEntry { now := time.Now() return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error { return w.WriteHeader(&tar.Header{ Typeflag: tar.TypeFifo, Name: buildOpts.Prefix + name, ModTime: now, AccessTime: now, ChangeTime: now, }) }) } // suid, guid, sticky bits for archive/tar // https://github.com/golang/go/blob/release-branch.go1.13/src/archive/tar/common.go#L607-L609 const ( cISUID = 04000 // Set uid cISGID = 02000 // Set gid cISVTX = 01000 // Save text (sticky bit) ) func permAndExtraMode2TarMode(fm os.FileMode) (tm int64) { tm = int64(fm & os.ModePerm) if fm&os.ModeSetuid != 0 { tm |= cISUID } if fm&os.ModeSetgid != 0 { tm |= cISGID } if fm&os.ModeSticky != 0 { tm |= cISVTX } return } soci-snapshotter-0.4.1/util/testutil/template.go000066400000000000000000000036331454010642300220210ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "bytes" "fmt" "text/template" "github.com/opencontainers/go-digest" ) // ApplyTextTemplate applies the config to the specified template. func ApplyTextTemplate(temp string, config interface{}) (string, error) { data, err := ApplyTextTemplateErr(temp, config) if err != nil { return "", fmt.Errorf("failed to apply config %v to template", config) } return string(data), nil } // ApplyTextTemplateErr applies the config to the specified template. func ApplyTextTemplateErr(temp string, conf interface{}) ([]byte, error) { var buf bytes.Buffer if err := template.Must(template.New(digest.FromString(temp).String()).Parse(temp)).Execute(&buf, conf); err != nil { return nil, err } return buf.Bytes(), nil } soci-snapshotter-0.4.1/util/testutil/util.go000066400000000000000000000134511454010642300211620ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "encoding/binary" "fmt" "io" "log" "math/rand" "os" "os/exec" "path/filepath" "strings" "sync" "github.com/opencontainers/go-digest" ) const ( rootRelGOPATH = "/src/github.com/awslabs/soci-snapshotter" projectRootEnv = "SOCI_SNAPSHOTTER_PROJECT_ROOT" BuildKitVersion = "v0.8.1" ) // TestingL is a Logger instance used during testing. This allows tests to prints logs in realtime. var TestingL = log.New(os.Stdout, "testing: ", log.Ldate|log.Ltime) // TestingLogDest returns Writes of Testing.T. func TestingLogDest() (io.Writer, io.Writer) { return TestingL.Writer(), TestingL.Writer() } // StreamTestingLogToFile allows TestingL to stream the logging output to the speicified file. func StreamTestingLogToFile(destPath string) (func() error, error) { if !filepath.IsAbs(destPath) { return nil, fmt.Errorf("log destination must be an absolute path: got %v", destPath) } f, err := os.Create(destPath) if err != nil { return nil, fmt.Errorf("failed to create %v: %w", destPath, err) } TestingL.SetOutput(io.MultiWriter(f, os.Stdout)) return f.Close, nil } // GetProjectRoot returns the path to the directory where the source code of this project reside. 
func GetProjectRoot() (string, error) { pRoot := os.Getenv(projectRootEnv) if pRoot == "" { gopath := os.Getenv("GOPATH") if gopath == "" { gopathB, err := exec.Command("go", "env", "GOPATH").Output() if len(gopathB) == 0 || err != nil { return "", fmt.Errorf("project unknown; specify %v or GOPATH: %v", projectRootEnv, err) } gopath = strings.TrimSpace(string(gopathB)) } pRoot = filepath.Join(gopath, rootRelGOPATH) if _, err := os.Stat(pRoot); err != nil { return "", fmt.Errorf("project (%v) unknown; specify %v", pRoot, projectRootEnv) } } if _, err := os.Stat(filepath.Join(pRoot, "Dockerfile")); err != nil { return "", fmt.Errorf("no Dockerfile was found under project root") } return pRoot, nil } const TestRandomSeed = 1658503010463818386 // ThreadsafeRandom is like rand.Rand with thread safety. // rand.Rand is not threadsafe except for the global rand.Rand which is only accessible through // the exported function on the rand package (e.g. rand.Int63()). This is done by special casing // a non-exported rand.lockedSource which is locked before doing any modification of the source or rand. // It's not possible to create our own lockedSource that implements `rand.Source` because `rand.Rand` // itself is not threadsafe. The actual implementation gets around this by locking the `rand.Rand`'s source // which effectively locks the `rand.Rand` as well. // There is an expermiental version of rand that exports `rand.LockedSource`. If that ever lands, then we can // remove all of this code and just use `r := rand.New(rand.NewLockedSource(seed))`. 
// https://pkg.go.dev/golang.org/x/exp@v0.0.0-20230801115018-d63ba01acd4b/rand#LockedSource type ThreadsafeRandom struct { l sync.Mutex r *rand.Rand } func NewThreadsafeRandom() *ThreadsafeRandom { return &ThreadsafeRandom{ l: sync.Mutex{}, r: rand.New(rand.NewSource(TestRandomSeed)), } } func (tsr *ThreadsafeRandom) Intn(n int) int { tsr.l.Lock() defer tsr.l.Unlock() return tsr.r.Intn(n) } func (tsr *ThreadsafeRandom) Int63() int64 { tsr.l.Lock() defer tsr.l.Unlock() return tsr.r.Int63() } func (tsr *ThreadsafeRandom) Read(b []byte) (int, error) { tsr.l.Lock() defer tsr.l.Unlock() return tsr.r.Read(b) } var r = NewThreadsafeRandom() // RandomUInt64 returns a random uint64 value generated from /dev/uramdom. func RandomUInt64() (uint64, error) { f, err := os.Open("/dev/urandom") if err != nil { return 0, fmt.Errorf("failed to open /dev/urandom") } defer f.Close() b := make([]byte, 8) if _, err := f.Read(b); err != nil { return 0, fmt.Errorf("failed to read /dev/urandom") } return binary.LittleEndian.Uint64(b), nil } // RandomByteData returns a byte slice with `size` populated with random generated data func RandomByteData(size int64) []byte { b := make([]byte, size) r.Read(b) return b } // RandomByteDataRange returns a byte slice with `size` between minBytes and maxBytes exclusive populated with random data func RandomByteDataRange(minBytes int, maxBytes int) []byte { const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + " " r := NewThreadsafeRandom() randByteNum := r.Intn(maxBytes-minBytes) + minBytes randBytes := make([]byte, randByteNum) for i := range randBytes { randBytes[i] = charset[r.Intn(len(charset))] } return randBytes } // RandomDigest generates a random digest from a random sequence of bytes func RandomDigest() string { d := digest.FromBytes(RandomByteData(10)) return d.String() } 
soci-snapshotter-0.4.1/version/000077500000000000000000000000001454010642300165055ustar00rootroot00000000000000soci-snapshotter-0.4.1/version/version.go000066400000000000000000000026521454010642300205260ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package version const Unset = "" var ( // Version is the version number. Filled in at linking time (via Makefile). Version = Unset // Revision is the VCS (e.g. git) revision. Filled in at linking time (via Makefile). Revision = Unset ) soci-snapshotter-0.4.1/version/version_test.go000066400000000000000000000024551454010642300215660ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package version import "testing" func TestVersion(t *testing.T) { if Version == Unset { t.Fatalf("version is unset") } } soci-snapshotter-0.4.1/ztoc/000077500000000000000000000000001454010642300157775ustar00rootroot00000000000000soci-snapshotter-0.4.1/ztoc/compression/000077500000000000000000000000001454010642300203405ustar00rootroot00000000000000soci-snapshotter-0.4.1/ztoc/compression/fbs/000077500000000000000000000000001454010642300211125ustar00rootroot00000000000000soci-snapshotter-0.4.1/ztoc/compression/fbs/zinfo.fbs000066400000000000000000000001601454010642300227300ustar00rootroot00000000000000namespace zinfo; table TarZinfo { version : int32; span_size : int64; size : int64; } root_type TarZinfo; soci-snapshotter-0.4.1/ztoc/compression/fbs/zinfo/000077500000000000000000000000001454010642300222375ustar00rootroot00000000000000soci-snapshotter-0.4.1/ztoc/compression/fbs/zinfo/TarZinfo.go000066400000000000000000000037641454010642300243340ustar00rootroot00000000000000// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
package zinfo import ( flatbuffers "github.com/google/flatbuffers/go" ) type TarZinfo struct { _tab flatbuffers.Table } func GetRootAsTarZinfo(buf []byte, offset flatbuffers.UOffsetT) *TarZinfo { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &TarZinfo{} x.Init(buf, n+offset) return x } func GetSizePrefixedRootAsTarZinfo(buf []byte, offset flatbuffers.UOffsetT) *TarZinfo { n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) x := &TarZinfo{} x.Init(buf, n+offset+flatbuffers.SizeUint32) return x } func (rcv *TarZinfo) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *TarZinfo) Table() flatbuffers.Table { return rcv._tab } func (rcv *TarZinfo) Version() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *TarZinfo) MutateVersion(n int32) bool { return rcv._tab.MutateInt32Slot(4, n) } func (rcv *TarZinfo) SpanSize() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *TarZinfo) MutateSpanSize(n int64) bool { return rcv._tab.MutateInt64Slot(6, n) } func (rcv *TarZinfo) Size() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *TarZinfo) MutateSize(n int64) bool { return rcv._tab.MutateInt64Slot(8, n) } func TarZinfoStart(builder *flatbuffers.Builder) { builder.StartObject(3) } func TarZinfoAddVersion(builder *flatbuffers.Builder, version int32) { builder.PrependInt32Slot(0, version, 0) } func TarZinfoAddSpanSize(builder *flatbuffers.Builder, spanSize int64) { builder.PrependInt64Slot(1, spanSize, 0) } func TarZinfoAddSize(builder *flatbuffers.Builder, size int64) { builder.PrependInt64Slot(2, size, 0) } func TarZinfoEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } 
soci-snapshotter-0.4.1/ztoc/compression/gzip_zinfo.c000066400000000000000000000474671454010642300227040ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Jean-loup Gailly Mark Adler jloup@gzip.org madler@alumni.caltech.edu */ /* This source code is based on https://github.com/madler/zlib/blob/master/examples/zran.c and related code from that repository. It retains the copyright and distribution restrictions of that work. It has been substantially modified from the original. 
*/ #include "gzip_zinfo.h" #include #include #include #include #include #define CHUNK (1 << 14) // file input buffer size // zinfo - internal helpers start. /* Convert integer types to little endian and vice versa. This is needed to keep zinfo consistent across multiple architectures, ensuring that all integer fields will be stored in little endian. */ inline offset_t encode_offset(offset_t source) { return htole64(source); } inline offset_t decode_offset(offset_t source) { return le64toh(source); } inline int32_t encode_int32(int32_t source) { return htole32(source); } inline int32_t decode_int32(int32_t source) { return le32toh(source); } static int min(int lhs, int rhs) { return lhs < rhs ? lhs : rhs; } int init_flate(z_stream *strm, int windowBits) { int ret; strm->zalloc = Z_NULL; strm->zfree = Z_NULL; strm->opaque = Z_NULL; strm->avail_in = 0; strm->next_in = Z_NULL; ret = inflateInit2(strm, windowBits); return ret; } static uint8_t get_bits(struct gzip_zinfo *index, int checkpoint) { return index->list[checkpoint].bits; } // zinfo - internal helpers end. // zinfo - metadata starts. int pt_index_from_ucmp_offset(struct gzip_zinfo* index, offset_t off) { if (index == NULL) return -1; int res = 0; struct gzip_checkpoint* here = index->list; int ret = decode_int32(index->have); while (--ret && decode_offset(here[1].out) <= off) { here++; res++; } return res; } offset_t get_ucomp_off(struct gzip_zinfo *index, int checkpoint) { return decode_offset(index->list[checkpoint].out); } offset_t get_comp_off(struct gzip_zinfo *index, int checkpoint) { return decode_offset(index->list[checkpoint].in); } unsigned get_blob_size(struct gzip_zinfo *index) { if (index == NULL) return 0; unsigned size = decode_int32(index->size); if (decode_int32(index->version) == ZINFO_VERSION_ONE) size--; /* The buffer will be tightly packed. The layout of the buffer is: - Some fixed size based on which version - PACKED_CHECKPOINT_SIZE for each span. 
If we have a v1 gzip_zinfo, we skip the first checkpoint this is a bug, but it keeps backwards compatibility */ return PACKED_CHECKPOINT_SIZE * size + BLOB_HEADER_SIZE; } int32_t get_max_span_id(struct gzip_zinfo *index) { if (index == NULL) return 0; return decode_int32(index->have) - 1; } int has_bits(struct gzip_zinfo *index, int checkpoint) { if (checkpoint >= decode_int32(index->have)) return 0; return index->list[checkpoint].bits != 0; } // zinfo - metadata ends. void free_zinfo(struct gzip_zinfo *index) { if (index != NULL) { free(index->list); free(index); } } // zinfo - generation/extraction starts. /* Add an entry to the access point list. If out of memory, deallocate the existing list and return NULL. */ static struct gzip_zinfo *add_checkpoint(struct gzip_zinfo *index, uint8_t bits, offset_t in, offset_t out, unsigned left, unsigned char *window) { struct gzip_checkpoint *next; if (index == NULL) { /* if list is empty, create it (start with eight points) */ index = malloc(sizeof(struct gzip_zinfo)); if (index == NULL) return NULL; index->list = malloc(sizeof(struct gzip_checkpoint) << 3); if (index->list == NULL) { free(index); return NULL; } index->size = 8; index->have = 0; } else if (index->have == index->size) { /* if list is full, make it bigger */ index->size <<= 1; next = realloc(index->list, sizeof(struct gzip_checkpoint) * index->size); if (next == NULL) { free_zinfo(index); return NULL; } index->list = next; } /* fill in entry and increment how many we have */ next = index->list + index->have; next->bits = bits; next->in = encode_offset(in); next->out = encode_offset(out); if (left) memcpy(next->window, window + WINSIZE - left, left); if (left < WINSIZE) memcpy(next->window + left, window, WINSIZE - left); index->have++; /* return list, possibly reallocated */ return index; } /* Pretty much the same as from zran.c */ int generate_zinfo_from_fp(FILE* in, offset_t span, struct gzip_zinfo** idx) { int ret; offset_t totin, totout; /* our own total 
counters to avoid 4GB limit */ offset_t last; /* totout value of last access point */ struct gzip_zinfo *index; /* access points being generated */ z_stream strm; unsigned char input[CHUNK], window[WINSIZE]; memset(window, 0, WINSIZE); /* initialize inflate */ ret = init_flate(&strm, 47); /* automatic zlib or gzip decoding */ if (ret != Z_OK) return ret; /* inflate the input, maintain a sliding window, and build an index -- this also validates the integrity of the compressed data using the check information at the end of the gzip or zlib stream */ totin = totout = last = 0; index = NULL; /* will be allocated by first add_checkpoint() */ strm.avail_out = 0; do { /* get some compressed data from input file */ memset(input, 0, CHUNK); strm.avail_in = fread(input, 1, CHUNK, in); if (ferror(in)) { ret = Z_ERRNO; goto build_index_error; } if (strm.avail_in == 0) { ret = Z_DATA_ERROR; goto build_index_error; } strm.next_in = input; /* process all of that, or until end of stream */ do { /* reset sliding window if necessary */ if (strm.avail_out == 0) { strm.avail_out = WINSIZE; strm.next_out = window; } /* inflate until out of input, output, or at end of block -- update the total input and output counters */ totin += strm.avail_in; totout += strm.avail_out; ret = inflate(&strm, Z_BLOCK); /* return at end of block */ totin -= strm.avail_in; totout -= strm.avail_out; if (ret == Z_NEED_DICT) ret = Z_DATA_ERROR; if (ret == Z_MEM_ERROR || ret == Z_DATA_ERROR) goto build_index_error; if (ret == Z_STREAM_END) break; /* if at end of block, consider adding an index entry (note that if data_type indicates an end-of-block, then all of the uncompressed data from that block has been delivered, and none of the compressed data after that block has been consumed, except for up to seven bits) -- the totout == 0 provides an entry point after the zlib or gzip header, and assures that the index always has at least one access point; we avoid creating an access point after the last block by 
checking bit 6 of data_type */ if ((strm.data_type & 128) && !(strm.data_type & 64) && (totout == 0 || totout - last > span)) { index = add_checkpoint(index, (uint8_t)(strm.data_type & 7), totin, totout, strm.avail_out, window); if (index == NULL) { ret = Z_MEM_ERROR; goto build_index_error; } last = totout; } } while (strm.avail_in != 0); } while (ret != Z_STREAM_END); /* clean up and return index (release unused entries in list) */ (void)inflateEnd(&strm); index->list = realloc(index->list, sizeof(struct gzip_checkpoint) * index->have); index->size = index->have; index->have = encode_int32(index->have); int32_t sz = index->size; index->size = encode_int32(index->size); index->span_size = encode_offset(span); index->version = encode_int32(ZINFO_VERSION_CUR); *idx = index; return sz; /* return error */ build_index_error: (void)inflateEnd(&strm); free_zinfo(index); return ret; } int generate_zinfo_from_file(const char *filepath, offset_t span, struct gzip_zinfo **index) { FILE *fp = fopen(filepath, "rb"); if (fp == NULL) return GZIP_ZINFO_FILE_NOT_FOUND; int ret = generate_zinfo_from_fp(fp, span, index); fclose(fp); return ret; } int extract_data_from_fp(FILE *in, struct gzip_zinfo *index, offset_t offset, void *buffer, int len) { int ret, skip; z_stream strm; struct gzip_checkpoint *here; unsigned char input[CHUNK], discard[WINSIZE]; uchar* buf = buffer; /* proceed only if something reasonable to do */ if (len < 0) return 0; /* find where in stream to start */ here = index->list; ret = decode_int32(index->have); while (--ret && decode_offset(here[1].out) <= offset) here++; /* initialize inflate */ ret = init_flate(&strm, -15); /* raw inflate */ if (ret != Z_OK) return ret; ret = fseeko(in, decode_offset(here->in) - (here->bits ? 1 : 0), SEEK_SET); if (ret == -1) goto extract_ret; if (here->bits) { ret = getc(in); if (ret == -1) { ret = ferror(in) ? 
Z_ERRNO : Z_DATA_ERROR; goto extract_ret; } (void)inflatePrime(&strm, here->bits, ret >> (8 - here->bits)); } (void)inflateSetDictionary(&strm, here->window, WINSIZE); /* skip uncompressed bytes until offset reached, then satisfy request */ offset -= decode_offset(here->out); strm.avail_in = 0; skip = 1; /* while skipping to offset */ do { /* define where to put uncompressed data, and how much */ if (offset == 0 && skip) { /* at offset now */ strm.avail_out = len; strm.next_out = buf; skip = 0; /* only do this once */ } if (offset > WINSIZE) { /* skip WINSIZE bytes */ strm.avail_out = WINSIZE; strm.next_out = discard; offset -= WINSIZE; } else if (offset != 0) { /* last skip */ strm.avail_out = (unsigned)offset; strm.next_out = discard; offset = 0; } /* uncompress until avail_out filled, or end of stream */ do { if (strm.avail_in == 0) { strm.avail_in = fread(input, 1, CHUNK, in); if (ferror(in)) { ret = Z_ERRNO; goto extract_ret; } if (strm.avail_in == 0) { ret = Z_DATA_ERROR; goto extract_ret; } strm.next_in = input; } ret = inflate(&strm, Z_NO_FLUSH); /* normal inflate */ if (ret == Z_NEED_DICT) ret = Z_DATA_ERROR; if (ret == Z_MEM_ERROR || ret == Z_DATA_ERROR) goto extract_ret; if (ret == Z_STREAM_END) break; } while (strm.avail_out != 0); /* if reach end of stream, then don't keep trying to get more */ if (ret == Z_STREAM_END) break; /* do until offset reached and requested data read, or stream ends */ } while (skip); /* compute number of uncompressed bytes read after offset */ ret = skip ? 
0 : len - strm.avail_out; /* clean up and return bytes read or error */ extract_ret: (void)inflateEnd(&strm); return ret; } int extract_data_from_file(const char* file, struct gzip_zinfo* index, offset_t offset, void* buf, int len) { FILE* fp = fopen(file, "rb"); if (fp == NULL) return GZIP_ZINFO_FILE_NOT_FOUND; int ret = extract_data_from_fp(fp, index, offset, buf, len); fclose(fp); return ret; } // This is the same as extract_data_fp, but instead of a file, it decompresses // data from a buffer which contains the exact data to decompress int extract_data_from_buffer(void *d, offset_t datalen, struct gzip_zinfo *index, offset_t offset, void *buffer, offset_t len, int first_checkpoint) { int ret, skip; z_stream strm; unsigned char input[CHUNK], discard[WINSIZE]; uchar *buf = buffer; uchar *data = d; /* proceed only if something reasonable to do */ if (len < 0) return 0; uint8_t bits = get_bits(index, first_checkpoint); /* initialize inflate */ ret = init_flate(&strm, -15); /* raw inflate */ if (ret != Z_OK) return ret; if (bits) { int ret = data[0]; inflatePrime(&strm, bits, ret >> (8 - bits)); data++; } (void)inflateSetDictionary(&strm, index->list[first_checkpoint].window, WINSIZE); offset -= decode_offset(index->list[first_checkpoint].out); strm.avail_in = 0; skip = 1; /* while skipping to offset */ int remaining = datalen; do { /* define where to put uncompressed data, and how much */ if (offset == 0 && skip) { /* at offset now */ strm.avail_out = len; strm.next_out = buf; skip = 0; /* only do this once */ } if (offset > WINSIZE) { /* skip WINSIZE bytes */ strm.avail_out = WINSIZE; strm.next_out = discard; offset -= WINSIZE; } else if (offset != 0) { /* last skip */ strm.avail_out = (unsigned)offset; strm.next_out = discard; offset = 0; } /* uncompress until avail_out filled, or end of stream */ do { if (strm.avail_in == 0) { int read = min(remaining, CHUNK); remaining -= read; memcpy(input, data, read); data += read; strm.avail_in = read; strm.next_in = input; 
} ret = inflate(&strm, Z_NO_FLUSH); /* normal inflate */ if (ret == Z_NEED_DICT) ret = Z_DATA_ERROR; if (ret == Z_MEM_ERROR || ret == Z_DATA_ERROR) goto extract_ret; if (ret == Z_STREAM_END) break; } while (strm.avail_out != 0); /* if reach end of stream, then don't keep trying to get more */ if (ret == Z_STREAM_END) break; /* do until offset reached and requested data read, or stream ends */ } while (skip); /* compute number of uncompressed bytes read after offset */ ret = skip ? 0 : len - strm.avail_out; /* clean up and return bytes read or error */ extract_ret: (void)inflateEnd(&strm); return ret; } // zinfo - generation/extraction ends. // zinfo - zinfo <-> blob conversion starts. int zinfo_to_blob(struct gzip_zinfo* index, void* buf) { if (index == NULL) return GZIP_ZINFO_INDEX_NULL; // TODO: Since this will be serialized to file, we need to be mindful of endianness. Right now, we are just ignoring it // Or maybe not, since Golang might take care of it if (buf == NULL) return 0; uchar* cur = buf; int32_t first_checkpoint_index; memcpy(cur, &index->have, 4); cur += 4; memcpy(cur, &index->span_size, 8); cur += 8; first_checkpoint_index = 0; // in v1, we skipped the 0th checkpoint becasue we assumed it was fixed size. // in v2, we encode the 0th block because it's not a fixed size if gzip headers are used. // for backwards compatibility we want to reserialize v1 zinfo to exactly the same bytes // even though there is technically a bug. 
if (decode_int32(index->version) == ZINFO_VERSION_ONE) first_checkpoint_index = 1; for(int i = first_checkpoint_index; i < decode_int32(index->have); i++) { struct gzip_checkpoint* pt = &index->list[i]; memcpy(cur, &pt->in, 8); cur += 8; memcpy(cur, &pt->out, 8); cur += 8; memcpy(cur, &pt->bits, 1); cur += 1; memcpy(cur, &pt->window, WINSIZE); cur += WINSIZE; } return get_blob_size(index); } struct gzip_zinfo* blob_to_zinfo(void* buf, offset_t len) { if (buf == NULL) return NULL; if (len < BLOB_HEADER_SIZE) return NULL; struct gzip_zinfo* index = malloc(sizeof(struct gzip_zinfo)); if (index == NULL) return NULL; int32_t size, first_checkpoint_index, version; offset_t claimed_size, span_size; uchar* cur = buf; memcpy(&size, cur, 4); cur += 4; memcpy(&span_size, cur, 8); cur += 8; claimed_size = PACKED_CHECKPOINT_SIZE * decode_int32(size) + BLOB_HEADER_SIZE; if (claimed_size == len) { // If we have exactly size checkpoints, then we have a current blob version = ZINFO_VERSION_CUR; } else if (claimed_size - PACKED_CHECKPOINT_SIZE == len) { // If we only have size - 1 checkpoints, then we have a v1 blob version = ZINFO_VERSION_ONE; } else { // size is invalid. don't attempt to deserialize any more data. 
return NULL; } index->list = malloc(sizeof(struct gzip_checkpoint) * decode_int32(size)); if (index->list == NULL) { free_zinfo(index); return NULL; } first_checkpoint_index = 0; index->version = encode_int32(version); if (version == ZINFO_VERSION_ONE) { first_checkpoint_index = 1; struct gzip_checkpoint* pt0 = &index->list[0]; pt0->bits = 0; pt0->in = encode_offset(10); pt0->out = 0; memset(pt0->window, 0, WINSIZE); } for(int32_t i = first_checkpoint_index; i < decode_int32(size); i++) { struct gzip_checkpoint* pt = &index->list[i]; memcpy(&pt->in, cur, 8); cur += 8; memcpy(&pt->out, cur, 8); cur += 8; memcpy(&pt->bits, cur, 1); cur += 1; memcpy(&pt->window, cur, WINSIZE); cur += WINSIZE; } index->have = size; index->size = size; index->span_size = span_size; return index; } // zinfo - zinfo <-> blob conversion ends. soci-snapshotter-0.4.1/ztoc/compression/gzip_zinfo.go000066400000000000000000000146221454010642300230520ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package compression // #cgo CFLAGS: -I${SRCDIR}/ // #cgo LDFLAGS: -L${SRCDIR}/../out -l:libz.a // #include "gzip_zinfo.h" // #include // #include import "C" import ( "fmt" "unsafe" ) // GzipZinfo is a go struct wrapper of the gzip zinfo's C implementation. type GzipZinfo struct { cZinfo *C.struct_gzip_zinfo } // newGzipZinfo creates a new instance of `GzipZinfo` from cZinfo byte blob on zTOC. 
func newGzipZinfo(zinfoBytes []byte) (*GzipZinfo, error) { if len(zinfoBytes) == 0 { return nil, fmt.Errorf("empty checkpoints") } cZinfo := C.blob_to_zinfo(unsafe.Pointer(&zinfoBytes[0]), C.off_t(len(zinfoBytes))) if cZinfo == nil { return nil, fmt.Errorf("cannot convert blob to gzip_zinfo") } return &GzipZinfo{ cZinfo: cZinfo, }, nil } // newGzipZinfoFromFile creates a new instance of `GzipZinfo` given gzip file name and span size. func newGzipZinfoFromFile(gzipFile string, spanSize int64) (*GzipZinfo, error) { cstr := C.CString(gzipFile) defer C.free(unsafe.Pointer(cstr)) var cZinfo *C.struct_gzip_zinfo ret := C.generate_zinfo_from_file(cstr, C.off_t(spanSize), &cZinfo) if int(ret) < 0 { return nil, fmt.Errorf("could not generate gzip zinfo. gzip error: %v", ret) } return &GzipZinfo{ cZinfo: cZinfo, }, nil } // Close calls `C.free` on the pointer to `C.struct_gzip_zinfo`. func (i *GzipZinfo) Close() { if i.cZinfo != nil { C.free(unsafe.Pointer(i.cZinfo)) } } // Bytes returns the byte slice containing the zinfo. func (i *GzipZinfo) Bytes() ([]byte, error) { blobSize := C.get_blob_size(i.cZinfo) bytes := make([]byte, uint64(blobSize)) if len(bytes) == 0 { return nil, fmt.Errorf("could not allocate byte array of size %d", blobSize) } ret := C.zinfo_to_blob(i.cZinfo, unsafe.Pointer(&bytes[0])) if int(ret) <= 0 { return nil, fmt.Errorf("could not serialize gzip zinfo to byte array; gzip error: %v", ret) } return bytes, nil } // MaxSpanID returns the max span ID. func (i *GzipZinfo) MaxSpanID() SpanID { return SpanID(C.get_max_span_id(i.cZinfo)) } // SpanSize returns the span size of the constructed ztoc. func (i *GzipZinfo) SpanSize() Offset { return Offset(i.cZinfo.span_size) } // UncompressedOffsetToSpanID returns the ID of the span containing the data pointed by uncompressed offset. 
func (i *GzipZinfo) UncompressedOffsetToSpanID(offset Offset) SpanID { return SpanID(C.pt_index_from_ucmp_offset(i.cZinfo, C.long(offset))) } // ExtractDataFromBuffer wraps the call to `C.extract_data_from_buffer`, which takes in the compressed bytes // and returns the decompressed bytes. func (i *GzipZinfo) ExtractDataFromBuffer(compressedBuf []byte, uncompressedSize, uncompressedOffset Offset, spanID SpanID) ([]byte, error) { if len(compressedBuf) == 0 { return nil, fmt.Errorf("empty compressed buffer") } if uncompressedSize < 0 { return nil, fmt.Errorf("invalid uncompressed size: %d", uncompressedSize) } if uncompressedSize == 0 { return []byte{}, nil } bytes := make([]byte, uncompressedSize) ret := C.extract_data_from_buffer( unsafe.Pointer(&compressedBuf[0]), C.off_t(len(compressedBuf)), i.cZinfo, C.off_t(uncompressedOffset), unsafe.Pointer(&bytes[0]), C.off_t(uncompressedSize), C.int(spanID), ) if ret <= 0 { return bytes, fmt.Errorf("error extracting data; return code: %v", ret) } return bytes, nil } // ExtractDataFromFile wraps `C.extract_data_from_file` and returns the decompressed bytes given the name of the .tar.gz file, // offset and the size in uncompressed stream. func (i *GzipZinfo) ExtractDataFromFile(fileName string, uncompressedSize, uncompressedOffset Offset) ([]byte, error) { cstr := C.CString(fileName) defer C.free(unsafe.Pointer(cstr)) if uncompressedSize < 0 { return nil, fmt.Errorf("invalid uncompressed size: %d", uncompressedSize) } if uncompressedSize == 0 { return []byte{}, nil } bytes := make([]byte, uncompressedSize) ret := C.extract_data_from_file(cstr, i.cZinfo, C.off_t(uncompressedOffset), unsafe.Pointer(&bytes[0]), C.int(uncompressedSize)) if ret <= 0 { return nil, fmt.Errorf("unable to extract data; return code = %v", ret) } return bytes, nil } // StartCompressedOffset returns the start offset of the span in the compressed stream. 
func (i *GzipZinfo) StartCompressedOffset(spanID SpanID) Offset { start := i.getCompressedOffset(spanID) if i.hasBits(spanID) { start-- } return start } // EndCompressedOffset returns the end offset of the span in the compressed stream. If // it's the last span, returns the size of the compressed stream. func (i *GzipZinfo) EndCompressedOffset(spanID SpanID, fileSize Offset) Offset { if spanID == i.MaxSpanID() { return fileSize } return i.getCompressedOffset(spanID + 1) } // StartUncompressedOffset returns the start offset of the span in the uncompressed stream. func (i *GzipZinfo) StartUncompressedOffset(spanID SpanID) Offset { return i.getUncompressedOffset(spanID) } // EndUncompressedOffset returns the end offset of the span in the uncompressed stream. If // it's the last span, returns the size of the uncompressed stream. func (i *GzipZinfo) EndUncompressedOffset(spanID SpanID, fileSize Offset) Offset { if spanID == i.MaxSpanID() { return fileSize } return i.getUncompressedOffset(spanID + 1) } // getCompressedOffset wraps `C.get_comp_off` and returns the offset for the span in the compressed stream. func (i *GzipZinfo) getCompressedOffset(spanID SpanID) Offset { return Offset(C.get_comp_off(i.cZinfo, C.int(spanID))) } // hasBits wraps `C.has_bits` and returns true if any data is contained in the previous span. func (i *GzipZinfo) hasBits(spanID SpanID) bool { return C.has_bits(i.cZinfo, C.int(spanID)) != 0 } // getUncompressedOffset wraps `C.get_uncomp_off` and returns the offset for the span in the uncompressed stream. func (i *GzipZinfo) getUncompressedOffset(spanID SpanID) Offset { return Offset(C.get_ucomp_off(i.cZinfo, C.int(spanID))) } soci-snapshotter-0.4.1/ztoc/compression/gzip_zinfo.h000066400000000000000000000110531454010642300226670ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Jean-loup Gailly Mark Adler jloup@gzip.org madler@alumni.caltech.edu */ /* This source code is based on https://github.com/madler/zlib/blob/master/examples/zran.c and related code from that repository. It retains the copyright and distribution restrictions of that work. It has been substantially modified from the original. 
*/ #ifndef GZIP_ZINFO_H #define GZIP_ZINFO_H #include #include #include #include #include typedef unsigned char uchar; typedef int64_t offset_t; #define ZINFO_VERSION_ONE 1 #define ZINFO_VERSION_TWO 2 #define ZINFO_VERSION_CUR ZINFO_VERSION_TWO /* Since gzip is compressed with 32 KiB window size, WINDOW_SIZE is fixed */ #define WINSIZE 32768U /* - 8 bytes, compressed offset - 8 bytes, uncompressed offset - 1 byte, bits - 32768 bytes, window */ #define PACKED_CHECKPOINT_SIZE (8 + 8 + 1 + WINSIZE) /* - 4 bytes, number of checkpoints - 8 bytes, span size */ #define BLOB_HEADER_SIZE (4 + 8) enum { GZIP_ZINFO_OK = 0, GZIP_ZINFO_FILE_NOT_FOUND = -80, GZIP_ZINFO_INDEX_NULL = -81, GZIP_ZINFO_CANNOT_ALLOC = -82, }; struct gzip_checkpoint { offset_t out; /* corresponding offset in uncompressed data */ offset_t in; /* offset in input file of first full byte */ uint8_t bits; /* number of bits (1-7) from byte at in - 1, or 0 */ unsigned char window[WINSIZE]; /* preceding 32K of uncompressed data */ }; struct gzip_zinfo { int32_t version; int32_t have; /* number of list entries filled in */ int32_t size; /* number of list entries allocated */ struct gzip_checkpoint *list; /* allocated list */ offset_t span_size; }; // zinfo - metadata starts. // Get index number of gzip zinfo within which the uncompressed offset is present int pt_index_from_ucmp_offset(struct gzip_zinfo *index, offset_t off); offset_t get_ucomp_off(struct gzip_zinfo *index, int checkpoint); offset_t get_comp_off(struct gzip_zinfo *index, int checkpoint); unsigned get_blob_size(struct gzip_zinfo *index); int32_t get_max_span_id(struct gzip_zinfo *index); int has_bits(struct gzip_zinfo *index, int checkpoint); // zinfo - metadata ends. // zinfo - generation/extraction starts. 
int generate_zinfo_from_file(const char* filepath, offset_t span, struct gzip_zinfo** index); int extract_data_from_file(const char* file, struct gzip_zinfo* index, offset_t offset, void* buf, int len); int extract_data_from_buffer(void* d, offset_t datalen, struct gzip_zinfo* index, offset_t offset, void* buffer, offset_t len, int first_checkpoint); // zinfo - generation/extraction ends. // zinfo - zinfo <-> blob conversion starts. /* Converts zinfo to blob Returns the size of the buffer on success This function assumes that the buffer is large enough already to hold the entire zinfo */ int zinfo_to_blob(struct gzip_zinfo* index, void* buf); struct gzip_zinfo* blob_to_zinfo(void* buf, offset_t len); // zinfo - zinfo <-> blob conversion ends. #endif // GZIP_ZINFO_H soci-snapshotter-0.4.1/ztoc/compression/gzip_zinfo_test.go000066400000000000000000000111511454010642300241030ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package compression import ( "testing" ) func TestNewGzipZinfo(t *testing.T) { t.Parallel() testCases := []struct { name string zinfoBytes []byte expectError bool }{ { name: "nil zinfoBytes should return error", zinfoBytes: nil, expectError: true, }, { name: "empty zinfoBytes should return error", zinfoBytes: []byte{}, expectError: true, }, { name: "zinfoBytes with less than 'header size' bytes header should return error", zinfoBytes: []byte{00}, expectError: true, }, { name: "zinfoBytes with too few checkpoints should return error", zinfoBytes: []byte{ 0xFF, 00, 00, 00, // 255 checkpoints 00, 00, 00, 00, 00, 00, 00, 00, // span size 0 // No checkpoint data. We should not try to read 255 checkpoints from this buffer. }, expectError: true, }, { name: "zinfoBytes with zero checkpoints should succeed", zinfoBytes: []byte{ 00, 00, 00, 00, // 0 checkpoints 00, 00, 00, 00, 00, 00, 00, 00, // span size 0 }, expectError: false, }, { name: "zinfoBytes v1 with zero checkpoints should succeed", zinfoBytes: []byte{ 01, 00, 00, 00, // 1 checkpoint 00, 00, 00, 00, 00, 00, 00, 00, // span size 0 }, expectError: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { _, err := newGzipZinfo(tc.zinfoBytes) if tc.expectError != (err != nil) { t.Fatalf("expect error: %t, actual error: %v", tc.expectError, err) } }) } } func TestExtractDataFromBuffer(t *testing.T) { t.Parallel() testCases := []struct { name string gzipZinfo GzipZinfo compressedBuf []byte uncompressedSize Offset uncompressedOffset Offset spanID SpanID expectError bool }{ { name: "nil buffer should return error", gzipZinfo: GzipZinfo{}, compressedBuf: nil, expectError: true, }, { name: "empty buffer should return error", gzipZinfo: GzipZinfo{}, compressedBuf: []byte{}, expectError: true, }, { name: "negative uncompressedSize should return error", gzipZinfo: GzipZinfo{}, compressedBuf: []byte("foobar"), uncompressedSize: -1, expectError: true, }, { name: "zero uncompressedSize should return empty 
byte slice", gzipZinfo: GzipZinfo{}, compressedBuf: []byte("foobar"), uncompressedSize: 0, expectError: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { data, err := tc.gzipZinfo.ExtractDataFromBuffer(tc.compressedBuf, tc.uncompressedSize, tc.uncompressedOffset, tc.spanID) if tc.expectError != (err != nil) { t.Fatalf("expect error: %t, actual error: %v", tc.expectError, err) } if err == nil && len(data) != int(tc.uncompressedSize) { t.Fatalf("wrong uncompressed size. expect: %d, actual: %d ", len(data), tc.uncompressedSize) } }) } } func TestExtractDataFromFile(t *testing.T) { t.Parallel() testCases := []struct { name string gzipZinfo GzipZinfo filename string uncompressedSize Offset uncompressedOffset Offset expectError bool }{ { name: "negative uncompressedSize should return error", gzipZinfo: GzipZinfo{}, filename: "", uncompressedSize: -1, expectError: true, }, { name: "zero uncompressedSize should return empty byte slice", gzipZinfo: GzipZinfo{}, filename: "", uncompressedSize: 0, expectError: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { data, err := tc.gzipZinfo.ExtractDataFromFile(tc.filename, tc.uncompressedSize, tc.uncompressedOffset) if tc.expectError != (err != nil) { t.Fatalf("expect error: %t, actual error: %v", tc.expectError, err) } if err == nil && len(data) != int(tc.uncompressedSize) { t.Fatalf("wrong uncompressed size. expect: %d, actual: %d ", len(data), tc.uncompressedSize) } }) } } soci-snapshotter-0.4.1/ztoc/compression/tar_zinfo.go000066400000000000000000000142741454010642300226720ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package compression import ( "fmt" "os" zinfo_flatbuffers "github.com/awslabs/soci-snapshotter/ztoc/compression/fbs/zinfo" flatbuffers "github.com/google/flatbuffers/go" ) const ( // `TarZinfo` version. consistent with `GzipZinfo` version zinfoVersion = 2 ) // TarZinfo implements the `Zinfo` interface for uncompressed tar files. // It only needs a span size and tar file size, since a tar file is already // uncompressed. // For tar file, `compressed`-related concepts (e.g., `CompressedArchiveSize`) // are only to santisfy the `Zinfo` interface and equal to their `uncompressed`-equivalent. type TarZinfo struct { version int32 spanSize int64 size int64 } // newTarZinfo creates a new instance of `TarZinfo` from serialized bytes. func newTarZinfo(zinfoBytes []byte) (zinfo *TarZinfo, err error) { defer func() { if r := recover(); r != nil { zinfo = nil err = fmt.Errorf("cannot unmarshal tar zinfo: %w", err) } }() zinfo = new(TarZinfo) zinfoFlatbuf := zinfo_flatbuffers.GetRootAsTarZinfo(zinfoBytes, 0) zinfo.version = zinfoFlatbuf.Version() zinfo.spanSize = zinfoFlatbuf.SpanSize() zinfo.size = zinfoFlatbuf.Size() return zinfo, nil } // newTarZinfoFromFile creates a new instance of `TarZinfo` given tar file name and span size. func newTarZinfoFromFile(tarFile string, spanSize int64) (*TarZinfo, error) { fstat, err := os.Stat(tarFile) if err != nil { return nil, fmt.Errorf("unable to get file stat: %w", err) } return &TarZinfo{ version: zinfoVersion, spanSize: spanSize, size: fstat.Size(), }, nil } // Close doesn't do anything since there is nothing to close/release. 
func (i *TarZinfo) Close() {} // Bytes returns the byte slice containing the `TarZinfo`. Integers are serialized // to `LittleEndian` binaries. func (i *TarZinfo) Bytes() (fb []byte, err error) { defer func() { if r := recover(); r != nil { fb = nil err = fmt.Errorf("failed to generate tar zinfo flatbuf bytes: %w", err) } }() builder := flatbuffers.NewBuilder(0) zinfo_flatbuffers.TarZinfoStart(builder) zinfo_flatbuffers.TarZinfoAddVersion(builder, i.version) zinfo_flatbuffers.TarZinfoAddSpanSize(builder, i.spanSize) zinfo_flatbuffers.TarZinfoAddSize(builder, i.size) tarZinfoFlatbuf := zinfo_flatbuffers.TarZinfoEnd(builder) builder.Finish(tarZinfoFlatbuf) return builder.FinishedBytes(), nil } // MaxSpanID returns the max span ID. func (i *TarZinfo) MaxSpanID() SpanID { res := SpanID(i.size / i.spanSize) if i.size%i.spanSize == 0 { res-- } return res } // SpanSize returns the span size of the constructed zinfo. func (i *TarZinfo) SpanSize() Offset { return Offset(i.spanSize) } // UncompressedOffsetToSpanID returns the ID of the span containing the data pointed by uncompressed offset. func (i *TarZinfo) UncompressedOffsetToSpanID(offset Offset) SpanID { return SpanID(int64(offset) / i.spanSize) } // ExtractDataFromBuffer does sanity checks and returns the bytes specified by // offset and size from the buffer, since for tar file the buffer is already uncompressed. func (i *TarZinfo) ExtractDataFromBuffer(compressedBuf []byte, uncompressedSize, uncompressedOffset Offset, spanID SpanID) ([]byte, error) { if len(compressedBuf) == 0 { return nil, fmt.Errorf("empty compressed buffer") } if uncompressedSize < 0 { return nil, fmt.Errorf("invalid uncompressed size: %d", uncompressedSize) } if uncompressedSize == 0 { return []byte{}, nil } // minus offset from spans before `spanID`. 
uncompressedOffset -= i.StartUncompressedOffset(spanID) return compressedBuf[uncompressedOffset : uncompressedOffset+uncompressedSize], nil } // ExtractDataFromFile does sanity checks and returns the bytes specified by // offset and size by reading from the tar file, since for tar file the buffer is already uncompressed. func (i *TarZinfo) ExtractDataFromFile(fileName string, uncompressedSize, uncompressedOffset Offset) ([]byte, error) { if uncompressedSize < 0 { return nil, fmt.Errorf("invalid uncompressed size: %d", uncompressedSize) } if uncompressedSize == 0 { return []byte{}, nil } f, err := os.Open(fileName) if err != nil { return nil, err } defer f.Close() bytes := make([]byte, uncompressedSize) if n, err := f.ReadAt(bytes, int64(uncompressedOffset)); err != nil || Offset(n) != uncompressedSize { return nil, fmt.Errorf("failed to extract data. expect length: %d, actual length: %d", uncompressedSize, n) } return bytes, nil } // Notice that for tar files, compressed and uncompressed means the same thing // since tar file is already uncompressed. // StartCompressedOffset returns the start offset of the span in the compressed stream. func (i *TarZinfo) StartCompressedOffset(spanID SpanID) Offset { return i.spanIDToOffset(spanID) } // EndCompressedOffset returns the end offset of the span in the compressed stream. If // it's the last span, returns the size of the compressed stream. func (i *TarZinfo) EndCompressedOffset(spanID SpanID, fileSize Offset) Offset { if spanID == i.MaxSpanID() { return fileSize } return i.spanIDToOffset(spanID + 1) } // StartUncompressedOffset returns the start offset of the span in the uncompressed stream. func (i *TarZinfo) StartUncompressedOffset(spanID SpanID) Offset { return i.spanIDToOffset(spanID) } // EndUncompressedOffset returns the end offset of the span in the uncompressed stream. If // it's the last span, returns the size of the uncompressed stream. 
func (i *TarZinfo) EndUncompressedOffset(spanID SpanID, fileSize Offset) Offset { if spanID == i.MaxSpanID() { return fileSize } return i.spanIDToOffset(spanID + 1) } func (i *TarZinfo) spanIDToOffset(spanID SpanID) Offset { return Offset(i.spanSize * int64(spanID)) } soci-snapshotter-0.4.1/ztoc/compression/types.go000066400000000000000000000021411454010642300220310ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package compression // Offset will hold any file size and offset values type Offset int64 // SpanID will hold any span related values (SpanID, MaxSpanID, etc) type SpanID int32 // Compression algorithms used by an image layer. They should be kept consistent // with the return of `DiffCompression` from containerd. // https://github.com/containerd/containerd/blob/v1.7.0-beta.3/images/mediatypes.go#L66 const ( Gzip = "gzip" Zstd = "zstd" Uncompressed = "uncompressed" Unknown = "unknown" ) soci-snapshotter-0.4.1/ztoc/compression/zinfo.go000066400000000000000000000120161454010642300220140ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package compression import ( "fmt" ) // Zinfo is the interface for dealing with compressed data efficiently. It chunks // a compressed stream (e.g. a gzip file) into spans and records the chunk offset, // so that you can interact with the compressed stream per span individually (or in parallel). // For example, you can extract uncompressed data/file from the relevant compressed // spans only (i.e., without uncompressing the whole compress file). // // The interface contains methods that are used to: // 1. build a zinfo (e.g., `SpanSize`); // 2. extract a chunk of uncompressed data (e.g., from a compressed buffer or file); // 3. conversion between span and its start and end offset in the (un)compressed // stream so you can work on the individual span data only. type Zinfo interface { // ExtractDataFromBuffer extracts the uncompressed data from `compressedBuf` and returns // as a byte slice. ExtractDataFromBuffer(compressedBuf []byte, uncompressedSize, uncompressedOffset Offset, spanID SpanID) ([]byte, error) // ExtractDataFromFile extracts the uncompressed data directly from a compressed file // (e.g. a gzip file) and returns as a byte slice. ExtractDataFromFile(fileName string, uncompressedSize, uncompressedOffset Offset) ([]byte, error) // Close releases any resources held by the interface implementation. Close() // Bytes serilizes the underlying zinfo data (depending on implementation) into bytes for storage. Bytes() ([]byte, error) // MaxSpanID returns the maximum span ID after chunking the compress stream into spans. 
MaxSpanID() SpanID // SpanSize returns the span size used to chunk compress stream into spans. SpanSize() Offset // Below funcs need to be part of the interface because of how we currently // extract data from a compressed data stream. Specifically, if we need to // extract a chunk of uncompressed data (e.g. `[start:end]`), we need to: // 1. get the span id of both `start` and `end` (thus `UncompressedOffsetToSpanID`); // 2. know where the uncompressed data `[start:end]` is located in the compressed stream // (thus `StartCompressedOffset` and `EndCompressedOffset`); // 3. to speed up the uncompressed data extraction, we paralellize and extract // the uncompressed data per *span*; // (thus `StartUncompressedOffset` and `EndUncompressedOffset`). // // This may change in the future if we figure out better abstraction (e.g. when // implementing the interface for a new compression like zstd). // UncompressedOffsetToSpanID returns the ID of the span containing given `offset`. UncompressedOffsetToSpanID(offset Offset) SpanID // StartCompressedOffset returns the offset (in compressed stream) // of the 1st byte belonging to `spanID`. StartCompressedOffset(spanID SpanID) Offset // EndCompressedOffset returns the offset (in compressed stream) // of the last byte belonging to `spanID`. If it's the last span, `fileSize` is returned. EndCompressedOffset(spanID SpanID, fileSize Offset) Offset // StartUncompressedOffset returns the offset (in uncompressed stream) // of the 1st byte belonging to `spanID`. StartUncompressedOffset(spanID SpanID) Offset // EndUncompressedOffset returns the offset (in uncompressed stream) // of the last byte belonging to `spanID`. If it's the last span, `fileSize` is returned. EndUncompressedOffset(spanID SpanID, fileSize Offset) Offset } // NewZinfo deseralizes given zinfo bytes into a zinfo struct. // This is often used when you have a serialized zinfo bytes and want to get the zinfo struct. 
func NewZinfo(compressionAlgo string, zinfoBytes []byte) (Zinfo, error) { switch compressionAlgo { case Gzip: return newGzipZinfo(zinfoBytes) case Zstd: return nil, fmt.Errorf("not implemented: %s", Zstd) case Uncompressed, Unknown: return newTarZinfo(zinfoBytes) default: return nil, fmt.Errorf("unexpected compression algorithm: %s", compressionAlgo) } } // NewZinfoFromFile creates a zinfo struct given a compressed file and a span size. // This is often used when you have a compressed file (e.g. gzip) and want to create // a new zinfo for it. func NewZinfoFromFile(compressionAlgo string, filename string, spanSize int64) (Zinfo, error) { switch compressionAlgo { case Gzip: return newGzipZinfoFromFile(filename, spanSize) case Zstd: return nil, fmt.Errorf("not implemented: %s", Zstd) case Uncompressed: return newTarZinfoFromFile(filename, spanSize) default: return nil, fmt.Errorf("unexpected compression algorithm: %s", compressionAlgo) } } soci-snapshotter-0.4.1/ztoc/fbs/000077500000000000000000000000001454010642300165515ustar00rootroot00000000000000soci-snapshotter-0.4.1/ztoc/fbs/ztoc.fbs000066400000000000000000000026261454010642300202320ustar00rootroot00000000000000namespace ztoc; table Xattr { key : string; value : string; } table FileMetadata { name : string; type : string; uncompressed_offset : long; uncompressed_size : long; linkname : string; // Target name of link (valid for TypeLink or TypeSymlink) mode : long; // Permission and mode bits uid : uint32; // User ID of owner gid : uint32; // Group ID of owner uname : string; // User name of owner gname : string; // Group name of owner mod_time : string; // Modification time (is formatted in RFC 3339 format, // with sub-second precision added if present: "2006-01-02T15:04:05.999999999Z07:00") devmajor : long; // Major device number (valid for TypeChar or TypeBlock) devminor : long; // Minor device number (valid for TypeChar or TypeBlock) xattrs : [Xattr]; } enum CompressionAlgorithm : byte { Gzip = 1, 
Uncompressed } table CompressionInfo { compression_algorithm : CompressionAlgorithm = Gzip; max_span_id : int; // The total number of spans in Ztoc - 1 span_digests : [string]; checkpoints : [ubyte]; // the binary data used to decompress the span } table TOC { metadata : [FileMetadata]; } table Ztoc { version : string; // The version of the Ztoc in format ., e.g. 1.0 build_tool_identifier : string; compressed_archive_size : long; uncompressed_archive_size : long; toc : TOC; compression_info : CompressionInfo; } root_type Ztoc; soci-snapshotter-0.4.1/ztoc/fbs/ztoc/000077500000000000000000000000001454010642300175305ustar00rootroot00000000000000soci-snapshotter-0.4.1/ztoc/fbs/ztoc/CompressionAlgorithm.go000066400000000000000000000014071454010642300242310ustar00rootroot00000000000000// Code generated by the FlatBuffers compiler. DO NOT EDIT. package ztoc import "strconv" type CompressionAlgorithm int8 const ( CompressionAlgorithmGzip CompressionAlgorithm = 1 CompressionAlgorithmUncompressed CompressionAlgorithm = 2 ) var EnumNamesCompressionAlgorithm = map[CompressionAlgorithm]string{ CompressionAlgorithmGzip: "Gzip", CompressionAlgorithmUncompressed: "Uncompressed", } var EnumValuesCompressionAlgorithm = map[string]CompressionAlgorithm{ "Gzip": CompressionAlgorithmGzip, "Uncompressed": CompressionAlgorithmUncompressed, } func (v CompressionAlgorithm) String() string { if s, ok := EnumNamesCompressionAlgorithm[v]; ok { return s } return "CompressionAlgorithm(" + strconv.FormatInt(int64(v), 10) + ")" } soci-snapshotter-0.4.1/ztoc/fbs/ztoc/CompressionInfo.go000066400000000000000000000073051454010642300232010ustar00rootroot00000000000000// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
package ztoc import ( flatbuffers "github.com/google/flatbuffers/go" ) type CompressionInfo struct { _tab flatbuffers.Table } func GetRootAsCompressionInfo(buf []byte, offset flatbuffers.UOffsetT) *CompressionInfo { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &CompressionInfo{} x.Init(buf, n+offset) return x } func GetSizePrefixedRootAsCompressionInfo(buf []byte, offset flatbuffers.UOffsetT) *CompressionInfo { n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) x := &CompressionInfo{} x.Init(buf, n+offset+flatbuffers.SizeUint32) return x } func (rcv *CompressionInfo) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *CompressionInfo) Table() flatbuffers.Table { return rcv._tab } func (rcv *CompressionInfo) CompressionAlgorithm() CompressionAlgorithm { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return CompressionAlgorithm(rcv._tab.GetInt8(o + rcv._tab.Pos)) } return 1 } func (rcv *CompressionInfo) MutateCompressionAlgorithm(n CompressionAlgorithm) bool { return rcv._tab.MutateInt8Slot(4, int8(n)) } func (rcv *CompressionInfo) MaxSpanId() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *CompressionInfo) MutateMaxSpanId(n int32) bool { return rcv._tab.MutateInt32Slot(6, n) } func (rcv *CompressionInfo) SpanDigests(j int) []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) } return nil } func (rcv *CompressionInfo) SpanDigestsLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *CompressionInfo) Checkpoints(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *CompressionInfo) CheckpointsLength() int { o := 
flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *CompressionInfo) CheckpointsBytes() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *CompressionInfo) MutateCheckpoints(j int, n byte) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) } return false } func CompressionInfoStart(builder *flatbuffers.Builder) { builder.StartObject(4) } func CompressionInfoAddCompressionAlgorithm(builder *flatbuffers.Builder, compressionAlgorithm CompressionAlgorithm) { builder.PrependInt8Slot(0, int8(compressionAlgorithm), 1) } func CompressionInfoAddMaxSpanId(builder *flatbuffers.Builder, maxSpanId int32) { builder.PrependInt32Slot(1, maxSpanId, 0) } func CompressionInfoAddSpanDigests(builder *flatbuffers.Builder, spanDigests flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(spanDigests), 0) } func CompressionInfoStartSpanDigestsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func CompressionInfoAddCheckpoints(builder *flatbuffers.Builder, checkpoints flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(checkpoints), 0) } func CompressionInfoStartCheckpointsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func CompressionInfoEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } soci-snapshotter-0.4.1/ztoc/fbs/ztoc/FileMetadata.go000066400000000000000000000143611454010642300224040ustar00rootroot00000000000000// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
package ztoc import ( flatbuffers "github.com/google/flatbuffers/go" ) type FileMetadata struct { _tab flatbuffers.Table } func GetRootAsFileMetadata(buf []byte, offset flatbuffers.UOffsetT) *FileMetadata { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &FileMetadata{} x.Init(buf, n+offset) return x } func GetSizePrefixedRootAsFileMetadata(buf []byte, offset flatbuffers.UOffsetT) *FileMetadata { n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) x := &FileMetadata{} x.Init(buf, n+offset+flatbuffers.SizeUint32) return x } func (rcv *FileMetadata) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *FileMetadata) Table() flatbuffers.Table { return rcv._tab } func (rcv *FileMetadata) Name() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *FileMetadata) Type() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *FileMetadata) UncompressedOffset() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateUncompressedOffset(n int64) bool { return rcv._tab.MutateInt64Slot(8, n) } func (rcv *FileMetadata) UncompressedSize() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateUncompressedSize(n int64) bool { return rcv._tab.MutateInt64Slot(10, n) } func (rcv *FileMetadata) Linkname() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *FileMetadata) Mode() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateMode(n int64) bool { return rcv._tab.MutateInt64Slot(14, n) } 
func (rcv *FileMetadata) Uid() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateUid(n uint32) bool { return rcv._tab.MutateUint32Slot(16, n) } func (rcv *FileMetadata) Gid() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateGid(n uint32) bool { return rcv._tab.MutateUint32Slot(18, n) } func (rcv *FileMetadata) Uname() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *FileMetadata) Gname() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *FileMetadata) ModTime() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *FileMetadata) Devmajor() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateDevmajor(n int64) bool { return rcv._tab.MutateInt64Slot(26, n) } func (rcv *FileMetadata) Devminor() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(28)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *FileMetadata) MutateDevminor(n int64) bool { return rcv._tab.MutateInt64Slot(28, n) } func (rcv *FileMetadata) Xattrs(obj *Xattr, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(30)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 4 x = rcv._tab.Indirect(x) obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *FileMetadata) XattrsLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(30)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func FileMetadataStart(builder *flatbuffers.Builder) { builder.StartObject(14) } func 
FileMetadataAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0) } func FileMetadataAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0) } func FileMetadataAddUncompressedOffset(builder *flatbuffers.Builder, uncompressedOffset int64) { builder.PrependInt64Slot(2, uncompressedOffset, 0) } func FileMetadataAddUncompressedSize(builder *flatbuffers.Builder, uncompressedSize int64) { builder.PrependInt64Slot(3, uncompressedSize, 0) } func FileMetadataAddLinkname(builder *flatbuffers.Builder, linkname flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(linkname), 0) } func FileMetadataAddMode(builder *flatbuffers.Builder, mode int64) { builder.PrependInt64Slot(5, mode, 0) } func FileMetadataAddUid(builder *flatbuffers.Builder, uid uint32) { builder.PrependUint32Slot(6, uid, 0) } func FileMetadataAddGid(builder *flatbuffers.Builder, gid uint32) { builder.PrependUint32Slot(7, gid, 0) } func FileMetadataAddUname(builder *flatbuffers.Builder, uname flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(uname), 0) } func FileMetadataAddGname(builder *flatbuffers.Builder, gname flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(gname), 0) } func FileMetadataAddModTime(builder *flatbuffers.Builder, modTime flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(modTime), 0) } func FileMetadataAddDevmajor(builder *flatbuffers.Builder, devmajor int64) { builder.PrependInt64Slot(11, devmajor, 0) } func FileMetadataAddDevminor(builder *flatbuffers.Builder, devminor int64) { builder.PrependInt64Slot(12, devminor, 0) } func FileMetadataAddXattrs(builder *flatbuffers.Builder, xattrs flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(13, flatbuffers.UOffsetT(xattrs), 0) } func FileMetadataStartXattrsVector(builder *flatbuffers.Builder, numElems 
int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func FileMetadataEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } soci-snapshotter-0.4.1/ztoc/fbs/ztoc/TOC.go000066400000000000000000000030761454010642300205120ustar00rootroot00000000000000// Code generated by the FlatBuffers compiler. DO NOT EDIT. package ztoc import ( flatbuffers "github.com/google/flatbuffers/go" ) type TOC struct { _tab flatbuffers.Table } func GetRootAsTOC(buf []byte, offset flatbuffers.UOffsetT) *TOC { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &TOC{} x.Init(buf, n+offset) return x } func GetSizePrefixedRootAsTOC(buf []byte, offset flatbuffers.UOffsetT) *TOC { n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) x := &TOC{} x.Init(buf, n+offset+flatbuffers.SizeUint32) return x } func (rcv *TOC) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *TOC) Table() flatbuffers.Table { return rcv._tab } func (rcv *TOC) Metadata(obj *FileMetadata, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 4 x = rcv._tab.Indirect(x) obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *TOC) MetadataLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func TOCStart(builder *flatbuffers.Builder) { builder.StartObject(1) } func TOCAddMetadata(builder *flatbuffers.Builder, metadata flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(metadata), 0) } func TOCStartMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func TOCEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } soci-snapshotter-0.4.1/ztoc/fbs/ztoc/Xattr.go000066400000000000000000000027511454010642300211660ustar00rootroot00000000000000// Code generated by the 
FlatBuffers compiler. DO NOT EDIT. package ztoc import ( flatbuffers "github.com/google/flatbuffers/go" ) type Xattr struct { _tab flatbuffers.Table } func GetRootAsXattr(buf []byte, offset flatbuffers.UOffsetT) *Xattr { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &Xattr{} x.Init(buf, n+offset) return x } func GetSizePrefixedRootAsXattr(buf []byte, offset flatbuffers.UOffsetT) *Xattr { n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) x := &Xattr{} x.Init(buf, n+offset+flatbuffers.SizeUint32) return x } func (rcv *Xattr) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *Xattr) Table() flatbuffers.Table { return rcv._tab } func (rcv *Xattr) Key() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Xattr) Value() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func XattrStart(builder *flatbuffers.Builder) { builder.StartObject(2) } func XattrAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) } func XattrAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0) } func XattrEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } soci-snapshotter-0.4.1/ztoc/fbs/ztoc/Ztoc.go000066400000000000000000000063161454010642300210040ustar00rootroot00000000000000// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
package ztoc import ( flatbuffers "github.com/google/flatbuffers/go" ) type Ztoc struct { _tab flatbuffers.Table } func GetRootAsZtoc(buf []byte, offset flatbuffers.UOffsetT) *Ztoc { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &Ztoc{} x.Init(buf, n+offset) return x } func GetSizePrefixedRootAsZtoc(buf []byte, offset flatbuffers.UOffsetT) *Ztoc { n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) x := &Ztoc{} x.Init(buf, n+offset+flatbuffers.SizeUint32) return x } func (rcv *Ztoc) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *Ztoc) Table() flatbuffers.Table { return rcv._tab } func (rcv *Ztoc) Version() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Ztoc) BuildToolIdentifier() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Ztoc) CompressedArchiveSize() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *Ztoc) MutateCompressedArchiveSize(n int64) bool { return rcv._tab.MutateInt64Slot(8, n) } func (rcv *Ztoc) UncompressedArchiveSize() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *Ztoc) MutateUncompressedArchiveSize(n int64) bool { return rcv._tab.MutateInt64Slot(10, n) } func (rcv *Ztoc) Toc(obj *TOC) *TOC { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { x := rcv._tab.Indirect(o + rcv._tab.Pos) if obj == nil { obj = new(TOC) } obj.Init(rcv._tab.Bytes, x) return obj } return nil } func (rcv *Ztoc) CompressionInfo(obj *CompressionInfo) *CompressionInfo { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { x := rcv._tab.Indirect(o + rcv._tab.Pos) if obj == nil { obj = new(CompressionInfo) } obj.Init(rcv._tab.Bytes, x) return obj } return nil 
} func ZtocStart(builder *flatbuffers.Builder) { builder.StartObject(6) } func ZtocAddVersion(builder *flatbuffers.Builder, version flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(version), 0) } func ZtocAddBuildToolIdentifier(builder *flatbuffers.Builder, buildToolIdentifier flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(buildToolIdentifier), 0) } func ZtocAddCompressedArchiveSize(builder *flatbuffers.Builder, compressedArchiveSize int64) { builder.PrependInt64Slot(2, compressedArchiveSize, 0) } func ZtocAddUncompressedArchiveSize(builder *flatbuffers.Builder, uncompressedArchiveSize int64) { builder.PrependInt64Slot(3, uncompressedArchiveSize, 0) } func ZtocAddToc(builder *flatbuffers.Builder, toc flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(toc), 0) } func ZtocAddCompressionInfo(builder *flatbuffers.Builder, compressionInfo flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(compressionInfo), 0) } func ZtocEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } soci-snapshotter-0.4.1/ztoc/testutil.go000066400000000000000000000027041454010642300202060ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "bytes" "fmt" "io" "os" "testing" "github.com/awslabs/soci-snapshotter/util/testutil" ) // BuildZtocReader creates the tar gz file for tar entries. 
It returns ztoc and io.SectionReader of the file. func BuildZtocReader(_ *testing.T, ents []testutil.TarEntry, compressionLevel int, spanSize int64, opts ...testutil.BuildTarOption) (*Ztoc, *io.SectionReader, error) { tarReader := testutil.BuildTarGz(ents, compressionLevel, opts...) tarFileName, tarData, err := testutil.WriteTarToTempFile("tmp.*", tarReader) if err != nil { return nil, nil, err } defer os.Remove(tarFileName) sr := io.NewSectionReader(bytes.NewReader(tarData), 0, int64(len(tarData))) ztoc, err := NewBuilder("test").BuildZtoc(tarFileName, spanSize) if err != nil { return nil, nil, fmt.Errorf("failed to build sample ztoc: %v", err) } return ztoc, sr, nil } soci-snapshotter-0.4.1/ztoc/toc_builder.go000066400000000000000000000127621454010642300206310ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "archive/tar" "compress/gzip" "fmt" "io" "os" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/klauspost/compress/zstd" ) // TarProvider creates a tar reader from a compressed file reader (e.g., a gzip file reader), // which can be used by `TocBuilder` to create `TOC` from it. type TarProvider func(file *os.File) (io.Reader, error) // TarProviderGzip creates a tar reader from gzip reader. func TarProviderGzip(compressedReader *os.File) (io.Reader, error) { return gzip.NewReader(compressedReader) } // TarProviderZstd creates a tar reader from zstd reader. 
func TarProviderZstd(compressedReader *os.File) (io.Reader, error) { return zstd.NewReader(compressedReader) } // TarProviderTar return the tar file directly as the input to // `tar.NewReader`. func TarProviderTar(compressedReader *os.File) (io.Reader, error) { return compressedReader, nil } // TocBuilder builds the `TOC` part of a ztoc and works with different // compression algorithms (e.g., gzip, zstd) with a registered `TarProvider`. type TocBuilder struct { tarProviders map[string]TarProvider } // NewTocBuilder return a `TocBuilder` struct. Users need to call `RegisterTarProvider` // to support a specific compression algorithm. func NewTocBuilder() TocBuilder { return TocBuilder{ tarProviders: make(map[string]TarProvider), } } // RegisterTarProvider adds a TarProvider for a compression algorithm. func (tb TocBuilder) RegisterTarProvider(algorithm string, provider TarProvider) { if tb.tarProviders == nil { tb.tarProviders = make(map[string]TarProvider) } tb.tarProviders[algorithm] = provider } // CheckCompressionAlgorithm checks if a compression algorithm is supported. func (tb TocBuilder) CheckCompressionAlgorithm(algorithm string) bool { _, ok := tb.tarProviders[algorithm] return ok } // TocFromFile creates a `TOC` given a layer blob filename and the compression // algorithm used by the layer. func (tb TocBuilder) TocFromFile(algorithm, filename string) (TOC, compression.Offset, error) { if !tb.CheckCompressionAlgorithm(algorithm) { return TOC{}, 0, fmt.Errorf("unsupported compression algorithm: %s", algorithm) } fm, uncompressedArchiveSize, err := tb.getFileMetadata(algorithm, filename) if err != nil { return TOC{}, 0, err } return TOC{FileMetadata: fm}, uncompressedArchiveSize, nil } // getFileMetadata creates `FileMetadata` for each file within the compressed file // and calculate the uncompressed size of the passed file. 
func (tb TocBuilder) getFileMetadata(algorithm, filename string) ([]FileMetadata, compression.Offset, error) { // read compress file and create compress tar reader. compressFile, err := os.Open(filename) if err != nil { return nil, 0, fmt.Errorf("could not open file for reading: %v", err) } defer compressFile.Close() compressTarReader, err := tb.tarProviders[algorithm](compressFile) if err != nil { return nil, 0, err } md, uncompressFileSize, err := metadataFromTarReader(compressTarReader) if err != nil { return nil, 0, err } return md, uncompressFileSize, nil } // metadataFromTarReader reads every file from tar reader `sr` and creates // `FileMetadata` for each file. func metadataFromTarReader(r io.Reader) ([]FileMetadata, compression.Offset, error) { pt := &positionTrackerReader{r: r} tarRdr := tar.NewReader(pt) var md []FileMetadata for { hdr, err := tarRdr.Next() if err != nil { if err == io.EOF { break } return nil, 0, fmt.Errorf("error while reading tar header: %w", err) } fileType, err := getType(hdr) if err != nil { return nil, 0, err } metadataEntry := FileMetadata{ Name: hdr.Name, Type: fileType, UncompressedOffset: pt.CurrentPos(), UncompressedSize: compression.Offset(hdr.Size), Linkname: hdr.Linkname, Mode: hdr.Mode, UID: hdr.Uid, GID: hdr.Gid, Uname: hdr.Uname, Gname: hdr.Gname, ModTime: hdr.ModTime, Devmajor: hdr.Devmajor, Devminor: hdr.Devminor, Xattrs: hdr.PAXRecords, } md = append(md, metadataEntry) } return md, pt.CurrentPos(), nil } func getType(header *tar.Header) (fileType string, e error) { switch header.Typeflag { case tar.TypeLink: fileType = "hardlink" case tar.TypeSymlink: fileType = "symlink" case tar.TypeDir: fileType = "dir" case tar.TypeReg: fileType = "reg" case tar.TypeChar: fileType = "char" case tar.TypeBlock: fileType = "block" case tar.TypeFifo: fileType = "fifo" default: return "", fmt.Errorf("unsupported input tar entry %q", header.Typeflag) } return } type positionTrackerReader struct { r io.Reader pos compression.Offset } 
func (p *positionTrackerReader) Read(b []byte) (int, error) { n, err := p.r.Read(b) p.pos += compression.Offset(n) return n, err } func (p *positionTrackerReader) CurrentPos() compression.Offset { return p.pos } soci-snapshotter-0.4.1/ztoc/toc_builder_test.go000066400000000000000000000065771454010642300216770ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "compress/gzip" "io" "os" "testing" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/klauspost/compress/zstd" ) func TestTocBuilder(t *testing.T) { t.Parallel() tarEntries := []testutil.TarEntry{ testutil.File("test1", string(testutil.RandomByteData(10000000))), testutil.File("test2", string(testutil.RandomByteData(20000000))), } tarReader := func(entries []testutil.TarEntry) io.Reader { return testutil.BuildTar(entries) } gzipTarReader := func(entries []testutil.TarEntry) io.Reader { return testutil.BuildTarGz(entries, gzip.BestCompression) } zstdTarReader := func(entries []testutil.TarEntry) io.Reader { return testutil.BuildTarZstd(entries, int(zstd.SpeedDefault)) } testCases := []struct { name string algorithm string tarEntries []testutil.TarEntry makeTarReader func(entries []testutil.TarEntry) io.Reader expectErr bool }{ { name: "TocBuilder supports gzip", algorithm: compression.Gzip, tarEntries: tarEntries, makeTarReader: gzipTarReader, expectErr: false, }, { name: "TocBuilder 
supports zstd", algorithm: compression.Zstd, tarEntries: tarEntries, makeTarReader: zstdTarReader, expectErr: false, }, { name: "TocBuilder supports uncompressed layer (tar)", algorithm: compression.Uncompressed, tarEntries: tarEntries, makeTarReader: tarReader, expectErr: false, }, { name: "TocBuilder doesn't support foobar", algorithm: "foobar", tarEntries: tarEntries, makeTarReader: tarReader, expectErr: true, }, { name: "TocBuilder returns error if given tar file and algorithm mismatch", algorithm: compression.Zstd, tarEntries: tarEntries, makeTarReader: gzipTarReader, expectErr: true, }, } builder := NewTocBuilder() builder.RegisterTarProvider(compression.Gzip, TarProviderGzip) builder.RegisterTarProvider(compression.Zstd, TarProviderZstd) builder.RegisterTarProvider(compression.Uncompressed, TarProviderTar) for _, tt := range testCases { tt := tt t.Run(tt.name, func(t *testing.T) { tarReader := tt.makeTarReader(tt.tarEntries) tarFile, _, err := testutil.WriteTarToTempFile("toc_builder", tarReader) if err != nil { t.Fatalf("failed to write content to tar file: %v", err) } defer os.Remove(tarFile) if toc, _, err := builder.TocFromFile(tt.algorithm, tarFile); err != nil { if !tt.expectErr { t.Fatalf("unexpected error: %v", err) } } else { if len(toc.FileMetadata) != len(tt.tarEntries) { t.Fatalf("count of file metadata mismatch, expect: %d, actual: %d", len(tt.tarEntries), len(toc.FileMetadata)) } } }) } } soci-snapshotter-0.4.1/ztoc/zinfo_builder.go000066400000000000000000000072511454010642300211660ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "fmt" "io" "os" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/opencontainers/go-digest" ) // ZinfoBuilder builds the `zinfo` part of a ztoc. This interface should be // implemented for each compression algorithm we support. type ZinfoBuilder interface { // ZinfoFromFile builds zinfo given a compressed tar filename and span size, and calculate the size of the file. ZinfoFromFile(filename string, spanSize int64) (zinfo CompressionInfo, fs compression.Offset, err error) } type gzipZinfoBuilder struct{} // ZinfoFromFile creates zinfo for a gzip file. The underlying zinfo object (i.e. `GzipZinfo`) // is stored in `CompressionInfo.Checkpoints` as byte slice. 
func (gzb gzipZinfoBuilder) ZinfoFromFile(filename string, spanSize int64) (zinfo CompressionInfo, fs compression.Offset, err error) { index, err := compression.NewZinfoFromFile(compression.Gzip, filename, spanSize) if err != nil { return } defer index.Close() fs, err = getFileSize(filename) if err != nil { return } digests, err := getPerSpanDigests(filename, int64(fs), index) if err != nil { return } checkpoints, err := index.Bytes() if err != nil { return } return CompressionInfo{ MaxSpanID: index.MaxSpanID(), SpanDigests: digests, Checkpoints: checkpoints, CompressionAlgorithm: compression.Gzip, }, fs, nil } type tarZinfoBuilder struct{} func (tzb tarZinfoBuilder) ZinfoFromFile(filename string, spanSize int64) (zinfo CompressionInfo, fs compression.Offset, err error) { index, err := compression.NewZinfoFromFile(compression.Uncompressed, filename, spanSize) if err != nil { return } defer index.Close() fs, err = getFileSize(filename) if err != nil { return } digests, err := getPerSpanDigests(filename, int64(fs), index) if err != nil { return } checkpoints, err := index.Bytes() if err != nil { return } return CompressionInfo{ MaxSpanID: index.MaxSpanID(), SpanDigests: digests, Checkpoints: checkpoints, CompressionAlgorithm: compression.Uncompressed, }, fs, nil } func getPerSpanDigests(filename string, fileSize int64, index compression.Zinfo) ([]digest.Digest, error) { file, err := os.Open(filename) if err != nil { return nil, fmt.Errorf("could not open file for reading: %w", err) } defer file.Close() var digests []digest.Digest var i compression.SpanID maxSpanID := index.MaxSpanID() for i = 0; i <= maxSpanID; i++ { startOffset := index.StartCompressedOffset(i) endOffset := index.EndCompressedOffset(i, compression.Offset(fileSize)) section := io.NewSectionReader(file, int64(startOffset), int64(endOffset-startOffset)) dgst, err := digest.FromReader(section) if err != nil { return nil, fmt.Errorf("unable to compute digest for section; start=%d, end=%d, file=%s, 
size=%d", startOffset, endOffset, filename, fileSize) } digests = append(digests, dgst) } return digests, nil } func getFileSize(file string) (compression.Offset, error) { st, err := os.Stat(file) if err != nil { return 0, err } return compression.Offset(st.Size()), nil } soci-snapshotter-0.4.1/ztoc/ztoc.go000066400000000000000000000146401454010642300173120ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "archive/tar" "context" "fmt" "io" "os" "time" "github.com/opencontainers/go-digest" "golang.org/x/sync/errgroup" "github.com/awslabs/soci-snapshotter/ztoc/compression" ) // Version defines the version of a Ztoc. type Version string // Ztoc versions available. const ( Version09 Version = "0.9" ) // Ztoc is a table of contents for compressed data which consists 2 parts: // // (1). toc (`TOC`): a table of contents containing file metadata and its // offset in the decompressed TAR archive. // (2). zinfo (`CompressionInfo`): a collection of "checkpoints" of the // state of the compression engine at various points in the layer. type Ztoc struct { TOC CompressionInfo Version Version BuildToolIdentifier string CompressedArchiveSize compression.Offset UncompressedArchiveSize compression.Offset } // CompressionInfo is the "zinfo" part of ztoc including the `Checkpoints` data // and other metadata such as all span digests. 
type CompressionInfo struct { MaxSpanID compression.SpanID //The total number of spans in Ztoc - 1 SpanDigests []digest.Digest Checkpoints []byte CompressionAlgorithm string } // TOC is the "ztoc" part of ztoc including metadata of all files in the compressed // data (e.g., a gzip tar file). type TOC struct { FileMetadata []FileMetadata } // FileMetadata contains metadata of a file in the compressed data. type FileMetadata struct { Name string Type string UncompressedOffset compression.Offset UncompressedSize compression.Offset Linkname string // Target name of link (valid for TypeLink or TypeSymlink) Mode int64 // Permission and mode bits UID int // User ID of owner GID int // Group ID of owner Uname string // User name of owner Gname string // Group name of owner ModTime time.Time // Modification time Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) Xattrs map[string]string } // FileMode gets file mode for the file metadata func (src FileMetadata) FileMode() (m os.FileMode) { // FileMetadata.Mode is tar.Header.Mode so we can understand the these bits using `tar` pkg. m = (&tar.Header{Mode: src.Mode}).FileInfo().Mode() & (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky) switch src.Type { case "dir": m |= os.ModeDir case "symlink": m |= os.ModeSymlink case "char": m |= os.ModeDevice | os.ModeCharDevice case "block": m |= os.ModeDevice case "fifo": m |= os.ModeNamedPipe } return m } // MetadataEntry is used to locate a file based on its metadata. type MetadataEntry struct { UncompressedSize compression.Offset UncompressedOffset compression.Offset } // GetMetadataEntry gets MetadataEntry given a filename. 
func (toc TOC) GetMetadataEntry(filename string) (MetadataEntry, error) { for _, v := range toc.FileMetadata { if v.Name == filename { if v.Linkname != "" { return toc.GetMetadataEntry(v.Linkname) } return MetadataEntry{ UncompressedSize: v.UncompressedSize, UncompressedOffset: v.UncompressedOffset, }, nil } } return MetadataEntry{}, fmt.Errorf("file %s does not exist in metadata", filename) } // ExtractFile extracts a file from compressed data (as a reader) and returns the // byte data. func (zt Ztoc) ExtractFile(r *io.SectionReader, filename string) ([]byte, error) { entry, err := zt.GetMetadataEntry(filename) if err != nil { return nil, err } if entry.UncompressedSize == 0 { return []byte{}, nil } zinfo, err := zt.Zinfo() if err != nil { return nil, nil } defer zinfo.Close() spanStart := zinfo.UncompressedOffsetToSpanID(entry.UncompressedOffset) spanEnd := zinfo.UncompressedOffsetToSpanID(entry.UncompressedOffset + entry.UncompressedSize) numSpans := spanEnd - spanStart + 1 checkpoints := make([]compression.Offset, numSpans+1) checkpoints[0] = zinfo.StartCompressedOffset(spanStart) var i compression.SpanID for i = 0; i < numSpans; i++ { checkpoints[i+1] = zinfo.EndCompressedOffset(spanStart+i, zt.CompressedArchiveSize) } bufSize := checkpoints[len(checkpoints)-1] - checkpoints[0] buf := make([]byte, bufSize) eg, _ := errgroup.WithContext(context.Background()) // Fetch all span data in parallel for i = 0; i < numSpans; i++ { i := i eg.Go(func() error { rangeStart := checkpoints[i] rangeEnd := checkpoints[i+1] n, err := r.ReadAt(buf[rangeStart-checkpoints[0]:rangeEnd-checkpoints[0]], int64(rangeStart)) // need to convert rangeStart to int64 to use in ReadAt if err != nil && err != io.EOF { return err } bytesToFetch := rangeEnd - rangeStart if n != int(bytesToFetch) { return fmt.Errorf("unexpected data size. 
read = %d, expected = %d", n, bytesToFetch) } return nil }) } if err := eg.Wait(); err != nil { return nil, err } bytes, err := zinfo.ExtractDataFromBuffer(buf, entry.UncompressedSize, entry.UncompressedOffset, spanStart) if err != nil { return nil, err } return bytes, nil } // ExtractFromTarGz extracts data given a gzip tar file (`gz`) and its `ztoc`. func (zt Ztoc) ExtractFromTarGz(gz string, filename string) (string, error) { entry, err := zt.GetMetadataEntry(filename) if err != nil { return "", err } if entry.UncompressedSize == 0 { return "", nil } zinfo, err := zt.Zinfo() if err != nil { return "", err } defer zinfo.Close() bytes, err := zinfo.ExtractDataFromFile(gz, entry.UncompressedSize, entry.UncompressedOffset) if err != nil { return "", err } return string(bytes), nil } // Zinfo deserilizes and returns a Zinfo based on the zinfo bytes and compression // algorithm in the ztoc. func (zt Ztoc) Zinfo() (compression.Zinfo, error) { return compression.NewZinfo(zt.CompressionAlgorithm, zt.Checkpoints) } soci-snapshotter-0.4.1/ztoc/ztoc_builder.go000066400000000000000000000106121454010642300210130ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "fmt" "github.com/awslabs/soci-snapshotter/ztoc/compression" ) // Builder holds a single `TocBuilder` that builds toc, and one `ZinfoBuilder` // *per* compression algorithm that builds zinfo. `TocBuilder` is shared by different // compression algorithms. 
Which `ZinfoBuilder` is used depends on the compression // algorithm used by the layer. type Builder struct { tocBuilder TocBuilder zinfoBuilders map[string]ZinfoBuilder buildToolIdentifier string } // NewBuilder creates a `Builder` used to build ztocs. By default it supports gzip, // user can register new compression algorithms by calling `RegisterCompressionAlgorithm`. func NewBuilder(buildToolIdentifier string) *Builder { builder := Builder{ tocBuilder: NewTocBuilder(), zinfoBuilders: make(map[string]ZinfoBuilder), buildToolIdentifier: buildToolIdentifier, } builder.RegisterCompressionAlgorithm(compression.Gzip, TarProviderGzip, gzipZinfoBuilder{}) builder.RegisterCompressionAlgorithm(compression.Uncompressed, TarProviderTar, tarZinfoBuilder{}) builder.RegisterCompressionAlgorithm(compression.Unknown, TarProviderTar, tarZinfoBuilder{}) return &builder } // buildConfig contains configuration used when `ztoc.Builder` builds a `Ztoc`. type buildConfig struct { algorithm string } // BuildOption specifies a change to `buildConfig` when building a ztoc. type BuildOption func(opt *buildConfig) error // WithCompression specifies which compression algorithm is used by the layer. func WithCompression(algorithm string) BuildOption { return func(opt *buildConfig) error { opt.algorithm = algorithm return nil } } // defaultBuildConfig creates a `buildConfig` with default values. func defaultBuildConfig() buildConfig { return buildConfig{ algorithm: compression.Gzip, // use gzip by default } } // BuildZtoc builds a `Ztoc` given the filename of a layer blob. By default it assumes // the layer is compressed using `gzip`, unless specified via `WithCompression`. 
func (b *Builder) BuildZtoc(filename string, span int64, options ...BuildOption) (*Ztoc, error) { if filename == "" { return nil, fmt.Errorf("need to provide a compressed filename") } opt := defaultBuildConfig() for _, f := range options { err := f(&opt) if err != nil { return nil, err } } if !b.CheckCompressionAlgorithm(opt.algorithm) { return nil, fmt.Errorf("unsupported compression algorithm, supported: gzip, got: %s", opt.algorithm) } compressionInfo, fs, err := b.zinfoBuilders[opt.algorithm].ZinfoFromFile(filename, span) if err != nil { return nil, err } toc, uncompressedArchiveSize, err := b.tocBuilder.TocFromFile(opt.algorithm, filename) if err != nil { return nil, err } return &Ztoc{ Version: Version09, TOC: toc, CompressedArchiveSize: fs, UncompressedArchiveSize: uncompressedArchiveSize, BuildToolIdentifier: b.buildToolIdentifier, CompressionInfo: compressionInfo, }, nil } // RegisterCompressionAlgorithm supports a new compression algorithm in `ztoc.Builder`. func (b *Builder) RegisterCompressionAlgorithm(name string, tarProvider TarProvider, zinfoBuilder ZinfoBuilder) { if b.zinfoBuilders == nil { b.zinfoBuilders = make(map[string]ZinfoBuilder) } b.zinfoBuilders[name] = zinfoBuilder b.tocBuilder.RegisterTarProvider(name, tarProvider) } // CheckCompressionAlgorithm checks if a compression algorithm is supported. // // The algorithm has to be supported by both (1) `tocBuilder` (straightforward, // create a tar reader from the compressed io.reader in compressionFileReader) // and (2) `zinfoBuilder` (require zinfo impl, see `compression/gzip_zinfo.go` as an example). func (b *Builder) CheckCompressionAlgorithm(algorithm string) bool { _, ok := b.zinfoBuilders[algorithm] return ok && b.tocBuilder.CheckCompressionAlgorithm(algorithm) } soci-snapshotter-0.4.1/ztoc/ztoc_marshaler.go000066400000000000000000000236101454010642300213450ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "bytes" "fmt" "io" "sort" "strings" "time" "github.com/awslabs/soci-snapshotter/ztoc/compression" ztoc_flatbuffers "github.com/awslabs/soci-snapshotter/ztoc/fbs/ztoc" flatbuffers "github.com/google/flatbuffers/go" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // Marshal serializes Ztoc to its flatbuffers schema and returns a reader along with the descriptor (digest and size only). // If not successful, it will return an error. func Marshal(ztoc *Ztoc) (io.Reader, ocispec.Descriptor, error) { flatbuf, err := ztocToFlatbuffer(ztoc) if err != nil { return nil, ocispec.Descriptor{}, err } buf := bytes.NewReader(flatbuf) dgst := digest.FromBytes(flatbuf) size := len(flatbuf) return buf, ocispec.Descriptor{ Digest: dgst, Size: int64(size), }, nil } // Unmarshal takes the reader with flatbuffers byte stream and deserializes it ztoc. // In case if there's any error situation during deserialization from flatbuffers, there will be an error returned. 
func Unmarshal(serializedZtoc io.Reader) (*Ztoc, error) {
	flatbuf, err := io.ReadAll(serializedZtoc)
	if err != nil {
		return nil, err
	}
	return flatbufToZtoc(flatbuf)
}

// flatbufToZtoc deserializes a flatbuffers byte slice into a Ztoc.
// Flatbuffers accessors panic on malformed input, so the deferred recover
// converts any such panic into a regular error instead of crashing.
func flatbufToZtoc(flatbuffer []byte) (z *Ztoc, err error) {
	defer func() {
		if r := recover(); r != nil {
			z = nil
			err = fmt.Errorf("cannot unmarshal ztoc: %v", r)
		}
	}()
	// ztoc - metadata
	ztoc := new(Ztoc)
	ztocFlatbuf := ztoc_flatbuffers.GetRootAsZtoc(flatbuffer, 0)
	ztoc.Version = Version(ztocFlatbuf.Version())
	ztoc.BuildToolIdentifier = string(ztocFlatbuf.BuildToolIdentifier())
	ztoc.CompressedArchiveSize = compression.Offset(ztocFlatbuf.CompressedArchiveSize())
	ztoc.UncompressedArchiveSize = compression.Offset(ztocFlatbuf.UncompressedArchiveSize())
	// ztoc - toc
	toc := new(ztoc_flatbuffers.TOC)
	ztocFlatbuf.Toc(toc)
	metadata := make([]FileMetadata, toc.MetadataLength())
	ztoc.TOC = TOC{
		FileMetadata: metadata,
	}
	// Copy each flatbuffer file entry into a native FileMetadata.
	for i := 0; i < toc.MetadataLength(); i++ {
		metadataEntry := new(ztoc_flatbuffers.FileMetadata)
		toc.Metadata(metadataEntry, i)
		var me FileMetadata
		me.Name = string(metadataEntry.Name())
		me.Type = string(metadataEntry.Type())
		me.UncompressedOffset = compression.Offset(metadataEntry.UncompressedOffset())
		me.UncompressedSize = compression.Offset(metadataEntry.UncompressedSize())
		me.Linkname = string(metadataEntry.Linkname())
		me.Mode = metadataEntry.Mode()
		me.UID = int(metadataEntry.Uid())
		me.GID = int(metadataEntry.Gid())
		me.Uname = string(metadataEntry.Uname())
		me.Gname = string(metadataEntry.Gname())
		modTime := new(time.Time)
		// NOTE(review): UnmarshalText error is ignored, so a malformed
		// timestamp silently becomes the zero time — confirm intended.
		modTime.UnmarshalText(metadataEntry.ModTime())
		me.ModTime = *modTime
		me.Devmajor = metadataEntry.Devmajor()
		me.Devminor = metadataEntry.Devminor()
		me.Xattrs = make(map[string]string)
		for j := 0; j < metadataEntry.XattrsLength(); j++ {
			xattrEntry := new(ztoc_flatbuffers.Xattr)
			metadataEntry.Xattrs(xattrEntry, j)
			key := string(xattrEntry.Key())
			value := string(xattrEntry.Value())
			me.Xattrs[key] = value
		}
		ztoc.FileMetadata[i] = me
	}
	// ztoc - zinfo
	compressionInfo := new(ztoc_flatbuffers.CompressionInfo)
	ztocFlatbuf.CompressionInfo(compressionInfo)
	ztoc.MaxSpanID = compression.SpanID(compressionInfo.MaxSpanId())
	ztoc.SpanDigests = make([]digest.Digest, compressionInfo.SpanDigestsLength())
	for i := 0; i < compressionInfo.SpanDigestsLength(); i++ {
		// NOTE(review): digest.Parse error is ignored; an invalid entry
		// yields an empty digest in SpanDigests — confirm intended.
		dgst, _ := digest.Parse(string(compressionInfo.SpanDigests(i)))
		ztoc.SpanDigests[i] = dgst
	}
	ztoc.Checkpoints = compressionInfo.CheckpointsBytes()
	// Flatbuf enum names are capitalized; native representation is lower-case.
	ztoc.CompressionAlgorithm = strings.ToLower(compressionInfo.CompressionAlgorithm().String())
	return ztoc, nil
}

// ztocToFlatbuffer serializes a Ztoc into flatbuffers bytes. Offsets (strings,
// vectors, sub-tables) must be created before the enclosing table is started,
// which dictates the ordering below. A recover turns builder panics into errors.
func ztocToFlatbuffer(ztoc *Ztoc) (fb []byte, err error) {
	defer func() {
		if r := recover(); r != nil {
			fb = nil
			err = fmt.Errorf("cannot marshal Ztoc to flatbuffers")
		}
	}()
	// ztoc - metadata
	builder := flatbuffers.NewBuilder(0)
	version := builder.CreateString(string(ztoc.Version))
	buildToolIdentifier := builder.CreateString(ztoc.BuildToolIdentifier)
	// ztoc - toc
	metadataOffsetList := make([]flatbuffers.UOffsetT, len(ztoc.FileMetadata))
	for i := len(ztoc.FileMetadata) - 1; i >= 0; i-- {
		me := ztoc.FileMetadata[i]
		// preparing the individual file metadata element
		metadataOffsetList[i] = prepareMetadataOffset(builder, me)
	}
	// Vectors are built back-to-front (flatbuffers prepend semantics).
	ztoc_flatbuffers.TOCStartMetadataVector(builder, len(ztoc.FileMetadata))
	for i := len(metadataOffsetList) - 1; i >= 0; i-- {
		builder.PrependUOffsetT(metadataOffsetList[i])
	}
	metadata := builder.EndVector(len(ztoc.FileMetadata))
	ztoc_flatbuffers.TOCStart(builder)
	ztoc_flatbuffers.TOCAddMetadata(builder, metadata)
	toc := ztoc_flatbuffers.TOCEnd(builder)
	// ztoc - zinfo
	checkpointsVector := builder.CreateByteVector(ztoc.Checkpoints)
	spanDigestsOffsets := make([]flatbuffers.UOffsetT, 0, len(ztoc.SpanDigests))
	for _, spanDigest := range ztoc.SpanDigests {
		off := builder.CreateString(spanDigest.String())
		spanDigestsOffsets = append(spanDigestsOffsets, off)
	}
	ztoc_flatbuffers.CompressionInfoStartSpanDigestsVector(builder, len(spanDigestsOffsets))
	for i := len(spanDigestsOffsets) - 1; i >= 0; i-- {
		builder.PrependUOffsetT(spanDigestsOffsets[i])
	}
	spanDigests := builder.EndVector(len(spanDigestsOffsets))
	ztoc_flatbuffers.CompressionInfoStart(builder)
	ztoc_flatbuffers.CompressionInfoAddMaxSpanId(builder, int32(ztoc.MaxSpanID))
	ztoc_flatbuffers.CompressionInfoAddSpanDigests(builder, spanDigests)
	ztoc_flatbuffers.CompressionInfoAddCheckpoints(builder, checkpointsVector)
	// only add (and check) compression algorithm if not empty;
	// if empty, use Gzip as defined in ztoc flatbuf.
	if ztoc.CompressionAlgorithm != "" {
		compressionAlgorithm, err := compressionAlgorithmToFlatbuf(ztoc.CompressionAlgorithm)
		if err != nil {
			return nil, err
		}
		ztoc_flatbuffers.CompressionInfoAddCompressionAlgorithm(builder, compressionAlgorithm)
	}
	ztocInfo := ztoc_flatbuffers.CompressionInfoEnd(builder)
	// Root Ztoc table ties together all the offsets created above.
	ztoc_flatbuffers.ZtocStart(builder)
	ztoc_flatbuffers.ZtocAddVersion(builder, version)
	ztoc_flatbuffers.ZtocAddBuildToolIdentifier(builder, buildToolIdentifier)
	ztoc_flatbuffers.ZtocAddToc(builder, toc)
	ztoc_flatbuffers.ZtocAddCompressedArchiveSize(builder, int64(ztoc.CompressedArchiveSize))
	ztoc_flatbuffers.ZtocAddUncompressedArchiveSize(builder, int64(ztoc.UncompressedArchiveSize))
	ztoc_flatbuffers.ZtocAddCompressionInfo(builder, ztocInfo)
	ztocFlatbuf := ztoc_flatbuffers.ZtocEnd(builder)
	builder.Finish(ztocFlatbuf)
	return builder.FinishedBytes(), nil
}

// prepareMetadataOffset writes one FileMetadata table into the builder and
// returns its offset. All string offsets must exist before FileMetadataStart.
func prepareMetadataOffset(builder *flatbuffers.Builder, me FileMetadata) flatbuffers.UOffsetT {
	name := builder.CreateString(me.Name)
	t := builder.CreateString(me.Type)
	linkName := builder.CreateString(me.Linkname)
	uname := builder.CreateString(me.Uname)
	gname := builder.CreateString(me.Gname)
	// NOTE(review): MarshalText error is ignored; ModTime is assumed to be a
	// valid time (MarshalText fails only for out-of-range years) — confirm.
	modTimeBinary, _ := me.ModTime.MarshalText()
	modTime := builder.CreateString(string(modTimeBinary))
	xattrs := prepareXattrsOffset(me, builder)
	ztoc_flatbuffers.FileMetadataStart(builder)
	ztoc_flatbuffers.FileMetadataAddName(builder, name)
	ztoc_flatbuffers.FileMetadataAddType(builder, t)
	ztoc_flatbuffers.FileMetadataAddUncompressedOffset(builder, int64(me.UncompressedOffset))
	ztoc_flatbuffers.FileMetadataAddUncompressedSize(builder, int64(me.UncompressedSize))
	ztoc_flatbuffers.FileMetadataAddLinkname(builder, linkName)
	ztoc_flatbuffers.FileMetadataAddMode(builder, me.Mode)
	ztoc_flatbuffers.FileMetadataAddUid(builder, uint32(me.UID))
	ztoc_flatbuffers.FileMetadataAddGid(builder, uint32(me.GID))
	ztoc_flatbuffers.FileMetadataAddUname(builder, uname)
	ztoc_flatbuffers.FileMetadataAddGname(builder, gname)
	ztoc_flatbuffers.FileMetadataAddModTime(builder, modTime)
	ztoc_flatbuffers.FileMetadataAddDevmajor(builder, me.Devmajor)
	ztoc_flatbuffers.FileMetadataAddDevminor(builder, me.Devminor)
	ztoc_flatbuffers.FileMetadataAddXattrs(builder, xattrs)
	off := ztoc_flatbuffers.FileMetadataEnd(builder)
	return off
}

// prepareXattrsOffset writes the xattrs of one file as a vector of Xattr
// tables. Keys are sorted so serialization is deterministic (map iteration
// order is random in Go).
func prepareXattrsOffset(me FileMetadata, builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	keys := make([]string, 0, len(me.Xattrs))
	for k := range me.Xattrs {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	xattrOffsetList := make([]flatbuffers.UOffsetT, 0, len(me.Xattrs))
	for _, key := range keys {
		keyOffset := builder.CreateString(key)
		valueOffset := builder.CreateString(me.Xattrs[key])
		ztoc_flatbuffers.XattrStart(builder)
		ztoc_flatbuffers.XattrAddKey(builder, keyOffset)
		ztoc_flatbuffers.XattrAddValue(builder, valueOffset)
		xattrOffset := ztoc_flatbuffers.XattrEnd(builder)
		xattrOffsetList = append(xattrOffsetList, xattrOffset)
	}
	ztoc_flatbuffers.FileMetadataStartXattrsVector(builder, len(xattrOffsetList))
	for j := len(xattrOffsetList) - 1; j >= 0; j-- {
		builder.PrependUOffsetT(xattrOffsetList[j])
	}
	xattrs := builder.EndVector(len(me.Xattrs))
	return xattrs
}

// compressionAlgorithmToFlatbuf helps convert compression algorithm into flatbuf
// enum. SOCI/containerd uses lower-case for compression, but our flatbuf capitalizes
// the first letter. When converting back, we can just `strings.ToLower` so a helper
// func is not needed in that case.
func compressionAlgorithmToFlatbuf(algo string) (ztoc_flatbuffers.CompressionAlgorithm, error) { for k, v := range ztoc_flatbuffers.EnumValuesCompressionAlgorithm { if strings.ToLower(k) == algo { return v, nil } } return 0, fmt.Errorf("compression algorithm not defined in flatbuf: %s", algo) } soci-snapshotter-0.4.1/ztoc/ztoc_test.go000066400000000000000000000625721454010642300203600ustar00rootroot00000000000000/* Copyright The Soci Snapshotter Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ztoc import ( "bytes" "compress/gzip" "fmt" "io" "os" "reflect" "testing" "github.com/awslabs/soci-snapshotter/util/testutil" "github.com/awslabs/soci-snapshotter/ztoc/compression" "github.com/opencontainers/go-digest" ) // buildTarGZ creates a temp tar gz file with the given `tarEntries`. // It returns the created tar gz filename, entry->content map, and the name of // each entry in the tar gz file. 
func buildTarGZ(t *testing.T, tarName string, tarEntries []testutil.TarEntry) (string, map[string][]byte, []string) { tarReader := testutil.BuildTarGz(tarEntries, gzip.DefaultCompression) tarGzFilePath, _, err := testutil.WriteTarToTempFile(tarName+".tar.gz", tarReader) if err != nil { t.Fatalf("cannot prepare the .tar.gz file for testing") } m, fileNames, err := testutil.GetFilesAndContentsWithinTarGz(tarGzFilePath) if err != nil { os.Remove(tarGzFilePath) t.Fatalf("failed to get targz files and their contents: %v", err) } return tarGzFilePath, m, fileNames } func buildTar(t *testing.T, tarName string, tarEntries []testutil.TarEntry) (string, map[string][]byte, []string) { tarReader := testutil.BuildTar(tarEntries) tarFilePath, _, err := testutil.WriteTarToTempFile(tarName+".tar", tarReader) if err != nil { t.Fatalf("cannot prepare the .tar file for testing") } m, fileNames, err := testutil.GetFilesAndContentsWithinTar(tarFilePath) if err != nil { os.Remove(tarFilePath) t.Fatalf("failed to get tar files and their contents: %v", err) } return tarFilePath, m, fileNames } // tarGenerator represents a function that receives a tar filename pattern and a list of // tar entries, creates a temp tar file (e.g., .tar, .tar.gz) and // returns the created tar filename, a map that maps each filename within // the tar to its content, and a list of filenames within the tarfile. 
type tarGenerator func(*testing.T, string, []testutil.TarEntry) (string, map[string][]byte, []string) var testZtocs = []struct { name string compressionAlgo string tarGenerator tarGenerator }{ { name: "gzip", compressionAlgo: compression.Gzip, tarGenerator: buildTarGZ, }, { name: "uncompressed", compressionAlgo: compression.Uncompressed, tarGenerator: buildTar, }, } func TestDecompress(t *testing.T) { for _, tc := range testZtocs { testDecompress(t, tc.compressionAlgo, tc.tarGenerator) } } func testDecompress(t *testing.T, compressionAlgo string, generator tarGenerator) { tarEntries := []testutil.TarEntry{ testutil.File("smallfile", string(testutil.RandomByteDataRange(1, 100))), testutil.File("mediumfile", string(testutil.RandomByteDataRange(10000, 128000))), testutil.File("largefile", string(testutil.RandomByteDataRange(350000, 500000))), testutil.File("jumbofile", string(testutil.RandomByteDataRange(3000000, 5000000))), } tests := []struct { name string spanSize int64 }{ { name: "decompress span size 10kB", spanSize: 10000, }, { name: "decompress span size 64KiB", spanSize: 65535, }, { name: "decompress span size 128kB", spanSize: 128000, }, { name: "decompress span size 256kB", spanSize: 256000, }, { name: "decompress span size 512kB", spanSize: 512000, }, { name: "decompress span size 1MiB", spanSize: 1 << 20, }, } ztocBuilder := NewBuilder("test") tarFilePath, m, fileNames := generator(t, "decompress", tarEntries) defer os.Remove(tarFilePath) for _, tc := range tests { tc := tc ztoc, err := ztocBuilder.BuildZtoc(tarFilePath, tc.spanSize, WithCompression(compressionAlgo)) if err != nil { t.Fatalf("%s: can't build ztoc: %v", tc.name, err) } if ztoc == nil { t.Fatalf("%s: ztoc should not be nil", tc.name) } for _, f := range fileNames { file, err := os.Open(tarFilePath) if err != nil { t.Fatalf("%s: could not open open the .tar.gz file", tc.name) } defer file.Close() fi, err := file.Stat() if err != nil { t.Fatalf("%s: could not get the stat for the file %s", 
tc.name, tarFilePath) } extracted, err := ztoc.ExtractFile(io.NewSectionReader(file, 0, fi.Size()), f) if err != nil { t.Fatalf("%s: could not extract from tar gz", tc.name) } original := m[f] if !bytes.Equal(extracted, original) { diffIdx := getPositionOfFirstDiffInByteSlice(extracted, original) t.Fatalf("%s: span_size=%d: file %s extracted bytes != original bytes; byte %d is different", tc.name, tc.spanSize, f, diffIdx) } } } } func TestDecompressWithGzipHeaders(t *testing.T) { const spanSize = 1024 testcases := []struct { name string opts []testutil.BuildTarOption }{ { name: "ztoc decompress works with gzip comments", opts: []testutil.BuildTarOption{testutil.WithGzipComment("test comment")}, }, { name: "ztoc decompress works with gzip filename", opts: []testutil.BuildTarOption{testutil.WithGzipFilename("filename.tar")}, }, { name: "ztoc decompress works with gzip extra data", opts: []testutil.BuildTarOption{testutil.WithGzipExtra(testutil.RandomByteData(100))}, }, { name: "ztoc decompress works with gzip comments, filename, and extra data", opts: []testutil.BuildTarOption{ testutil.WithGzipComment("test comment"), testutil.WithGzipFilename("filename.tar"), testutil.WithGzipExtra(testutil.RandomByteData(100)), }, }, { name: "ztoc decompress works when extra data is bigger than the span size", opts: []testutil.BuildTarOption{testutil.WithGzipExtra(testutil.RandomByteData(2 * spanSize))}, }, } for _, tc := range testcases { tc := tc t.Run(tc.name, func(t *testing.T) { data := testutil.RandomByteData(100) ztoc, sr, err := BuildZtocReader(t, []testutil.TarEntry{ testutil.File("file", string(data)), }, gzip.DefaultCompression, spanSize, tc.opts..., ) if err != nil { t.Fatalf("failed to create ztoc: %v", err) } b, err := ztoc.ExtractFile(sr, ztoc.FileMetadata[0].Name) if err != nil { t.Fatalf("failed to extract from ztoc: %v", err) } diff := getPositionOfFirstDiffInByteSlice(data, b) if diff != -1 { t.Fatalf("data mismatched at %d. 
expected %v, got %v", diff, data, b) } }) } } func TestZtocGenerationConsistency(t *testing.T) { for _, tc := range testZtocs { testZtocGenerationConsistency(t, tc.compressionAlgo, tc.tarGenerator) } } func testZtocGenerationConsistency(t *testing.T, compressionAlgo string, generator tarGenerator) { testcases := []struct { name string tarEntries []testutil.TarEntry spanSize int64 tarName string }{ { name: "success generate consistent ztocs, two small files, span_size=64", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(10))), testutil.File("file2", string(testutil.RandomByteData(15))), }, spanSize: 64, tarName: "testcase0", }, { name: "success generate consistent ztocs, mixed files, span_size=64", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(1000000))), testutil.File("file2", string(testutil.RandomByteData(2500000))), testutil.File("file3", string(testutil.RandomByteData(25))), testutil.File("file4", string(testutil.RandomByteData(88888))), }, spanSize: 64, tarName: "testcase1", }, } ztocBuilder := NewBuilder("test") for _, tc := range testcases { tc := tc t.Run(fmt.Sprintf("%s-%s", compressionAlgo, tc.name), func(t *testing.T) { tarFilePath, _, fileNames := generator(t, tc.tarName, tc.tarEntries) defer os.Remove(tarFilePath) ztoc1, err := ztocBuilder.BuildZtoc(tarFilePath, tc.spanSize, WithCompression(compressionAlgo)) if err != nil { t.Fatalf("can't build ztoc1: %v", err) } if ztoc1 == nil { t.Fatalf("ztoc1 should not be nil") } if len(ztoc1.FileMetadata) != len(fileNames) { t.Fatalf("ztoc1 metadata file count mismatch. 
expected: %d, actual: %d", len(fileNames), len(ztoc1.FileMetadata)) } ztoc2, err := ztocBuilder.BuildZtoc(tarFilePath, tc.spanSize, WithCompression(compressionAlgo)) if err != nil { t.Fatalf("can't build ztoc2: %v", err) } if ztoc2 == nil { t.Fatalf("ztoc2 should not be nil") } if len(ztoc2.FileMetadata) != len(fileNames) { t.Fatalf("ztoc2 should contain the metadata for %d files, but found %d", len(fileNames), len(ztoc2.FileMetadata)) } // compare two ztocs if ztoc1.CompressedArchiveSize != ztoc2.CompressedArchiveSize { t.Fatalf("ztoc1.CompressedArchiveSize should be equal to ztoc2.CompressedArchiveSize") } if ztoc1.MaxSpanID != ztoc2.MaxSpanID { t.Fatalf("ztoc1.MaxSpanID should be equal to ztoc2.MaxSpanID") } if ztoc1.Version != ztoc2.Version { t.Fatalf("ztoc1.Checkpoints should be equal to ztoc2.Checkpoints") } for i := 0; i < len(ztoc1.FileMetadata); i++ { metadata1 := ztoc1.FileMetadata[i] metadata2 := ztoc2.FileMetadata[i] if !reflect.DeepEqual(metadata1, metadata2) { t.Fatalf("ztoc1.FileMetadata[%d] should be equal to ztoc2.FileMetadata[%d]", i, i) } } // Compare raw Checkpoints if !bytes.Equal(ztoc1.Checkpoints, ztoc2.Checkpoints) { diffIdx := getPositionOfFirstDiffInByteSlice(ztoc1.Checkpoints, ztoc2.Checkpoints) t.Fatalf("ztoc1.Checkpoints differ ztoc2.Checkpoints starting from position %d", diffIdx) } }) } } func TestZtocGeneration(t *testing.T) { for _, tc := range testZtocs { testZtocGeneration(t, tc.compressionAlgo, tc.tarGenerator) } } func testZtocGeneration(t *testing.T, compressionAlgo string, generator tarGenerator) { testcases := []struct { name string tarEntries []testutil.TarEntry spanSize int64 tarName string buildTool string }{ { name: "success generate ztoc with multiple files, span_size=64KiB", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(1080033))), testutil.File("file2", string(testutil.RandomByteData(6030502))), testutil.File("file3", string(testutil.RandomByteData(93000))), 
testutil.File("file4", string(testutil.RandomByteData(1070021))), testutil.File("file5", string(testutil.RandomByteData(55333))), testutil.File("file6", string(testutil.RandomByteData(1070))), testutil.File("file7", string(testutil.RandomByteData(999993))), testutil.File("file8", string(testutil.RandomByteData(1080033))), testutil.File("file9", string(testutil.RandomByteData(305))), testutil.File("filea", string(testutil.RandomByteData(3000))), testutil.File("fileb", string(testutil.RandomByteData(107))), testutil.File("filec", string(testutil.RandomByteData(559333))), testutil.File("filed", string(testutil.RandomByteData(100))), testutil.File("filee", string(testutil.RandomByteData(989993))), }, spanSize: 65535, tarName: "testcase0", buildTool: "AWS SOCI CLI", }, { name: "success generate ztoc with two files, span_size=10kB", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(10800))), testutil.File("file2", string(testutil.RandomByteData(10))), }, spanSize: 10000, tarName: "testcase1", buildTool: "foo", }, { name: "success generate ztoc with two files, span_size=1MiB", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(9911873))), testutil.File("file2", string(testutil.RandomByteData(800333))), }, spanSize: 1 << 20, tarName: "testcase2", buildTool: "bar", }, { name: "success generate ztoc with one file, span_size=256kB", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(5108033))), }, spanSize: 256000, tarName: "testcase3", }, } for _, tc := range testcases { tc := tc t.Run(fmt.Sprintf("%s-%s", compressionAlgo, tc.name), func(t *testing.T) { tarBuffer := bytes.NewBuffer([]byte{}) rawTarFileSize, err := io.Copy(tarBuffer, testutil.BuildTar(tc.tarEntries)) if err != nil { t.Fatal(err) } tarFilePath, m, fileNames := generator(t, tc.tarName, tc.tarEntries) defer os.Remove(tarFilePath) ztoc, err := NewBuilder(tc.buildTool).BuildZtoc(tarFilePath, tc.spanSize, 
WithCompression(compressionAlgo)) if err != nil { t.Fatalf("can't build ztoc: error=%v", err) } if ztoc == nil { t.Fatalf("ztoc should not be nil") } if ztoc.BuildToolIdentifier != tc.buildTool { t.Fatalf("ztoc build tool identifiers do not match: expected %s, got %s", tc.buildTool, ztoc.BuildToolIdentifier) } if len(ztoc.FileMetadata) != len(fileNames) { t.Fatalf("ztoc metadata count mismatch. expected: %d, actual: %d", len(fileNames), len(ztoc.FileMetadata)) } if ztoc.UncompressedArchiveSize != compression.Offset(rawTarFileSize) { t.Fatalf("ztoc uncompressed file size mismatch. expected: %d, actual:%d", rawTarFileSize, ztoc.UncompressedArchiveSize) } for i := 0; i < len(ztoc.FileMetadata); i++ { compressedFileName := ztoc.FileMetadata[i].Name if compressedFileName != fileNames[i] { t.Fatalf("%d file name mismatch. expected: %s, actual: %s", i, fileNames[i], compressedFileName) } if int(ztoc.FileMetadata[i].UncompressedSize) != len(m[fileNames[i]]) { t.Fatalf("%d uncompressed content size mismatch. expected: %d, actual: %d", i, len(m[fileNames[i]]), int(ztoc.FileMetadata[i].UncompressedSize)) } extractedBytes, err := ztoc.ExtractFromTarGz(tarFilePath, compressedFileName) if err != nil { t.Fatalf("could not extract file %s from %s using generated ztoc: %v", compressedFileName, tarFilePath, err) } if extractedBytes != string(m[fileNames[i]]) { t.Fatalf("the extracted content does not match. 
expected: %s, actual: %s", string(m[fileNames[i]]), extractedBytes) } } }) } } func TestZtocSerialization(t *testing.T) { for _, tc := range testZtocs { testZtocSerialization(t, tc.compressionAlgo, tc.tarGenerator) } } func testZtocSerialization(t *testing.T, compressionAlgo string, generator tarGenerator) { testcases := []struct { name string tarEntries []testutil.TarEntry spanSize int64 tarName string buildTool string version string xattrs map[string]string }{ { name: "success serialize ztoc with multiple files, span_size=64KiB", tarEntries: []testutil.TarEntry{ testutil.File("file1", string(testutil.RandomByteData(1080033))), testutil.File("file2", string(testutil.RandomByteData(6030502))), testutil.File("file3", string(testutil.RandomByteData(93000))), testutil.File("file4", string(testutil.RandomByteData(1070021))), testutil.File("file5", string(testutil.RandomByteData(55333))), testutil.File("file6", string(testutil.RandomByteData(1070))), testutil.File("file7", string(testutil.RandomByteData(999993))), testutil.File("file8", string(testutil.RandomByteData(1080033))), testutil.File("file9", string(testutil.RandomByteData(305))), testutil.File("filea", string(testutil.RandomByteData(3000))), testutil.File("fileb", string(testutil.RandomByteData(107))), testutil.File("filec", string(testutil.RandomByteData(559333))), testutil.File("filed", string(testutil.RandomByteData(100))), testutil.File("filee", string(testutil.RandomByteData(989993))), }, spanSize: 65535, tarName: "testcase0", buildTool: "AWS SOCI CLI", xattrs: map[string]string{"testKey": "testValue"}, }, } for _, tc := range testcases { tc := tc t.Run(fmt.Sprintf("%s-%s", compressionAlgo, tc.name), func(t *testing.T) { tarFilePath, m, fileNames := generator(t, tc.tarName, tc.tarEntries) defer os.Remove(tarFilePath) createdZtoc, err := NewBuilder(tc.buildTool).BuildZtoc(tarFilePath, tc.spanSize, WithCompression(compressionAlgo)) if err != nil { t.Fatalf("can't build ztoc: error=%v", err) } if createdZtoc 
== nil { t.Fatalf("ztoc should not be nil") } // append xattrs for i := 0; i < len(createdZtoc.FileMetadata); i++ { for key := range tc.xattrs { createdZtoc.FileMetadata[i].Xattrs = make(map[string]string) createdZtoc.FileMetadata[i].Xattrs[key] = tc.xattrs[key] } } // verify the correctness of created ztoc if createdZtoc.BuildToolIdentifier != tc.buildTool { t.Fatalf("ztoc build tool identifiers do not match: expected %s, got %s", tc.buildTool, createdZtoc.BuildToolIdentifier) } if len(createdZtoc.FileMetadata) != len(fileNames) { t.Fatalf("ztoc metadata count mismatch. expected: %d, actual: %d", len(fileNames), len(createdZtoc.FileMetadata)) } if createdZtoc.CompressionAlgorithm != compressionAlgo { t.Fatalf("ztoc compression algorithm mismatch. expected: %s, actual: %s", compressionAlgo, createdZtoc.CompressionAlgorithm) } for i := 0; i < len(createdZtoc.FileMetadata); i++ { compressedFileName := createdZtoc.FileMetadata[i].Name if compressedFileName != fileNames[i] { t.Fatalf("%d file name mismatch. expected: %s, actual: %s", i, fileNames[i], compressedFileName) } if int(createdZtoc.FileMetadata[i].UncompressedSize) != len(m[fileNames[i]]) { t.Fatalf("%d uncompressed content size mismatch. expected: %d, actual: %d", i, len(m[fileNames[i]]), int(createdZtoc.FileMetadata[i].UncompressedSize)) } extractedBytes, err := createdZtoc.ExtractFromTarGz(tarFilePath, compressedFileName) if err != nil { t.Fatalf("could not extract file %s from %s using generated ztoc: %v", compressedFileName, tarFilePath, err) } if extractedBytes != string(m[fileNames[i]]) { t.Fatalf("the extracted content does not match. 
expected: %s, actual: %s", string(m[fileNames[i]]), extractedBytes) } } // serialize r, _, err := Marshal(createdZtoc) if err != nil { t.Fatalf("error occurred when getting ztoc reader: %v", err) } // replacing the original ztoc with the read version of it readZtoc, err := Unmarshal(r) if err != nil { t.Fatalf("error occurred when getting ztoc: %v", err) } if readZtoc == nil { t.Fatalf("ztoc should not be nil") } if readZtoc.BuildToolIdentifier != createdZtoc.BuildToolIdentifier { t.Fatalf("serialized ztoc build tool identifiers do not match: expected %s, got %s", createdZtoc.BuildToolIdentifier, readZtoc.BuildToolIdentifier) } if readZtoc.Version != createdZtoc.Version { t.Fatalf("serialized ztoc version identifiers do not match: expected %s, got %s", createdZtoc.Version, readZtoc.Version) } if readZtoc.CompressedArchiveSize != createdZtoc.CompressedArchiveSize { t.Fatalf("readZtoc.CompressedArchiveSize should be equal to createdZtoc.CompressedArchiveSize") } if readZtoc.MaxSpanID != createdZtoc.MaxSpanID { t.Fatalf("readZtoc.MaxSpanID should be equal to createdZtoc.MaxSpanID") } if len(readZtoc.FileMetadata) != len(createdZtoc.FileMetadata) { t.Fatalf("ztoc metadata count mismatch. expected: %d, actual: %d", len(createdZtoc.FileMetadata), len(readZtoc.FileMetadata)) } if readZtoc.CompressionAlgorithm != createdZtoc.CompressionAlgorithm { t.Fatalf("ztoc compression algorithm mismatch. 
expected: %s, actual: %s", createdZtoc.CompressionAlgorithm, readZtoc.CompressionAlgorithm) } for i := 0; i < len(readZtoc.FileMetadata); i++ { readZtocMetadata := readZtoc.FileMetadata[i] createdZtocMetadata := createdZtoc.FileMetadata[i] compressedFileName := readZtocMetadata.Name if !reflect.DeepEqual(readZtocMetadata, createdZtocMetadata) { if readZtocMetadata.Name != createdZtocMetadata.Name { t.Fatalf("createdZtoc.FileMetadata[%d].Name should be equal to readZtoc.FileMetadata[%d].Name", i, i) } if readZtocMetadata.Type != createdZtocMetadata.Type { t.Fatalf("createdZtoc.FileMetadata[%d].Type should be equal to readZtoc.FileMetadata[%d].Type", i, i) } if !readZtocMetadata.ModTime.Equal(createdZtocMetadata.ModTime) { t.Fatalf("createdZtoc.FileMetadata[%d].ModTime=%v should be equal to readZtoc.FileMetadata[%d].ModTime=%v", i, createdZtocMetadata.ModTime, i, readZtocMetadata.ModTime) } if readZtocMetadata.UncompressedOffset != createdZtocMetadata.UncompressedOffset { t.Fatalf("createdZtoc.FileMetadata[%d].UncompressedOffset should be equal to readZtoc.FileMetadata[%d].UncompressedOffset", i, i) } if readZtocMetadata.UncompressedSize != createdZtocMetadata.UncompressedSize { t.Fatalf("createdZtoc.FileMetadata[%d].UncompressedSize should be equal to readZtoc.FileMetadata[%d].UncompressedSize", i, i) } if readZtocMetadata.Linkname != createdZtocMetadata.Linkname { t.Fatalf("createdZtoc.FileMetadata[%d].Linkname should be equal to readZtoc.FileMetadata[%d].Linkname", i, i) } if readZtocMetadata.Mode != createdZtocMetadata.Mode { t.Fatalf("createdZtoc.FileMetadata[%d].Mode should be equal to readZtoc.FileMetadata[%d].Mode", i, i) } if readZtocMetadata.UID != createdZtocMetadata.UID { t.Fatalf("createdZtoc.FileMetadata[%d].UID should be equal to readZtoc.FileMetadata[%d].UID", i, i) } if readZtocMetadata.GID != createdZtocMetadata.GID { t.Fatalf("createdZtoc.FileMetadata[%d].GID should be equal to readZtoc.FileMetadata[%d].GID", i, i) } if readZtocMetadata.Uname != 
createdZtocMetadata.Uname { t.Fatalf("createdZtoc.FileMetadata[%d].Uname should be equal to readZtoc.FileMetadata[%d].Uname", i, i) } if readZtocMetadata.Gname != createdZtocMetadata.Gname { t.Fatalf("createdZtoc.FileMetadata[%d].Gname should be equal to readZtoc.FileMetadata[%d].Gname", i, i) } if readZtocMetadata.Devmajor != createdZtocMetadata.Devmajor { t.Fatalf("createdZtoc.FileMetadata[%d].Devmajor should be equal to readZtoc.FileMetadata[%d].Devmajor", i, i) } if readZtocMetadata.Devminor != createdZtocMetadata.Devminor { t.Fatalf("createdZtoc.FileMetadata[%d].Devminor should be equal to readZtoc.FileMetadata[%d].Devminor", i, i) } } extractedBytes, err := readZtoc.ExtractFromTarGz(tarFilePath, compressedFileName) if err != nil { t.Fatalf("could not extract file %s from %s using generated ztoc: %v", compressedFileName, tarFilePath, err) } if extractedBytes != string(m[fileNames[i]]) { t.Fatalf("the extracted content does not match. expected: %s, actual: %s", string(m[fileNames[i]]), extractedBytes) } } // Compare raw Checkpoints if !bytes.Equal(createdZtoc.Checkpoints, readZtoc.Checkpoints) { t.Fatalf("createdZtoc.Checkpoints must be identical to readZtoc.Checkpoints") } }) } } func TestWriteZtoc(t *testing.T) { testCases := []struct { name string version Version checkpoints []byte metadata []FileMetadata compressedArchiveSize compression.Offset uncompressedArchiveSize compression.Offset maxSpanID compression.SpanID buildTool string expDigest string expSize int64 }{ { name: "success write succeeds - same digest and size " + string(Version09), version: Version09, checkpoints: make([]byte, 1<<16), metadata: make([]FileMetadata, 2), compressedArchiveSize: 2000000, uncompressedArchiveSize: 2500000, maxSpanID: 3, buildTool: "AWS SOCI CLI", expDigest: "sha256:eba28fdf50b1b57543f57dd051b2468c1d4f57b64d2006c75aa4de1d03e6c7ec", expSize: 65928, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { toc := TOC{ FileMetadata: tc.metadata, } 
compressionInfo := CompressionInfo{ Checkpoints: tc.checkpoints, MaxSpanID: tc.maxSpanID, } ztoc := &Ztoc{ Version: tc.version, CompressedArchiveSize: tc.compressedArchiveSize, UncompressedArchiveSize: tc.uncompressedArchiveSize, TOC: toc, CompressionInfo: compressionInfo, BuildToolIdentifier: tc.buildTool, } _, desc, err := Marshal(ztoc) if err != nil { t.Fatalf("error occurred when getting ztoc reader: %v", err) } if desc.Digest != digest.Digest(tc.expDigest) { t.Fatalf("unexpected digest; expected %v, got %v", tc.expDigest, desc.Digest) } if desc.Size != tc.expSize { t.Fatalf("unexpected size; expected %d, got %d", tc.expSize, desc.Size) } }) } } func TestReadZtocInWrongFormat(t *testing.T) { testCases := []struct { name string serializedZtoc []byte }{ { name: "ztoc unmarshal returns error and does not panic", serializedZtoc: testutil.RandomByteData(50000), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { r := bytes.NewReader(tc.serializedZtoc) if _, err := Unmarshal(r); err == nil { t.Fatalf("expected error, but got nil") } }) } } func getPositionOfFirstDiffInByteSlice(a, b []byte) int { sz := len(a) if len(b) < len(a) { sz = len(b) } for i := 0; i < sz; i++ { if a[i] != b[i] { return i } } return -1 }