golang-github-theupdateframework-go-tuf-2.0.2/.github/codecov.yml
coverage:
status:
project:
default:
# basic
target: auto
threshold: 5%
patch: off
golang-github-theupdateframework-go-tuf-2.0.2/.github/dependabot.yml
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
# Monitor Go dependencies
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
time: "10:00"
commit-message:
prefix: "chore"
include: "scope"
open-pull-requests-limit: 10
golang-github-theupdateframework-go-tuf-2.0.2/.github/workflows/ci.yml
#
# Copyright 2024 The Update Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# SPDX-License-Identifier: Apache-2.0
on:
pull_request:
push:
branches:
- "master"
tags:
- "v*"
name: CI
jobs:
linting:
uses: ./.github/workflows/linting.yml
tests:
uses: ./.github/workflows/tests.yml
examples:
uses: ./.github/workflows/examples.yml
golang-github-theupdateframework-go-tuf-2.0.2/.github/workflows/codeql-analysis.yml
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [master]
pull_request:
# The branches below must be a subset of the branches above
branches: [master]
schedule:
- cron: "25 14 * * 6"
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: ["go"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@47b3d888fe66b639e431abf22ebca059152f1eea # 3.24.5
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@47b3d888fe66b639e431abf22ebca059152f1eea # 3.24.5
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@47b3d888fe66b639e431abf22ebca059152f1eea # 3.24.5
golang-github-theupdateframework-go-tuf-2.0.2/.github/workflows/examples.yml
#
# Copyright 2024 The Update Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# SPDX-License-Identifier: Apache-2.0
on:
workflow_call:
# Not exactly the right way to test functionality, but these jobs do act as a set of
# end-to-end test cases for the time being; nevertheless, they should be updated.
name: Examples
jobs:
client:
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- run: make example-client
repository:
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- run: make example-repository
multirepo:
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- run: make example-multirepo
tuf-client-cli:
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- run: make example-tuf-client-cli
root-signing:
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- run: make example-root-signing
golang-github-theupdateframework-go-tuf-2.0.2/.github/workflows/linting.yml
#
# Copyright 2024 The Update Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# SPDX-License-Identifier: Apache-2.0
on:
workflow_call:
name: Linting
jobs:
govulncheck_job:
runs-on: ubuntu-latest
name: govulncheck
steps:
- id: govulncheck
uses: golang/govulncheck-action@3a32958c2706f7048305d5a2e53633d7e37e97d0
continue-on-error: true
with:
go-version-file: 'go.mod'
go-package: ./...
golangci:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- name: Run golangci-lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
with:
# Require: The version of golangci-lint to use.
# When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
# When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
version: v1.54
args: --timeout 5m --verbose
golang-github-theupdateframework-go-tuf-2.0.2/.github/workflows/tests.yml
#
# Copyright 2024 The Update Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# SPDX-License-Identifier: Apache-2.0
on:
workflow_call:
name: Tests
jobs:
run:
name: Run
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Set git to use LF
run: git config --global core.autocrlf false
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup - Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version-file: 'go.mod'
cache: true
- name: Run tests
run: go test -race -covermode=atomic -coverpkg=./metadata/... -coverprofile=coverage.out ./...
- name: Send coverage
uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # v4.1.0
golang-github-theupdateframework-go-tuf-2.0.2/.gitignore
.vscode
dist/
.idea/
*~
golang-github-theupdateframework-go-tuf-2.0.2/.golangci.yml
#
# Copyright 2024 The Update Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# SPDX-License-Identifier: Apache-2.0
run:
linters:
enable:
- gofmt
- bodyclose
- contextcheck
- errname
- gocyclo
- godot
- godox
- misspell
- stylecheck
- whitespace
- gocritic
golang-github-theupdateframework-go-tuf-2.0.2/.goreleaser.yaml
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
# you may remove this if you don't need go generate
- go generate ./...
builds:
- skip: true
# - env:
# - CGO_ENABLED=0
# goos:
# - linux
# - windows
# - darwin
archives:
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
{{ .ProjectName }}_
{{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
format: zip
checksum:
name_template: "checksums.txt"
snapshot:
name_template: "{{ incpatch .Version }}-next"
source:
enabled: true
changelog:
use: github
groups:
- title: "Breaking changes"
regexp: "^.*(?:BREAKING CHANGE)|![(\\w)]*:+.*$"
order: 0
- title: Features
regexp: "^.*feat[(\\w)]*:+.*$"
order: 1
- title: "Bug fixes"
regexp: "^.*fix[(\\w)]*:+.*$"
order: 2
- title: Others
order: 999
release:
# If set to auto, will mark the release as not ready for production
# in case there is an indicator for this in the tag e.g. v1.0.0-rc1
prerelease: auto
golang-github-theupdateframework-go-tuf-2.0.2/.minder.yaml 0000664 0000000 0000000 00000006522 14706111210 0023556 0 ustar 00root root 0000000 0000000 # This is the Minder profile file used for securely monitoring rdimitrov/go-tuf-metadata.
# For more information, see https://github.com/stacklok/minder.
---
version: v1
type: profile
name: go-tuf-metadata
context:
provider: github
alert: "on"
remediate: "on"
repository:
- type: secret_scanning
def:
enabled: true
- type: secret_push_protection
def:
enabled: true
- type: github_actions_allowed
def:
allowed_actions: all
# - type: allowed_selected_actions
# def:
# github_owned_allowed: true
# verified_allowed: true
# patterns_allowed: []
- type: default_workflow_permissions
def:
default_workflow_permissions: write
can_approve_pull_request_reviews: true
- type: codeql_enabled
def:
languages: [go]
schedule_interval: '30 4-6 * * *'
- type: actions_check_pinned_tags
def: {}
- type: dependabot_configured
def:
package_ecosystem: gomod
schedule_interval: weekly
apply_if_file: go.mod
- type: dockerfile_no_latest_tag
def: {}
# - type: trivy_action_enabled
# def: {}
- type: branch_protection_enabled
params:
branch: main
def: {}
- type: branch_protection_allow_deletions
params:
branch: main
def:
allow_deletions: false
- type: branch_protection_allow_force_pushes
params:
branch: main
def:
allow_force_pushes: true
# - type: branch_protection_enforce_admins
# params:
# branch: main
# def:
# enforce_admins: true
- type: branch_protection_lock_branch
params:
branch: main
def:
lock_branch: false
- type: branch_protection_require_conversation_resolution
params:
branch: main
def:
required_conversation_resolution: true
- type: branch_protection_require_linear_history
params:
branch: main
def:
required_linear_history: true
- type: branch_protection_require_pull_request_approving_review_count
params:
branch: main
def:
required_approving_review_count: 1
- type: branch_protection_require_pull_request_code_owners_review
params:
branch: main
def:
require_code_owner_reviews: true
- type: branch_protection_require_pull_request_dismiss_stale_reviews
params:
branch: main
def:
dismiss_stale_reviews: true
- type: branch_protection_require_pull_request_last_push_approval
params:
branch: main
def:
require_last_push_approval: true
- type: branch_protection_require_pull_requests
params:
branch: main
def:
required_pull_request_reviews: true
- type: branch_protection_require_signatures
params:
branch: main
def:
required_signatures: false
- type: license
def:
license_filename: LICENSE
license_type: "Apache License"
# artifact:
# - type: artifact_signature
# params:
# tags: [main]
# name: test
# def:
# is_signed: true
# is_verified: true
# is_bundle_verified: true
pull_request:
- type: pr_vulnerability_check
def:
action: review
ecosystem_config:
- name: go
vulnerability_database_type: osv
vulnerability_database_endpoint: https://vuln.go.dev
package_repository:
url: https://proxy.golang.org
sum_repository:
url: https://sum.golang.org
golang-github-theupdateframework-go-tuf-2.0.2/CODE-OF-CONDUCT.md 0000664 0000000 0000000 00000006431 14706111210 0023770 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at oss-coc@vmware.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
golang-github-theupdateframework-go-tuf-2.0.2/CODEOWNERS
* @theupdateframework/go-tuf-maintainers
golang-github-theupdateframework-go-tuf-2.0.2/CONTRIBUTING.md
# Contributing
## Developer Certificate of Origin
Before you start working with this project, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch.
## Contribution Process
* Follow the [GitHub process](https://help.github.com/articles/fork-a-repo)
golang-github-theupdateframework-go-tuf-2.0.2/LICENSE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2024 The Update Framework Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
golang-github-theupdateframework-go-tuf-2.0.2/MAINTAINERS.md
# Maintainers should review changes promptly.
# GitHub will auto-assign the maintainers below to a pull request.
## Current
The [@theupdateframework/go-tuf-maintainers](https://github.com/orgs/theupdateframework/teams/go-tuf-maintainers) team is:
| Maintainer | GitHub ID | Affiliation |
| -------------------------- | ---------------------------------------------------------- | ------------------------------------------ |
| Fredrik Skogman | [@kommendorkapten](https://github.com/kommendorkapten) | [@GitHub](https://github.com/github) |
| Joshua Lock | [@joshuagl](https://github.com/joshuagl) | [@Verizon](https://github.com/verizon) |
| Marina Moore | [@mnm678](https://github.com/mnm678) | NYU |
| Marvin Drees | [@MDr164](https://github.com/MDr164) | [@9elements](https://github.com/9elements) |
| Radoslav Dimitrov | [@rdimitrov](https://github.com/rdimitrov) | [@Stacklok](https://github.com/stacklok) |
## Emeritus
We are deeply indebted to our emeritus maintainers below:
| Maintainer | GitHub ID |
| -------------------------- | ---------------------------------------------------------- |
| Trishank Karthik Kuppusamy | [@trishankatdatadog](https://github.com/trishankatdatadog) |
| Zack Newman | [@znewman01](https://github.com/znewman01) |
golang-github-theupdateframework-go-tuf-2.0.2/Makefile
#
# Copyright 2024 The Update Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# SPDX-License-Identifier: Apache-2.0
# We want to use bash
SHELL:=/bin/bash
# Set environment variables
CLIS:=tuf-client # tuf
# Default target
.PHONY: default
default: build
#####################
# build section
#####################
# Build
.PHONY: build
build: $(addprefix build-, $(CLIS))
# Target for building a Go binary
.PHONY: build-%
build-%:
@echo "Building $*"
@go build -o $* examples/cli/$*/main.go
#####################
# test section
#####################
# Test target
.PHONY: test
test:
go test -race -covermode atomic ./...
#####################
# lint section
#####################
.PHONY: lint
lint:
golangci-lint run
.PHONY: fmt
fmt:
go fmt ./...
#####################
# examples section
#####################
# Target for running all examples
.PHONY: example-all
example-all: example-client example-repository example-multirepo example-tuf-client-cli example-root-signing
# Target for demoing the examples/client/client_example.go
.PHONY: example-client
example-client:
@echo "Executing the following example - client/client_example.go"
@cd examples/client/ && go run .
# Target for demoing the examples/repository/basic_repository.go
.PHONY: example-repository
example-repository:
@echo "Executing the following example - repository/basic_repository.go"
@cd examples/repository/ && go run .
# Target for demoing the examples/multirepo/client/client_example.go
.PHONY: example-multirepo
example-multirepo:
@echo "Executing the following example - multirepo/client/client_example.go"
@cd examples/multirepo/client/ && go run .
# Target for demoing the tuf-client cli
.PHONY: example-tuf-client-cli
example-tuf-client-cli: build-tuf-client
@echo "Clearing any leftover artifacts..."
./tuf-client reset --force
@echo "Initializing the following https://jku.github.io/tuf-demo/ TUF repository"
@sleep 2
./tuf-client init --url https://jku.github.io/tuf-demo/metadata
@echo "Downloading the following target file - rdimitrov/artifact-example.md"
@sleep 2
./tuf-client get --url https://jku.github.io/tuf-demo/metadata --turl https://jku.github.io/tuf-demo/targets rdimitrov/artifact-example.md
# Target for demoing the tuf-client cli with root-signing repo
.PHONY: example-root-signing
example-root-signing: build-tuf-client
@echo "Clearing any leftover artifacts..."
./tuf-client reset --force
@echo "Downloading the initial root of trust"
@curl -L "https://tuf-repo-cdn.sigstore.dev/5.root.json" > root.json
@echo "Initializing the following https://tuf-repo-cdn.sigstore.dev TUF repository"
@sleep 2
./tuf-client init --url https://tuf-repo-cdn.sigstore.dev --file root.json
@echo "Downloading the following target file - rekor.pub"
@sleep 2
./tuf-client get --url https://tuf-repo-cdn.sigstore.dev --turl https://tuf-repo-cdn.sigstore.dev/targets rekor.pub
# Clean target
.PHONY: clean
clean:
@rm -rf examples/multirepo/client/bootstrap/
@rm -rf examples/multirepo/client/download/
@rm -rf examples/multirepo/client/metadata/
@rm -rf examples/repository/tmp*
@rm -rf examples/client/tmp*
@rm -rf tuf_download
@rm -rf tuf_metadata
@rm -f tuf-client
@rm -f root.json
golang-github-theupdateframework-go-tuf-2.0.2/NOTICE 0000664 0000000 0000000 00000000344 14706111210 0022236 0 ustar 00root root 0000000 0000000 Copyright 2024 The Update Framework Authors
Apache 2.0 License
Copyright 2024 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
SPDX-License-Identifier: Apache-2.0
golang-github-theupdateframework-go-tuf-2.0.2/README.md 0000664 0000000 0000000 00000017172 14706111210 0022620 0 ustar 00root root 0000000 0000000 
[Codecov](https://codecov.io/github/theupdateframework/go-tuf) ·
[Go Reference](https://pkg.go.dev/github.com/theupdateframework/go-tuf) ·
[Go Report Card](https://goreportcard.com/report/github.com/theupdateframework/go-tuf) ·
[License: Apache-2.0](https://opensource.org/licenses/Apache-2.0)
# go-tuf/v2 - Framework for Securing Software Update Systems
----------------------------
[The Update Framework (TUF)](https://theupdateframework.io/) is a framework for
secure content delivery and updates. It protects against various types of
supply chain attacks and provides resilience to compromise.
## About The Update Framework
----------------------------
The Update Framework (TUF) design helps developers maintain the security of a
software update system, even against attackers that compromise the repository
or signing keys.
TUF provides a flexible
[specification](https://github.com/theupdateframework/specification/blob/master/tuf-spec.md)
defining functionality that developers can use in any software update system or
re-implement to fit their needs.
TUF is hosted by the [Linux Foundation](https://www.linuxfoundation.org/) as
part of the [Cloud Native Computing Foundation](https://www.cncf.io/) (CNCF)
and its design is [used in production](https://theupdateframework.io/adoptions/)
by various tech companies and open-source organizations.
Please see [TUF's website](https://theupdateframework.com/) for more information about TUF!
## Overview
----------------------------
The go-tuf v2 project provides a lightweight library with the following functionality:
* creation, reading, and writing of TUF metadata
* an easy object-oriented approach for interacting with TUF metadata
* consistent snapshots
* signing and verifying TUF metadata
* ED25519, RSA, and ECDSA key types referenced by the latest TUF specification
* top-level role delegation
* target delegation via standard and hash bin delegations
* support of [succinct hash bin delegations](https://github.com/theupdateframework/taps/blob/master/tap15.md) which significantly reduce the size of the TUF metadata
* support for unrecognized fields within the metadata (i.e. preserved and accessible through `root.Signed.UnrecognizedFields["some-unknown-field"]`, also used for verifying/signing (if included in the Signed portion of the metadata))
* TUF client API
* TUF multi-repository client API (implements [TAP 4 - Multiple repository consensus on entrusted targets](https://github.com/theupdateframework/taps/blob/master/tap4.md))
## Examples
----------------------------
There are several examples that can act as a guideline on how to use the library and its features, some of which are:
* [basic_repository.go](examples/repository/basic_repository.go) example which demonstrates how to *manually* create and
maintain repository metadata using the low-level Metadata API.
To try it - run `make example-repository` (the artifacts will be located at `examples/repository/`).
* [client_example.go](examples/client/client_example.go) which demonstrates how to implement a client using the [updater](metadata/updater/updater.go) package.
To try it - run `make example-client` (the artifacts will be located at `examples/client/`)
* [tuf-client CLI](examples/cli/tuf-client/) - a CLI tool that implements the client workflow specified by The Update Framework (TUF) specification.
To try it - run `make example-tuf-client-cli`
* [multi-repository client example (TAP4)](examples/multirepo/client/client_example.go) which demonstrates how to implement a multi-repository TUF client using the [multirepo](metadata/multirepo/multirepo.go) package.
To try it - run `make example-multirepo`
## Package details
----------------------------
### The `metadata` package
* The `metadata` package provides access to a Metadata file abstraction that closely
follows the TUF specification’s document formats. This API handles de/serialization
to and from files and bytes. It also covers the process of creating and verifying metadata
signatures and makes it easier to access and modify metadata content. It is purely
focused on individual pieces of Metadata and provides no concepts like “repository”
or “update workflow”.
### The `trustedmetadata` package
* A `TrustedMetadata` instance ensures that the collection of metadata in it is valid
and trusted through the whole client update workflow. It provides easy ways to update
the metadata with the caller making decisions on what is updated.
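For illustration, a minimal sketch of verifying a trusted root with this package, modeled on the `tuf-client init` command in `examples/cli/tuf-client/cmd/init.go` (the `root.json` path is a placeholder):

```go
package main

import (
	"fmt"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
)

func main() {
	// Read a locally stored trusted root metadata file (placeholder path).
	rootBytes, err := os.ReadFile("root.json")
	if err != nil {
		panic(err)
	}
	// trustedmetadata.New verifies the root metadata before it is used as
	// the root of trust for the rest of the client workflow.
	if _, err := trustedmetadata.New(rootBytes); err != nil {
		panic(err)
	}
	fmt.Println("root.json verified and trusted")
}
```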
### The `config` package
* The `config` package stores configuration for an ``Updater`` instance.
### The `fetcher` package
* The `fetcher` package defines an interface for abstract network download.
### The `updater` package
* The `updater` package provides an implementation of the TUF client workflow.
It provides ways to query and download target files securely while handling the
TUF update workflow behind the scenes. It is implemented on top of the Metadata API
and can be used to implement various TUF clients with relatively little effort.
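As an illustration, here is a minimal sketch of that workflow, condensed from the `tuf-client get` command in `examples/cli/tuf-client/cmd/get.go`; the repository URLs, local directory names, and target path are placeholders:

```go
package main

import (
	"fmt"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/config"
	"github.com/theupdateframework/go-tuf/v2/metadata/updater"
)

func main() {
	// Trust is bootstrapped out-of-band from a locally stored root.json (placeholder path).
	rootBytes, err := os.ReadFile("root.json")
	if err != nil {
		panic(err)
	}

	// Build an updater configuration; the URLs below are placeholders.
	cfg, err := config.New("https://example.com/metadata", rootBytes)
	if err != nil {
		panic(err)
	}
	cfg.LocalMetadataDir = "tuf_metadata"
	cfg.LocalTargetsDir = "tuf_download"
	cfg.RemoteTargetsURL = "https://example.com/targets"
	_ = os.MkdirAll(cfg.LocalMetadataDir, 0750)
	_ = os.MkdirAll(cfg.LocalTargetsDir, 0750)

	// Create an Updater and run the TUF client workflow to refresh the top-level metadata.
	up, err := updater.New(cfg)
	if err != nil {
		panic(err)
	}
	if err := up.Refresh(); err != nil {
		panic(err)
	}

	// Look up a target in the verified metadata and download it.
	info, err := up.GetTargetInfo("example-target.txt")
	if err != nil {
		panic(err)
	}
	path, _, err := up.DownloadTarget(info, "", "")
	if err != nil {
		panic(err)
	}
	fmt.Println("downloaded target to", path)
}
```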
### The `multirepo` package
* The `multirepo` package provides an implementation of [TAP 4 - Multiple repository consensus on entrusted targets](https://github.com/theupdateframework/taps/blob/master/tap4.md). It provides a secure search for particular targets across multiple repositories. It provides the functionality for how multiple repositories with separate roots of trust can be required to sign off on the same targets, effectively creating an AND relation and ensuring any files obtained can be trusted. It offers a way to initialize multiple repositories using a `map.json` file and also mechanisms to query and download target files securely. It is implemented on top of the Updater API and can be used to implement various multi-repository TUF clients with relatively little effort.
## Documentation
----------------------------
* [Documentation](https://pkg.go.dev/github.com/theupdateframework/go-tuf)
* [Introduction to TUF's Design](https://theupdateframework.io/overview/)
* [The TUF Specification](https://theupdateframework.github.io/specification/latest/)
## History - legacy go-tuf vs go-tuf/v2
The [legacy go-tuf (v0.7.0)](https://github.com/theupdateframework/go-tuf/tree/v0.7.0) codebase was difficult to maintain and prone to errors due to its initial design decisions. It is now considered deprecated in favour of go-tuf v2 (originally [rdimitrov/go-tuf-metadata](https://github.com/rdimitrov/go-tuf-metadata)), which started from the idea of providing a Go implementation of TUF that is heavily influenced by the design decisions made in [python-tuf](https://github.com/theupdateframework/python-tuf).
## Contact
----------------------------
Questions, feedback, and suggestions are welcomed on the [#tuf](https://cloud-native.slack.com/archives/C8NMD3QJ3) and/or [#go-tuf](https://cloud-native.slack.com/archives/C02D577GX54) channels on
[CNCF Slack](https://slack.cncf.io/).
We strive to make the specification easy to implement, so if you come across
any inconsistencies or experience any difficulty, do let us know by sending an
email, or by reporting an issue in the GitHub [specification
repo](https://github.com/theupdateframework/specification/issues).
golang-github-theupdateframework-go-tuf-2.0.2/SECURITY.md 0000664 0000000 0000000 00000002565 14706111210 0023132 0 ustar 00root root 0000000 0000000 # Security Policy
The go-tuf community is committed to maintaining a reliable and consistent TUF client implementation. If you believe you have identified a security issue in go-tuf's client protocol, please follow these guidelines for responsible disclosure.
## Supported Versions
You may report issues for the most recent version of go-tuf. We will not retroactively make changes to older versions.
## Reporting a Vulnerability
If you discover a potential security issue in this project we ask that you notify the go-tuf maintainers via [Github's private reporting feature](https://github.com/theupdateframework/go-tuf/security/advisories/new) (requires being signed in to GitHub). At the minimum, the report must contain the following:
* A description of the issue.
* A specific version or commit SHA of `go-tuf` where the issue reproduces.
* Instructions to reproduce the issue.
Please do **not** create a public GitHub issue or pull request to submit vulnerability reports. These public trackers are intended for non-time-sensitive and non-security-related bug reports and feature requests. Major feature requests, such as design changes to the specification, should be proposed via a [TUF Augmentation Protocol](https://theupdateframework.github.io/specification/latest/#tuf-augmentation-proposal-tap-support) (TAP).
## Disclosure
This project follows a 90 day disclosure timeline.
golang-github-theupdateframework-go-tuf-2.0.2/examples/ 0000775 0000000 0000000 00000000000 14706111210 0023147 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/README.md 0000664 0000000 0000000 00000004605 14706111210 0024433 0 ustar 00root root 0000000 0000000 # Examples
----------------------------
## Repository
----------------------------
See the [basic_repository.go](repository/basic_repository.go) example which demonstrates how to *manually* create and
maintain repository metadata using the low-level Metadata API.
The example highlights the following functionality supported by the metadata API:
* creation of top-level metadata
* target file handling
* consistent snapshots
* support a mixture of key types - ED25519, RSA and ECDSA
* top-level delegation and signing thresholds
* metadata verification
* target delegation
* in-band and out-of-band metadata signing
* writing and reading metadata files
* root key rotation
## Client
----------------------------
There's also a [client_example.go](client/client_example.go) which demonstrates how to implement a client using the [updater](metadata/updater/updater.go) package.
* it uses [https://jku.github.io/tuf-demo](https://jku.github.io/tuf-demo), a live TUF repository hosted on GitHub
* shows an example of how to initialize a client
* shows an example of how to download a target file
* the repository is based on python-tuf so it also highlights the interoperability between the two implementations
## Multi-repository client
----------------------------
There's a [client_example.go](multirepo/client/client_example.go) which demonstrates how to implement a multi-repository client using the [multirepo](metadata/multirepo/multirepo.go) package which implements [TAP 4 - Multiple repository consensus on entrusted targets](https://github.com/theupdateframework/taps/blob/master/tap4.md). The example consists of the following:
* The `map.json` along with the root files for each repository are distributed via a trusted repository used for initialization
* The metadata, these target files and the script generating them are located in the [examples/multirepo/repository](../repository/) folder
* These files are then used to bootstrap the multi-repository TUF client
* Shows the API provided by the `multirepo` package
## CLI tools
----------------------------
The following CLIs are experimental replacements of the CLI tools provided by the go-tuf package. At some point these will be moved to a separate repository.
* [tuf-client](cli/tuf-client/README.md) - a CLI tool that implements the client workflow specified by The Update Framework (TUF) specification
* [tuf](cli/tuf/README.md) - Not implemented
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/README.md
# tuf and tuf-client CLI tools
----------------------------
## Overview
----------------------------
The following CLIs are experimental replacements of the CLI tools provided by the go-tuf package:
* [tuf-client](tuf-client/README.md) - a CLI tool that implements the client workflow specified by The Update Framework (TUF) specification
* [tuf](tuf/README.md) - Not implemented
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/README.md
# tuf-client CLI
----------------------------
## Overview
----------------------------
`tuf-client` is a CLI tool that implements the client workflow specified by The Update Framework (TUF) specification.
The tuf-client can be used to query for available targets and to download them in a secure manner.
All downloaded files are verified by signed metadata.
The CLI provides three commands:
* `tuf-client init` - Initialize the client with trusted root.json metadata
* `tuf-client get` - Download a target file
* `tuf-client reset` - Resets the local environment. Warning: this deletes both the metadata and download folders and all of their contents
All commands except `reset` require the URL of the TUF repository, passed via the `--url` (`-u`) flag.
Run `tuf-client help` from the command line to get more detailed usage information.
## Usage
----------------------------
```bash
# Initialize by providing a root.json
#
# Usage: tuf-client init --url <metadata-url> -f root.json
#
$ tuf-client init --url https://jku.github.io/tuf-demo/metadata -f root.json
# Initialize without providing a root.json
#
# Usage: tuf-client init --url <metadata-url>
#
$ tuf-client init --url https://jku.github.io/tuf-demo/metadata
# Get a target
#
# Usage: tuf-client get --url <metadata-url> <target>
#
$ tuf-client get --url https://jku.github.io/tuf-demo/metadata demo/succinctly-delegated-5.txt
# Get a target by providing a URL of where target files are located
#
# Usage: tuf-client get --url <metadata-url> -t <targets-url> <target>
#
# Use --nonprefixed for non-prefixed target files
#
$ tuf-client get --url https://jku.github.io/tuf-demo/metadata --turl https://jku.github.io/tuf-demo/targets --nonprefixed demo/succinctly-delegated-5.txt
# Reset your local environment
$ tuf-client reset
```
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/cmd/ 0000775 0000000 0000000 00000000000 14706111210 0026533 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/cmd/get.go 0000664 0000000 0000000 00000011153 14706111210 0027642 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package cmd
import (
"fmt"
stdlog "log"
"os"
"path/filepath"
"github.com/go-logr/stdr"
"github.com/spf13/cobra"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
"github.com/theupdateframework/go-tuf/v2/metadata/updater"
)
var targetsURL string
var useNonHashPrefixedTargetFiles bool
type localConfig struct {
MetadataDir string
DownloadDir string
MetadataURL string
TargetsURL string
}
var getCmd = &cobra.Command{
Use: "get",
Aliases: []string{"g"},
Short: "Download a target file",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if RepositoryURL == "" {
fmt.Println("Error: required flag(s) \"url\" not set")
os.Exit(1)
}
return GetCmd(args[0])
},
}
func init() {
getCmd.Flags().StringVarP(&targetsURL, "turl", "t", "", "URL of where the target files are hosted")
getCmd.Flags().BoolVarP(&useNonHashPrefixedTargetFiles, "nonprefixed", "", false, "Do not use hash-prefixed target files with consistent snapshots")
rootCmd.AddCommand(getCmd)
}
func GetCmd(target string) error {
// set logger and debug verbosity level
metadata.SetLogger(stdr.New(stdlog.New(os.Stdout, "get_cmd", stdlog.LstdFlags)))
if Verbosity {
stdr.SetVerbosity(5)
}
// verify the client environment was initialized and fetch path names
env, err := verifyEnv()
if err != nil {
return err
}
// read the trusted root metadata
rootBytes, err := os.ReadFile(filepath.Join(env.MetadataDir, "root.json"))
if err != nil {
return err
}
// updater configuration
cfg, err := config.New(env.MetadataURL, rootBytes) // default config
if err != nil {
return err
}
cfg.LocalMetadataDir = env.MetadataDir
cfg.LocalTargetsDir = env.DownloadDir
cfg.RemoteTargetsURL = env.TargetsURL
cfg.PrefixTargetsWithHash = !useNonHashPrefixedTargetFiles
// create an Updater instance
up, err := updater.New(cfg)
if err != nil {
return fmt.Errorf("failed to create Updater instance: %w", err)
}
// try to build the top-level metadata
err = up.Refresh()
if err != nil {
return fmt.Errorf("failed to refresh trusted metadata: %w", err)
}
// search if the desired target is available
targetInfo, err := up.GetTargetInfo(target)
if err != nil {
return fmt.Errorf("target %s not found: %w", target, err)
}
// target is available, so let's see if the target is already present locally
path, _, err := up.FindCachedTarget(targetInfo, "")
if err != nil {
return fmt.Errorf("failed while finding a cached target: %w", err)
}
if path != "" {
fmt.Printf("Target %s is already present at - %s\n", target, path)
return nil
}
// target is not present locally, so let's try to download it
path, _, err = up.DownloadTarget(targetInfo, "", "")
if err != nil {
return fmt.Errorf("failed to download target file %s - %w", target, err)
}
fmt.Printf("Successfully downloaded target %s at - %s\n", target, path)
return nil
}
func verifyEnv() (*localConfig, error) {
// get working directory
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// if no targetsURL is set, we expect that the target files are located at the same location where the metadata is
if targetsURL == "" {
targetsURL = RepositoryURL
}
// start populating what we need
env := &localConfig{
MetadataDir: filepath.Join(cwd, DefaultMetadataDir),
DownloadDir: filepath.Join(cwd, DefaultDownloadDir),
MetadataURL: RepositoryURL,
TargetsURL: targetsURL,
}
// verify there's local metadata folder
_, err = os.Stat(env.MetadataDir)
if err != nil {
return nil, fmt.Errorf("no local metadata folder: %w", err)
}
// verify there's local download folder
_, err = os.Stat(env.DownloadDir)
if err != nil {
return nil, fmt.Errorf("no local download folder: %w", err)
}
// verify there's a local root.json available for bootstrapping trust
_, err = os.Stat(filepath.Join(env.MetadataDir, fmt.Sprintf("%s.json", metadata.ROOT)))
if err != nil {
return nil, fmt.Errorf("no local download folder: %w", err)
}
return env, nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/cmd/init.go 0000664 0000000 0000000 00000011000 14706111210 0030015 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package cmd
import (
"fmt"
"io"
stdlog "log"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/go-logr/stdr"
"github.com/spf13/cobra"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
)
var rootPath string
var initCmd = &cobra.Command{
Use: "init",
Aliases: []string{"i"},
Short: "Initialize the client with trusted root.json metadata",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
if RepositoryURL == "" {
fmt.Println("Error: required flag(s) \"url\" not set")
os.Exit(1)
}
return InitializeCmd()
},
}
func init() {
initCmd.Flags().StringVarP(&rootPath, "file", "f", "", "location of the trusted root metadata file")
rootCmd.AddCommand(initCmd)
}
func InitializeCmd() error {
copyTrusted := true
// set logger and debug verbosity level
metadata.SetLogger(stdr.New(stdlog.New(os.Stdout, "ini_cmd", stdlog.LstdFlags)))
if Verbosity {
stdr.SetVerbosity(5)
}
// prepare the local environment
localMetadataDir, err := prepareEnvironment()
if err != nil {
return err
}
// if there's no root.json file passed, try to download the 1.root.json from the repository URL
if rootPath == "" {
fmt.Printf("No root.json file was provided. Trying to download one from %s\n", RepositoryURL)
rootPath, err = fetchTrustedRoot(localMetadataDir)
if err != nil {
return err
}
rootPath = filepath.Join(rootPath, fmt.Sprintf("%s.json", metadata.ROOT))
// no need to copy root.json to the metadata folder as we already downloaded it to the expected location
copyTrusted = false
}
// read the content of root.json
rootBytes, err := ReadFile(rootPath)
if err != nil {
return err
}
// verify the content
_, err = trustedmetadata.New(rootBytes)
if err != nil {
return err
}
// Save the trusted root.json file to the metadata folder so it is available for future operations (unless we just downloaded it there)
if copyTrusted {
err = os.WriteFile(filepath.Join(localMetadataDir, fmt.Sprintf("%s.json", metadata.ROOT)), rootBytes, 0644)
if err != nil {
return err
}
}
fmt.Println("Initialization successful")
return nil
}
// prepareEnvironment prepares the local environment
func prepareEnvironment() (string, error) {
// get working directory
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get current working directory: %w", err)
}
metadataPath := filepath.Join(cwd, DefaultMetadataDir)
downloadPath := filepath.Join(cwd, DefaultDownloadDir)
// create a folder for storing the artifacts
err = os.Mkdir(metadataPath, 0750)
if err != nil {
return "", fmt.Errorf("failed to create local metadata folder: %w", err)
}
// create a destination folder for storing the downloaded target
err = os.Mkdir(downloadPath, 0750)
if err != nil {
return "", fmt.Errorf("failed to create download folder: %w", err)
}
return metadataPath, nil
}
// fetchTrustedRoot downloads the initial root metadata
func fetchTrustedRoot(metadataDir string) (string, error) {
// download the initial root metadata so we can bootstrap Trust-On-First-Use
rootURL, err := url.JoinPath(RepositoryURL, "1.root.json")
if err != nil {
return "", fmt.Errorf("failed to create URL path for 1.root.json: %w", err)
}
req, err := http.NewRequest("GET", rootURL, nil)
if err != nil {
return "", fmt.Errorf("failed to create http request: %w", err)
}
client := http.DefaultClient
res, err := client.Do(req)
if err != nil {
return "", fmt.Errorf("failed to execute the http request: %w", err)
}
defer res.Body.Close()
data, err := io.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("failed to read the http request body: %w", err)
}
// write the downloaded root metadata to file
err = os.WriteFile(filepath.Join(metadataDir, "root.json"), data, 0644)
if err != nil {
return "", fmt.Errorf("failed to write root.json metadata: %w", err)
}
return metadataDir, nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/cmd/reset.go 0000664 0000000 0000000 00000005271 14706111210 0030211 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
)
var ForceDelete bool
var resetCmd = &cobra.Command{
Use: "reset",
Aliases: []string{"r"},
Short: "Resets the local environment. Warning: this deletes both the metadata and download folders and all of their contents",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return ResetCmd()
},
}
func init() {
resetCmd.Flags().BoolVarP(&ForceDelete, "force", "f", false, "force delete without waiting for confirmation")
rootCmd.AddCommand(resetCmd)
}
func ResetCmd() error {
// get working directory
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("failed to get current working directory: %w", err)
}
// folders to delete
metadataPath := filepath.Join(cwd, DefaultMetadataDir)
downloadPath := filepath.Join(cwd, DefaultDownloadDir)
// warning: deletes the metadata folder and all of its contents
fmt.Printf("Warning: Are you sure you want to delete the \"%s\" folder and all of its contents? (y/n)\n", metadataPath)
if ForceDelete || askForConfirmation() {
os.RemoveAll(metadataPath)
fmt.Printf("Folder %s was successfully deleted\n", metadataPath)
} else {
fmt.Printf("Folder \"%s\" was not deleted\n", metadataPath)
}
// warning: deletes the download folder and all of its contents
fmt.Printf("Warning: Are you sure you want to delete the \"%s\" folder and all of its contents? (y/n)\n", downloadPath)
if ForceDelete || askForConfirmation() {
os.RemoveAll(downloadPath)
fmt.Printf("Folder %s was successfully deleted\n", downloadPath)
} else {
fmt.Printf("Folder \"%s\" was not deleted\n", downloadPath)
}
return nil
}
func askForConfirmation() bool {
var response string
_, err := fmt.Scanln(&response)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
switch strings.ToLower(response) {
case "y", "yes":
return true
case "n", "no":
return false
default:
fmt.Println("I'm sorry but I didn't get what you meant, please type (y)es or (n)o and then press enter:")
return askForConfirmation()
}
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/cmd/root.go 0000664 0000000 0000000 00000003655 14706111210 0030056 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package cmd
import (
"io"
"os"
"github.com/spf13/cobra"
)
const (
DefaultMetadataDir = "tuf_metadata"
DefaultDownloadDir = "tuf_download"
)
var Verbosity bool
var RepositoryURL string
var rootCmd = &cobra.Command{
Use: "tuf-client",
Short: "tuf-client - a client-side CLI tool for The Update Framework (TUF)",
Long: `tuf-client is a CLI tool that implements the client workflow specified by The Update Framework (TUF) specification.
The tuf-client can be used to query for available targets and to download them in a secure manner.
All downloaded files are verified by signed metadata.`,
Run: func(cmd *cobra.Command, args []string) {
// show the help message if no command has been used
if len(args) == 0 {
_ = cmd.Help()
os.Exit(0)
}
},
}
func Execute() {
rootCmd.PersistentFlags().BoolVarP(&Verbosity, "verbose", "v", false, "verbose output")
rootCmd.PersistentFlags().StringVarP(&RepositoryURL, "url", "u", "", "URL of the TUF repository")
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}
// ReadFile reads the content of a file and return its bytes
func ReadFile(name string) ([]byte, error) {
in, err := os.Open(name)
if err != nil {
return nil, err
}
defer in.Close()
data, err := io.ReadAll(in)
if err != nil {
return nil, err
}
return data, nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf-client/main.go 0000664 0000000 0000000 00000001426 14706111210 0027246 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
tufclient "github.com/theupdateframework/go-tuf/v2/examples/cli/tuf-client/cmd"
)
func main() {
tufclient.Execute()
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf/ 0000775 0000000 0000000 00000000000 14706111210 0024514 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf/README.md 0000664 0000000 0000000 00000000144 14706111210 0025772 0 ustar 00root root 0000000 0000000 # tuf CLI
----------------------------
## Overview
----------------------------
Not implemented
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf/cmd/ 0000775 0000000 0000000 00000000000 14706111210 0025257 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf/cmd/init.go 0000664 0000000 0000000 00000002461 14706111210 0026554 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package cmd
import (
"fmt"
stdlog "log"
"os"
"github.com/go-logr/stdr"
"github.com/spf13/cobra"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
var initCmd = &cobra.Command{
Use: "init",
Aliases: []string{"i"},
Short: "Initialize a repository",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return InitializeCmd()
},
}
func init() {
rootCmd.AddCommand(initCmd)
}
func InitializeCmd() error {
// set logger and debug verbosity level
metadata.SetLogger(stdr.New(stdlog.New(os.Stdout, "ini_cmd", stdlog.LstdFlags)))
if Verbosity {
stdr.SetVerbosity(5)
}
fmt.Println("Initialization successful")
return nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf/cmd/root.go 0000664 0000000 0000000 00000002621 14706111210 0026572 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package cmd
import (
"io"
"os"
"github.com/spf13/cobra"
)
var Verbosity bool
var rootCmd = &cobra.Command{
Use: "tuf",
Short: "tuf - a repository-side CLI tool for The Update Framework (TUF)",
Long: "tuf - a repository-side CLI tool for The Update Framework (TUF)",
Run: func(cmd *cobra.Command, args []string) {
},
}
func Execute() {
rootCmd.PersistentFlags().BoolVarP(&Verbosity, "verbose", "v", false, "verbose output")
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}
// ReadFile reads the content of a file and return its bytes
func ReadFile(name string) ([]byte, error) {
in, err := os.Open(name)
if err != nil {
return nil, err
}
defer in.Close()
data, err := io.ReadAll(in)
if err != nil {
return nil, err
}
return data, nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/cli/tuf/main.go 0000664 0000000 0000000 00000001475 14706111210 0025776 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"fmt"
"os"
tuf "github.com/theupdateframework/go-tuf/v2/examples/cli/tuf/cmd"
)
func main() {
fmt.Println("Not implemented")
os.Exit(1)
tuf.Execute()
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/client/ 0000775 0000000 0000000 00000000000 14706111210 0024425 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/client/client_example.go 0000664 0000000 0000000 00000013204 14706111210 0027745 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"fmt"
"io"
stdlog "log"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/go-logr/stdr"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
"github.com/theupdateframework/go-tuf/v2/metadata/updater"
)
// The following config is used to fetch a target from Jussi's GitHub repository example
const (
metadataURL = "https://jku.github.io/tuf-demo/metadata"
targetsURL = "https://jku.github.io/tuf-demo/targets"
targetName = "rdimitrov/artifact-example.md"
verbosity = 4
generateRandomFolder = false
)
func main() {
// set logger to stdout with info level
metadata.SetLogger(stdr.New(stdlog.New(os.Stdout, "client_example", stdlog.LstdFlags)))
stdr.SetVerbosity(verbosity)
log := metadata.GetLogger()
// initialize environment - temporary folders, etc.
metadataDir, err := InitEnvironment()
if err != nil {
log.Error(err, "Failed to initialize environment")
}
// initialize client with Trust-On-First-Use
err = InitTrustOnFirstUse(metadataDir)
if err != nil {
log.Error(err, "Trust-On-First-Use failed")
}
// download the desired target
err = DownloadTarget(metadataDir, targetName)
if err != nil {
log.Error(err, "Download failed")
}
}
// InitEnvironment prepares the local environment - temporary folders, etc.
func InitEnvironment() (string, error) {
var tmpDir string
// get working directory
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get current working directory: %w", err)
}
if !generateRandomFolder {
tmpDir = filepath.Join(cwd, "tmp")
// create a temporary folder for storing the demo artifacts
os.Mkdir(tmpDir, 0750)
} else {
// create a temporary folder for storing the demo artifacts
tmpDir, err = os.MkdirTemp(cwd, "tmp")
if err != nil {
return "", fmt.Errorf("failed to create a temporary folder: %w", err)
}
}
// create a destination folder for storing the downloaded target
os.Mkdir(filepath.Join(tmpDir, "download"), 0750)
return tmpDir, nil
}
// InitTrustOnFirstUse initializes local trusted metadata (Trust-On-First-Use)
func InitTrustOnFirstUse(metadataDir string) error {
// check if there's already a local root.json available for bootstrapping trust
_, err := os.Stat(filepath.Join(metadataDir, "root.json"))
if err == nil {
return nil
}
// download the initial root metadata so we can bootstrap Trust-On-First-Use
rootURL, err := url.JoinPath(metadataURL, "1.root.json")
if err != nil {
return fmt.Errorf("failed to create URL path for 1.root.json: %w", err)
}
req, err := http.NewRequest("GET", rootURL, nil)
if err != nil {
return fmt.Errorf("failed to create http request: %w", err)
}
client := http.DefaultClient
res, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to execute the http request: %w", err)
}
defer res.Body.Close()
data, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("failed to read the http request body: %w", err)
}
// write the downloaded root metadata to file
err = os.WriteFile(filepath.Join(metadataDir, "root.json"), data, 0644)
if err != nil {
return fmt.Errorf("failed to write root.json metadata: %w", err)
}
return nil
}
// DownloadTarget downloads the target file using Updater. The Updater refreshes the top-level metadata,
// gets the target information, verifies whether the target is already cached, and, if it
// is not cached, downloads the target file.
func DownloadTarget(localMetadataDir, target string) error {
log := metadata.GetLogger()
rootBytes, err := os.ReadFile(filepath.Join(localMetadataDir, "root.json"))
if err != nil {
return err
}
// create updater configuration
cfg, err := config.New(metadataURL, rootBytes) // default config
if err != nil {
return err
}
cfg.LocalMetadataDir = localMetadataDir
cfg.LocalTargetsDir = filepath.Join(localMetadataDir, "download")
cfg.RemoteTargetsURL = targetsURL
cfg.PrefixTargetsWithHash = true
// create a new Updater instance
up, err := updater.New(cfg)
if err != nil {
return fmt.Errorf("failed to create Updater instance: %w", err)
}
// try to build the top-level metadata
err = up.Refresh()
if err != nil {
return fmt.Errorf("failed to refresh trusted metadata: %w", err)
}
// check whether the desired target is available
targetInfo, err := up.GetTargetInfo(target)
if err != nil {
return fmt.Errorf("target %s not found: %w", target, err)
}
// target is available, so let's see if the target is already present locally
path, _, err := up.FindCachedTarget(targetInfo, "")
if err != nil {
return fmt.Errorf("failed while finding a cached target: %w", err)
}
if path != "" {
log.Info("Target is already present", "target", target, "path", path)
return nil
}
// target is not present locally, so let's try to download it
path, _, err = up.DownloadTarget(targetInfo, "", "")
if err != nil {
return fmt.Errorf("failed to download target file %s - %w", target, err)
}
log.Info("Successfully downloaded target", "target", target, "path", path)
return nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/ 0000775 0000000 0000000 00000000000 14706111210 0025167 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/client/ 0000775 0000000 0000000 00000000000 14706111210 0026445 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/client/README.md 0000664 0000000 0000000 00000001261 14706111210 0027724 0 ustar 00root root 0000000 0000000 # Example repository showing a multi repository TUF client (TAP 4)
The following is a TUF multi-repository client example of the `multirepo` package which implements [TAP 4 - Multiple repository consensus on entrusted targets](https://github.com/theupdateframework/taps/blob/master/tap4.md):
- The `map.json` along with the root files for each repository are distributed via a trusted repository used for initialization
- The metadata, the target files and the script that generates them are located in the [examples/multirepo/repository](../repository/) folder
- These files are then used to bootstrap the multi-repository TUF client
- Shows the API provided by the `multirepo` package (see the minimal sketch below)
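For orientation, here is a minimal sketch of that API flow. It is a hypothetical helper, not part of the package: it assumes the `map.json` bytes and the per-repository trusted roots have already been loaded (the bootstrap step in [client_example.go](client_example.go) shows how to fetch them from a trusted repository).

```go
package example

import (
	"path/filepath"

	"github.com/theupdateframework/go-tuf/v2/metadata/multirepo"
)

// fetchWithMultiRepo is a hypothetical helper: mapBytes holds the map.json
// content and trustedRoots maps each repository name to its trusted root.json.
func fetchWithMultiRepo(cwd string, mapBytes []byte, trustedRoots map[string][]byte) error {
	// build a multi-repository client configuration from map.json and the trusted roots
	cfg, err := multirepo.NewConfig(mapBytes, trustedRoots)
	if err != nil {
		return err
	}
	cfg.LocalMetadataDir = filepath.Join(cwd, "metadata")
	cfg.LocalTargetsDir = filepath.Join(cwd, "download")
	// create the client and run the client update workflow against every listed repository
	client, err := multirepo.New(cfg)
	if err != nil {
		return err
	}
	if err := client.Refresh(); err != nil {
		return err
	}
	// resolve the target according to the map.json mapping, then download it
	targetInfo, repositories, err := client.GetTargetInfo("rekor.pub")
	if err != nil {
		return err
	}
	_, _, err = client.DownloadTarget(repositories, targetInfo, "", "")
	return err
}
```

Note that `GetTargetInfo` also returns the set of repositories that agreed on the target per the `map.json` mapping, and that set must be passed back to `DownloadTarget`.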
golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/client/client_example.go 0000664 0000000 0000000 00000013342 14706111210 0031770 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"fmt"
stdlog "log"
"os"
"path/filepath"
"strings"
"github.com/go-logr/stdr"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
"github.com/theupdateframework/go-tuf/v2/metadata/multirepo"
"github.com/theupdateframework/go-tuf/v2/metadata/updater"
)
const (
metadataURL = "https://raw.githubusercontent.com/theupdateframework/go-tuf/master/examples/multirepo/repository/metadata"
targetsURL = "https://raw.githubusercontent.com/theupdateframework/go-tuf/master/examples/multirepo/repository/targets"
verbosity = 4
)
func main() {
// set logger to stdout with info level
metadata.SetLogger(stdr.New(stdlog.New(os.Stdout, "multirepo_client_example", stdlog.LstdFlags)))
stdr.SetVerbosity(verbosity)
// Bootstrap TUF
fmt.Printf("Bootstrapping the initial TUF repo - fetching map.json file and necessary trusted root files\n\n")
mapBytes, trustedRoots, err := BootstrapTUF() // returns the map.json and the trusted root files
if err != nil {
panic(err)
}
// Initialize the multi-repository TUF client
fmt.Printf("Initializing the multi-repository TUF client with the given map.json file\n\n")
client, err := InitMultiRepoTUF(mapBytes, trustedRoots)
if err != nil {
panic(err)
}
// Refresh all repositories
fmt.Printf("Refreshing each TUF client (updating metadata/client update workflow)\n\n")
err = client.Refresh()
if err != nil {
panic(err)
}
// Get target info for the given target
fmt.Printf("Searching for a target using the multi-repository TUF client\n\n")
targetInfo, repositories, err := client.GetTargetInfo("rekor.pub") // rekor.pub trusted_root.json fulcio_v1.crt.pem
if err != nil {
panic(err)
}
// Download the target using that target info
fmt.Println("Downloading a target using the multi-repository TUF client")
_, _, err = client.DownloadTarget(repositories, targetInfo, "", "")
if err != nil {
panic(err)
}
}
// BootstrapTUF returns the map file and the related trusted root metadata files
func BootstrapTUF() ([]byte, map[string][]byte, error) {
log := metadata.GetLogger()
trustedRoots := map[string][]byte{}
mapBytes := []byte{}
// get working directory
cwd, err := os.Getwd()
if err != nil {
return nil, nil, fmt.Errorf("failed to get current working directory: %w", err)
}
targetsDir := filepath.Join(cwd, "bootstrap/targets")
// ensure the necessary folder layout
err = os.MkdirAll(targetsDir, os.ModePerm)
if err != nil {
return nil, nil, err
}
// read the trusted root metadata
rootBytes, err := os.ReadFile(filepath.Join(cwd, "root.json"))
if err != nil {
return nil, nil, err
}
// create updater configuration
cfg, err := config.New(metadataURL, rootBytes) // default config
if err != nil {
return nil, nil, err
}
cfg.LocalMetadataDir = filepath.Join(cwd, "bootstrap")
cfg.LocalTargetsDir = targetsDir
cfg.RemoteTargetsURL = targetsURL
// create a new Updater instance
up, err := updater.New(cfg)
if err != nil {
return nil, nil, fmt.Errorf("failed to create Updater instance: %w", err)
}
// build the top-level metadata
err = up.Refresh()
if err != nil {
return nil, nil, fmt.Errorf("failed to refresh trusted metadata: %w", err)
}
// download all target files
for name, targetInfo := range up.GetTopLevelTargets() {
// see if the target is already present locally
path, _, err := up.FindCachedTarget(targetInfo, "")
if err != nil {
return nil, nil, fmt.Errorf("failed while finding a cached target: %w", err)
}
if path != "" {
log.Info("Target is already present", "target", name, "path", path)
}
// download the target (even if a cached copy exists, we need its bytes for the return values),
// keeping the same path layout as its target path
expectedTargetLocation := filepath.Join(targetsDir, name)
dirName, _ := filepath.Split(expectedTargetLocation)
err = os.MkdirAll(dirName, os.ModePerm)
if err != nil {
return nil, nil, err
}
// download targets (we don't have to actually store them other than for the sake of the example)
path, bytes, err := up.DownloadTarget(targetInfo, expectedTargetLocation, "")
if err != nil {
return nil, nil, fmt.Errorf("failed to download target file %s - %w", name, err)
}
// populate the return values
if name == "map.json" {
mapBytes = bytes
} else {
// Target names use forward slashes even on Windows
repositoryName := strings.Split(name, "/")
trustedRoots[repositoryName[0]] = bytes
}
log.Info("Successfully downloaded target", "target", name, "path", path)
}
return mapBytes, trustedRoots, nil
}
func InitMultiRepoTUF(mapBytes []byte, trustedRoots map[string][]byte) (*multirepo.MultiRepoClient, error) {
// get working directory
cwd, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("failed to get current working directory: %w", err)
}
// create a new configuration for a multi-repository client
cfg, err := multirepo.NewConfig(mapBytes, trustedRoots)
if err != nil {
return nil, err
}
cfg.LocalMetadataDir = filepath.Join(cwd, "metadata")
cfg.LocalTargetsDir = filepath.Join(cwd, "download")
// create a new instance of a multi-repository TUF client
return multirepo.New(cfg)
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/client/root.json 0000664 0000000 0000000 00000003577 14706111210 0030337 0 ustar 00root root 0000000 0000000 {
"signatures": [
{
"keyid": "8be9c0369e7f2e2aacd9fde0267c5ab224bd65b3e8f892f09168050c53dd1a4d",
"sig": "86e985ac792a7c92489b781e371c9c4ea0b4ba944f56f19783df8404cc7d76dc2ea44b13fd508bd70cbf9cd481e155b4a41ce0b4f030ea481bea9015fa055c06"
}
],
"signed": {
"_type": "root",
"consistent_snapshot": true,
"expires": "2034-09-30T11:29:58.188964Z",
"keys": {
"1e0c46c0f988b6f45eb0956ed6b0836697a39dca123c56453a3d8ad57c64726b": {
"keytype": "ed25519",
"keyval": {
"public": "a5ebf16e4bfec00df5b3a0a580ef1edf8f5c786f398ad82e4bf1b3761c39fc9b"
},
"scheme": "ed25519"
},
"6ee434fdb4e723ed7d5c556a34e5fabc7412c37ce652dd1a4aeec1e06f86a44c": {
"keytype": "ed25519",
"keyval": {
"public": "c934af418d5a32992e5ab6e1bb1a0fddbd0d944654b9dbe53b507b1552ac7057"
},
"scheme": "ed25519"
},
"8be9c0369e7f2e2aacd9fde0267c5ab224bd65b3e8f892f09168050c53dd1a4d": {
"keytype": "ed25519",
"keyval": {
"public": "e48f729e90a19ac8cf227d7a5e56dcfd52bdc30258fc426255c856959935cb9e"
},
"scheme": "ed25519"
},
"b779120edb45353d2a151004fe463ec6f10d90d83c0fa1c755e4e436e2ac8009": {
"keytype": "ed25519",
"keyval": {
"public": "097df4cf52f263630b0e5dfac96b2955b83a253842d0e7fcaffa121a349e6efa"
},
"scheme": "ed25519"
}
},
"roles": {
"root": {
"keyids": [
"8be9c0369e7f2e2aacd9fde0267c5ab224bd65b3e8f892f09168050c53dd1a4d"
],
"threshold": 1
},
"snapshot": {
"keyids": [
"6ee434fdb4e723ed7d5c556a34e5fabc7412c37ce652dd1a4aeec1e06f86a44c"
],
"threshold": 1
},
"targets": {
"keyids": [
"b779120edb45353d2a151004fe463ec6f10d90d83c0fa1c755e4e436e2ac8009"
],
"threshold": 1
},
"timestamp": {
"keyids": [
"1e0c46c0f988b6f45eb0956ed6b0836697a39dca123c56453a3d8ad57c64726b"
],
"threshold": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/ 0000775 0000000 0000000 00000000000 14706111210 0027406 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/README.md 0000664 0000000 0000000 00000001523 14706111210 0030666 0 ustar 00root root 0000000 0000000 # Example repository used for bootstrapping a multi repository TUF client (TAP 4)
The following is a helper TUF repository that serves several targets:
- `map.json` which holds repository mappings and can be used to bootstrap a TUF client supporting multiple repositories
- A set of trusted root files for each repository listed in the `map.json` file
- The [`examples/multirepo/client/client_example.go`](../client/client_example.go) is a client which uses this repository to bootstrap a multi-repository TUF client
## Usage
To regenerate the multi-repo repository,
run the following command from inside the `examples/multirepo/repository` directory:
```bash
go run .
```
This should generate the necessary metadata files in the `metadata` directory and the `map.json` file.
It will also copy the new `root.json` files to the `client` directory.
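Assuming the script runs to completion, the repository should end up with roughly the following layout (the version numbers in the metadata file names depend on the generated metadata versions, and each target file also gets a SHA-256-prefixed copy for consistent snapshots):

```text
repository/
├── generate_metadata.go
├── metadata/
│   ├── 1.root.json
│   ├── 1.snapshot.json
│   ├── 1.targets.json
│   └── timestamp.json
└── targets/
    ├── map.json
    ├── sigstore-tuf-root/root.json
    └── staging/root.json
```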
golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/generate_metadata.go 0000664 0000000 0000000 00000014660 14706111210 0033376 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"crypto"
"crypto/ed25519"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/repository"
)
func main() {
// Create top-level metadata
roles := repository.New()
keys := map[string]ed25519.PrivateKey{}
// Create Targets metadata
targets := metadata.Targets(helperExpireIn(10))
roles.SetTargets("targets", targets)
// Add each target to Targets metadata
for _, targetName := range []string{"targets/map.json", "targets/sigstore-tuf-root/root.json", "targets/staging/root.json"} {
targetPath, localPath := helperGetPathForTarget(targetName)
targetFileInfo, err := metadata.TargetFile().FromFile(localPath, "sha256")
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "generating target file info failed", err))
}
roles.Targets("targets").Signed.Targets[strings.TrimPrefix(targetPath, "targets/")] = targetFileInfo
for _, eachHashValue := range targetFileInfo.Hashes {
err := copyHashPrefixed(localPath, eachHashValue.String())
if err != nil {
panic(err)
}
}
}
// Create Snapshot metadata
snapshot := metadata.Snapshot(helperExpireIn(10))
roles.SetSnapshot(snapshot)
// Create Timestamp metadata
timestamp := metadata.Timestamp(helperExpireIn(10))
roles.SetTimestamp(timestamp)
// Create Root metadata
root := metadata.Root(helperExpireIn(10))
roles.SetRoot(root)
// For this example, we generate one private key of type 'ed25519' for each top-level role
for _, name := range []string{"targets", "snapshot", "timestamp", "root"} {
_, private, err := ed25519.GenerateKey(nil)
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "key generation failed", err))
}
keys[name] = private
key, err := metadata.KeyFromPublicKey(private.Public())
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "key conversion failed", err))
}
err = roles.Root().Signed.AddKey(key, name)
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "adding key to root failed", err))
}
}
// Sign top-level metadata (in-band)
for _, name := range []string{"targets", "snapshot", "timestamp", "root"} {
key := keys[name]
signer, err := signature.LoadSigner(key, crypto.Hash(0))
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "loading a signer failed", err))
}
switch name {
case "targets":
_, err = roles.Targets("targets").Sign(signer)
case "snapshot":
_, err = roles.Snapshot().Sign(signer)
case "timestamp":
_, err = roles.Timestamp().Sign(signer)
case "root":
_, err = roles.Root().Sign(signer)
}
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "metadata signing failed", err))
}
}
// Persist metadata (consistent snapshot)
cwd, err := os.Getwd()
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "getting cwd failed", err))
}
// Save to metadata folder
cwd = filepath.Join(cwd, "metadata")
for _, name := range []string{"targets", "snapshot", "timestamp", "root"} {
switch name {
case "targets":
filename := fmt.Sprintf("%d.%s.json", roles.Targets("targets").Signed.Version, name)
err = roles.Targets("targets").ToFile(filepath.Join(cwd, filename), true)
case "snapshot":
filename := fmt.Sprintf("%d.%s.json", roles.Snapshot().Signed.Version, name)
err = roles.Snapshot().ToFile(filepath.Join(cwd, filename), true)
case "timestamp":
filename := fmt.Sprintf("%s.json", name)
err = roles.Timestamp().ToFile(filepath.Join(cwd, filename), true)
case "root":
filename := fmt.Sprintf("%d.%s.json", roles.Root().Signed.Version, name)
err = roles.Root().ToFile(filepath.Join(cwd, filename), true)
}
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "saving metadata to file failed", err))
}
}
// Save the created root metadata in the client folder; this is the initial trusted root metadata
err = roles.Root().ToFile(filepath.Join(cwd, "../../client/root.json"), true)
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "saving trusted root metadata to client folder failed", err))
}
// Verify that metadata is signed correctly
// Verify root
err = roles.Root().VerifyDelegate("root", roles.Root())
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "verifying root metadata failed", err))
}
// Verify targets
err = roles.Root().VerifyDelegate("targets", roles.Targets("targets"))
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "verifying targets metadata failed", err))
}
// Verify snapshot
err = roles.Root().VerifyDelegate("snapshot", roles.Snapshot())
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "verifying snapshot metadata failed", err))
}
// Verify timestamp
err = roles.Root().VerifyDelegate("timestamp", roles.Timestamp())
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "verifying timestamp metadata failed", err))
}
fmt.Println("Done! Metadata files location:", cwd)
}
// helperExpireIn returns time offset by years (for the sake of the example)
func helperExpireIn(years int) time.Time {
return time.Now().AddDate(years, 0, 0).UTC()
}
// helperGetPathForTarget returns the target path and the local path for the given target
func helperGetPathForTarget(name string) (string, string) {
cwd, err := os.Getwd()
if err != nil {
panic(fmt.Sprintln("generate_metadata.go:", "getting cwd failed", err))
}
// _, dir := filepath.Split(cwd)
// return filepath.Join(dir, name), filepath.Join(cwd, name)
return name, filepath.Join(cwd, name)
}
func copyHashPrefixed(src string, hash string) error {
data, err := os.ReadFile(src)
if err != nil {
return err
}
dirName, fileName := filepath.Split(src)
err = os.WriteFile(filepath.Join(dirName, fmt.Sprintf("%s.%s", hash, fileName)), data, 0644)
if err != nil {
return err
}
return nil
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/metadata/ 0000775 0000000 0000000 00000000000 14706111210 0031166 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/metadata/1.root.json 0000664 0000000 0000000 00000003577 14706111210 0033217 0 ustar 00root root 0000000 0000000 {
"signatures": [
{
"keyid": "8be9c0369e7f2e2aacd9fde0267c5ab224bd65b3e8f892f09168050c53dd1a4d",
"sig": "86e985ac792a7c92489b781e371c9c4ea0b4ba944f56f19783df8404cc7d76dc2ea44b13fd508bd70cbf9cd481e155b4a41ce0b4f030ea481bea9015fa055c06"
}
],
"signed": {
"_type": "root",
"consistent_snapshot": true,
"expires": "2034-09-30T11:29:58.188964Z",
"keys": {
"1e0c46c0f988b6f45eb0956ed6b0836697a39dca123c56453a3d8ad57c64726b": {
"keytype": "ed25519",
"keyval": {
"public": "a5ebf16e4bfec00df5b3a0a580ef1edf8f5c786f398ad82e4bf1b3761c39fc9b"
},
"scheme": "ed25519"
},
"6ee434fdb4e723ed7d5c556a34e5fabc7412c37ce652dd1a4aeec1e06f86a44c": {
"keytype": "ed25519",
"keyval": {
"public": "c934af418d5a32992e5ab6e1bb1a0fddbd0d944654b9dbe53b507b1552ac7057"
},
"scheme": "ed25519"
},
"8be9c0369e7f2e2aacd9fde0267c5ab224bd65b3e8f892f09168050c53dd1a4d": {
"keytype": "ed25519",
"keyval": {
"public": "e48f729e90a19ac8cf227d7a5e56dcfd52bdc30258fc426255c856959935cb9e"
},
"scheme": "ed25519"
},
"b779120edb45353d2a151004fe463ec6f10d90d83c0fa1c755e4e436e2ac8009": {
"keytype": "ed25519",
"keyval": {
"public": "097df4cf52f263630b0e5dfac96b2955b83a253842d0e7fcaffa121a349e6efa"
},
"scheme": "ed25519"
}
},
"roles": {
"root": {
"keyids": [
"8be9c0369e7f2e2aacd9fde0267c5ab224bd65b3e8f892f09168050c53dd1a4d"
],
"threshold": 1
},
"snapshot": {
"keyids": [
"6ee434fdb4e723ed7d5c556a34e5fabc7412c37ce652dd1a4aeec1e06f86a44c"
],
"threshold": 1
},
"targets": {
"keyids": [
"b779120edb45353d2a151004fe463ec6f10d90d83c0fa1c755e4e436e2ac8009"
],
"threshold": 1
},
"timestamp": {
"keyids": [
"1e0c46c0f988b6f45eb0956ed6b0836697a39dca123c56453a3d8ad57c64726b"
],
"threshold": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/metadata/1.snapshot.json0000664 0000000 0000000 00000000667 14706111210 0034070 0 ustar 00root root 0000000 0000000 {
"signatures": [
{
"keyid": "6ee434fdb4e723ed7d5c556a34e5fabc7412c37ce652dd1a4aeec1e06f86a44c",
"sig": "d585b3ee76eb84c386215ec96d4c09a1d24a895d4620b1275b68110e255604037e180deb666aae9bf62ff7cda1844a0f1ddad34e8e911833482c7fcaf6a0cd07"
}
],
"signed": {
"_type": "snapshot",
"expires": "2034-09-30T11:29:58.188962Z",
"meta": {
"targets.json": {
"version": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/metadata/1.targets.json 0000664 0000000 0000000 00000001541 14706111210 0033672 0 ustar 00root root 0000000 0000000 {
"signatures": [
{
"keyid": "b779120edb45353d2a151004fe463ec6f10d90d83c0fa1c755e4e436e2ac8009",
"sig": "548ef3fb01c6def4b972444e05a5bc7a7c0e5750eb0c78dc04bd52997849185b3ec72a440157a467c42ff6dd63a808a047a0aee944855e319734bb927e9d7e05"
}
],
"signed": {
"_type": "targets",
"expires": "2034-09-30T11:29:58.187585Z",
"spec_version": "1.0.31",
"targets": {
"map.json": {
"hashes": {
"sha256": "562fc7cffe872542a430342995998546e3949dc3acbe7d37668dc76d657032ff"
},
"length": 596
},
"sigstore-tuf-root/root.json": {
"hashes": {
"sha256": "e2a930b2d1d4053dd56e8faf66fd113658545d522e35d222ccf58fea87ccccf4"
},
"length": 6388
},
"staging/root.json": {
"hashes": {
"sha256": "e2a930b2d1d4053dd56e8faf66fd113658545d522e35d222ccf58fea87ccccf4"
},
"length": 6388
}
},
"version": 1
}
} golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/metadata/timestamp.json 0000664 0000000 0000000 00000000671 14706111210 0034070 0 ustar 00root root 0000000 0000000 {
"signatures": [
{
"keyid": "1e0c46c0f988b6f45eb0956ed6b0836697a39dca123c56453a3d8ad57c64726b",
"sig": "29e2506d14263991f9a178a2197921fc9acd33725a82705fc49fc7a22d50603e5d7faf0a8a70e70252c397ed5ad08d5632c102988741502ad26a0481f881cc07"
}
],
"signed": {
"_type": "timestamp",
"expires": "2034-09-30T11:29:58.188963Z",
"meta": {
"snapshot.json": {
"version": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/ 0000775 0000000 0000000 00000000000 14706111210 0031057 5 ustar 00root root 0000000 0000000 562fc7cffe872542a430342995998546e3949dc3acbe7d37668dc76d657032ff.map.json 0000664 0000000 0000000 00000001124 14706111210 0043111 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets
{
"repositories": {
"sigstore-tuf-root": ["https://tuf-repo-cdn.sigstore.dev"],
"staging": ["https://tuf-repo-cdn.sigstore.dev"]
},
"mapping": [
{
"paths": ["fulcio*", "*.json"],
"repositories": ["staging"],
"threshold": 1,
"terminating": true
},
{
"paths": ["*.pub"],
"repositories": ["sigstore-tuf-root", "staging"],
"threshold": 2,
"terminating": false
},
{
"paths": ["*"],
"repositories": ["sigstore-tuf-root"],
"terminating": true,
"threshold": 1
}
]
}
golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/map.json 0000664 0000000 0000000 00000001124 14706111210 0032525 0 ustar 00root root 0000000 0000000
{
"repositories": {
"sigstore-tuf-root": ["https://tuf-repo-cdn.sigstore.dev"],
"staging": ["https://tuf-repo-cdn.sigstore.dev"]
},
"mapping": [
{
"paths": ["fulcio*", "*.json"],
"repositories": ["staging"],
"threshold": 1,
"terminating": true
},
{
"paths": ["*.pub"],
"repositories": ["sigstore-tuf-root", "staging"],
"threshold": 2,
"terminating": false
},
{
"paths": ["*"],
"repositories": ["sigstore-tuf-root"],
"terminating": true,
"threshold": 1
}
]
}
sigstore-tuf-root/ 0000775 0000000 0000000 00000000000 14706111210 0034414 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets e2a930b2d1d4053dd56e8faf66fd113658545d522e35d222ccf58fea87ccccf4.root.json 0000664 0000000 0000000 00000014364 14706111210 0047404 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/sigstore-tuf-root {
"signed": {
"_type": "root",
"spec_version": "1.0",
"version": 5,
"expires": "2023-04-18T18:13:43Z",
"keys": {
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
}
},
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
}
},
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
}
},
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
}
},
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
}
},
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
}
},
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
}
}
},
"roles": {
"root": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"snapshot": {
"keyids": [
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b"
],
"threshold": 1
},
"targets": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"timestamp": {
"keyids": [
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a"
],
"threshold": 1
}
},
"consistent_snapshot": true
},
"signatures": [
{
"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
},
{
"keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "75e867ab10e121fdef32094af634707f43ddd79c6bab8ad6c5ab9f03f4ea8c90",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
}
]
} root.json 0000664 0000000 0000000 00000014364 14706111210 0036302 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/sigstore-tuf-root {
"signed": {
"_type": "root",
"spec_version": "1.0",
"version": 5,
"expires": "2023-04-18T18:13:43Z",
"keys": {
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
}
},
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
}
},
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
}
},
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
}
},
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
}
},
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
}
},
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
}
}
},
"roles": {
"root": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"snapshot": {
"keyids": [
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b"
],
"threshold": 1
},
"targets": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"timestamp": {
"keyids": [
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a"
],
"threshold": 1
}
},
"consistent_snapshot": true
},
"signatures": [
{
"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
},
{
"keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "75e867ab10e121fdef32094af634707f43ddd79c6bab8ad6c5ab9f03f4ea8c90",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
}
]
} golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/staging/ 0000775 0000000 0000000 00000000000 14706111210 0032513 5 ustar 00root root 0000000 0000000 e2a930b2d1d4053dd56e8faf66fd113658545d522e35d222ccf58fea87ccccf4.root.json 0000664 0000000 0000000 00000014364 14706111210 0045424 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/staging {
"signed": {
"_type": "root",
"spec_version": "1.0",
"version": 5,
"expires": "2023-04-18T18:13:43Z",
"keys": {
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
}
},
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
}
},
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
}
},
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
}
},
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
}
},
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
}
},
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
}
}
},
"roles": {
"root": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"snapshot": {
"keyids": [
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b"
],
"threshold": 1
},
"targets": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"timestamp": {
"keyids": [
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a"
],
"threshold": 1
}
},
"consistent_snapshot": true
},
"signatures": [
{
"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
},
{
"keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "75e867ab10e121fdef32094af634707f43ddd79c6bab8ad6c5ab9f03f4ea8c90",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
}
]
} root.json 0000664 0000000 0000000 00000014364 14706111210 0034322 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/multirepo/repository/targets/staging {
"signed": {
"_type": "root",
"spec_version": "1.0",
"version": 5,
"expires": "2023-04-18T18:13:43Z",
"keys": {
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
}
},
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
}
},
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
}
},
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
}
},
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
}
},
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
}
},
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c": {
"keytype": "ecdsa-sha2-nistp256",
"scheme": "ecdsa-sha2-nistp256",
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
}
}
},
"roles": {
"root": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"snapshot": {
"keyids": [
"45b283825eb184cabd582eb17b74fc8ed404f68cf452acabdad2ed6f90ce216b"
],
"threshold": 1
},
"targets": {
"keyids": [
"ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
"7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de"
],
"threshold": 3
},
"timestamp": {
"keyids": [
"e1863ba02070322ebc626dcecf9d881a3a38c35c3b41a83765b6ad6c37eaec2a"
],
"threshold": 1
}
},
"consistent_snapshot": true
},
"signatures": [
{
"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
},
{
"keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97",
"sig": "3045022100fc1c2be509ce50ea917bbad1d9efe9d96c8c2ebea04af2717aa3d9c6fe617a75022012eef282a19f2d8bd4818aa333ef48a06489f49d4d34a20b8fe8fc867bb25a7a"
},
{
"keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b",
"sig": "30450221008a4392ae5057fc00778b651e61fea244766a4ae58db84d9f1d3810720ab0f3b702207c49e59e8031318caf02252ecea1281cecc1e5986c309a9cef61f455ecf7165d"
},
{
"keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209",
"sig": "3046022100da1b8dc5d53aaffbbfac98de3e23ee2d2ad3446a7bed09fac0f88bae19be2587022100b681c046afc3919097dfe794e0d819be891e2e850aade315bec06b0c4dea221b"
},
{
"keyid": "75e867ab10e121fdef32094af634707f43ddd79c6bab8ad6c5ab9f03f4ea8c90",
"sig": "3046022100b534e0030e1b271133ecfbdf3ba9fbf3becb3689abea079a2150afbb63cdb7c70221008c39a718fd9495f249b4ab8788d5b9dc269f0868dbe38b272f48207359d3ded9"
}
]
} golang-github-theupdateframework-go-tuf-2.0.2/examples/repository/ 0000775 0000000 0000000 00000000000 14706111210 0025366 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/examples/repository/basic_repository.go 0000664 0000000 0000000 00000057357 14706111210 0031316 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"fmt"
"os"
"path/filepath"
"time"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/repository"
)
// A TUF repository example using the low-level TUF Metadata API.
// The example code in this file demonstrates how to *manually* create and
// maintain repository metadata using the low-level Metadata API.
// Contents:
// * creation of top-level metadata
// * target file handling
// * consistent snapshots
// * key management
// * top-level delegation and signing thresholds
// * metadata verification
// * target delegation
// * in-band and out-of-band metadata signing
// * writing and reading metadata files
// * root key rotation
// NOTE: Metadata files will be written to a 'tmp*'-directory in CWD.
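// A typical invocation, assuming a checkout of this repository and running
// from its root directory (the path below is taken from this example's
// location in the repo):
//
//	go run examples/repository/basic_repository.go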
func main() {
// Create top-level metadata
// =========================
// Every TUF repository has at least four roles, i.e. the top-level roles
// 'targets', 'snapshot', 'timestamp' and 'root'. Below we will discuss their
// purpose, show how to create the corresponding metadata, and how to use them
// to provide integrity, consistency and freshness for the files TUF aims to
// protect, i.e. target files.
// Define containers for metadata objects and cryptographic keys created below. This
// allows us to sign and write metadata in a batch more easily. The repository.New() instance
// doesn't provide anything else yet other than serving as a placeholder for all metadata.
roles := repository.New()
keys := map[string]ed25519.PrivateKey{}
// Targets (integrity)
// -------------------
// The targets role guarantees integrity for the files that TUF aims to protect,
// i.e. target files. It does so by listing the relevant target files, along
// with their hash and length.
targets := metadata.Targets(helperExpireIn(7))
roles.SetTargets("targets", targets)
// For the purpose of this example we use the top-level targets role to protect
// the integrity of this very example script. The metadata entry contains the
// hash and length of this file at the local path. In addition, it specifies the
// 'target path', which a client uses to locate the target file relative to a
// configured mirror base URL.
// |----base URL---||--------target path--------|
// e.g. tuf-examples.org/examples/basic_repository.go
targetPath, localPath := helperGetPathForTarget("basic_repository.go")
targetFileInfo, err := metadata.TargetFile().FromFile(localPath, "sha256")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "generating target file info failed", err))
}
roles.Targets("targets").Signed.Targets[targetPath] = targetFileInfo
// Snapshot (consistency)
// ----------------------
// The snapshot role guarantees consistency of the entire repository. It does so
// by listing all available targets metadata files at their latest version. This
// becomes relevant, when there are multiple targets metadata files in a
// repository and we want to protect the client against mix-and-match attacks.
snapshot := metadata.Snapshot(helperExpireIn(7))
roles.SetSnapshot(snapshot)
// Timestamp (freshness)
// ---------------------
// The timestamp role guarantees freshness of the repository metadata. It does
// so by listing the latest snapshot (which in turn lists all the latest
// targets) metadata. A short expiration interval requires the repository to
// regularly issue new timestamp metadata and thus protects the client against
// freeze attacks.
// Note that snapshot and timestamp use the same generic wireline metadata
// format.
timestamp := metadata.Timestamp(helperExpireIn(1))
roles.SetTimestamp(timestamp)
// Root (root of trust)
// --------------------
// The root role serves as root of trust for all top-level roles, including
// itself. It does so by mapping cryptographic keys to roles, i.e. the keys that
// are authorized to sign any top-level role metadata, and signing thresholds,
// i.e. how many authorized keys are required for a given role (see 'roles'
// field). This is called top-level delegation.
// In addition, root provides all public keys to verify these signatures (see
// 'keys' field), and a configuration parameter that describes whether a
// repository uses consistent snapshots (see section 'Persist metadata' below
// for more details).
// Create root metadata object
root := metadata.Root(helperExpireIn(365))
roles.SetRoot(root)
// For this example, we generate one private key of type 'ed25519' for each top-level role
for _, name := range []string{"targets", "snapshot", "timestamp", "root"} {
_, private, err := ed25519.GenerateKey(nil)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key generation failed", err))
}
keys[name] = private
key, err := metadata.KeyFromPublicKey(private.Public())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key conversion failed", err))
}
err = roles.Root().Signed.AddKey(key, name)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "adding key to root failed", err))
}
}
// NOTE: We only need the public part to populate root, so it is possible to use
// out-of-band mechanisms to generate key pairs and only expose the public part
// to whoever maintains the root role. As a matter of fact, the very purpose of
// signature thresholds is to avoid having private keys all in one place.
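// As a rough sketch (not part of this example's flow; the PEM file name is
// hypothetical, and live code would additionally need the "encoding/pem" and
// "crypto/x509" imports), a public key received out-of-band could be loaded
// and registered like so:
//
//	pemBytes, _ := os.ReadFile("offline_root_key.pub.pem")
//	block, _ := pem.Decode(pemBytes)
//	pub, _ := x509.ParsePKIXPublicKey(block.Bytes)
//	oobKey, _ := metadata.KeyFromPublicKey(pub)
//	_ = roles.Root().Signed.AddKey(oobKey, "root")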
// Signature thresholds
// --------------------
// Given the importance of the root role, it is highly recommended to require a
// threshold of multiple keys to sign root metadata. For this example we
// generate another root key (you can pretend it's out-of-band) and increase the
// required signature threshold.
_, anotherRootKey, err := ed25519.GenerateKey(nil)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key generation failed", err))
}
anotherKey, err := metadata.KeyFromPublicKey(anotherRootKey.Public())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key conversion failed", err))
}
err = roles.Root().Signed.AddKey(anotherKey, "root")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "adding another key to root failed", err))
}
roles.Root().Signed.Roles["root"].Threshold = 2
// Sign top-level metadata (in-band)
// =================================
// In this example we have access to all top-level signing keys, so we can use
// them to create and add a signature for each role metadata.
for _, name := range []string{"targets", "snapshot", "timestamp", "root"} {
key := keys[name]
signer, err := signature.LoadSigner(key, crypto.Hash(0))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading a signer failed", err))
}
switch name {
case "targets":
_, err = roles.Targets("targets").Sign(signer)
case "snapshot":
_, err = roles.Snapshot().Sign(signer)
case "timestamp":
_, err = roles.Timestamp().Sign(signer)
case "root":
_, err = roles.Root().Sign(signer)
}
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "metadata signing failed", err))
}
}
// Persist metadata (consistent snapshot)
// ======================================
// It is time to publish the first set of metadata for a client to safely
// download the target file that we have registered for this example repository.
// For the purpose of this example we will follow the consistent snapshot naming
// convention for all metadata. This means that each metadata file must be
// prefixed with its version number, except for timestamp. The naming convention
// also affects the target files, but we don't cover this in the example. See
// the TUF specification for more details:
// https://theupdateframework.github.io/specification/latest/#writing-consistent-snapshots
// Also note that the TUF specification does not mandate a wireline format. In
// this demo we use a non-compact JSON format and store all metadata in a
// temporary directory at CWD for review.
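// With consistent snapshots enabled, the loop below is expected to produce a
// layout along these lines on the first publish (timestamp is the only file
// without a version prefix):
//
//	tmp*/1.root.json
//	tmp*/1.targets.json
//	tmp*/1.snapshot.json
//	tmp*/timestamp.json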
cwd, err := os.Getwd()
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "getting cwd failed", err))
}
tmpDir, err := os.MkdirTemp(cwd, "tmp")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "creating a temporary folder failed", err))
}
for _, name := range []string{"targets", "snapshot", "timestamp", "root"} {
switch name {
case "targets":
filename := fmt.Sprintf("%d.%s.json", roles.Targets("targets").Signed.Version, name)
err = roles.Targets("targets").ToFile(filepath.Join(tmpDir, filename), true)
case "snapshot":
filename := fmt.Sprintf("%d.%s.json", roles.Snapshot().Signed.Version, name)
err = roles.Snapshot().ToFile(filepath.Join(tmpDir, filename), true)
case "timestamp":
filename := fmt.Sprintf("%s.json", name)
err = roles.Timestamp().ToFile(filepath.Join(tmpDir, filename), true)
case "root":
filename := fmt.Sprintf("%d.%s.json", roles.Root().Signed.Version, name)
err = roles.Root().ToFile(filepath.Join(tmpDir, filename), true)
}
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "saving metadata to file failed", err))
}
}
// Threshold signing (out-of-band)
// ===============================
// As mentioned above, using signature thresholds usually entails that not all
// signing keys for a given role are in the same place. Let's briefly pretend
// this is the case for the second root key we registered above, and we are now
// on that key owner's computer. All the owner has to do is read the metadata
// file, sign it, and write it back to the same file, and this can be repeated
// until the threshold is satisfied.
_, err = roles.Root().FromFile(filepath.Join(tmpDir, "1.root.json"))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading root metadata from file failed", err))
}
outofbandSigner, err := signature.LoadSigner(anotherRootKey, crypto.Hash(0))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading a signer failed", err))
}
_, err = roles.Root().Sign(outofbandSigner)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing root failed", err))
}
err = roles.Root().ToFile(filepath.Join(tmpDir, "1.root.json"), true)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "saving root metadata to file failed", err))
}
// Verify that metadata is signed correctly
// ========================================
// Verify root
err = roles.Root().VerifyDelegate("root", roles.Root())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying root metadata failed", err))
}
// Verify targets
err = roles.Root().VerifyDelegate("targets", roles.Targets("targets"))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying targets metadata failed", err))
}
// Verify snapshot
err = roles.Root().VerifyDelegate("snapshot", roles.Snapshot())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying snapshot metadata failed", err))
}
// Verify timestamp
err = roles.Root().VerifyDelegate("timestamp", roles.Timestamp())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying timestamp metadata failed", err))
}
// Targets delegation
// ==================
// Similar to how the root role delegates responsibilities about integrity,
// consistency and freshness to the corresponding top-level roles, a targets
// role may further delegate its responsibility for target files (or a subset
// thereof) to other targets roles. This allows creation of a granular trust
// hierarchy, and further reduces the impact of a single role compromise.
// In this example the top-level targets role trusts a new "go-scripts"
// targets role to provide integrity for any target file that ends with ".go".
delegateeName := "go-scripts"
_, delegateePrivateKey, err := ed25519.GenerateKey(nil)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key generation failed", err))
}
keys[delegateeName] = delegateePrivateKey
// Delegatee
// ---------
// Create a new targets role, akin to how we created top-level targets above, and
// add target file info from above according to the delegatee's responsibility.
delegatee := metadata.Targets(helperExpireIn(7))
delegatee.Signed.Targets[targetPath] = targetFileInfo
roles.SetTargets(delegateeName, delegatee)
// Delegator
// ---------
// Akin to top-level delegation, the delegator expresses its trust in the
// delegatee by authorizing a threshold of cryptographic keys to provide
// signatures for the delegatee metadata. It also provides the corresponding
// public key store.
// The delegation info defined by the delegator further requires the provision
// of a unique delegatee name and constraints about the target files the
// delegatee is responsible for, e.g. a list of path patterns. For details about
// all configuration parameters see
// https://theupdateframework.github.io/specification/latest/#delegations
delegateeKey, _ := metadata.KeyFromPublicKey(delegateePrivateKey.Public())
roles.Targets("targets").Signed.Delegations = &metadata.Delegations{
Keys: map[string]*metadata.Key{
delegateeKey.ID(): delegateeKey,
},
Roles: []metadata.DelegatedRole{
{
Name: delegateeName,
KeyIDs: []string{delegateeKey.ID()},
Threshold: 1,
Terminating: true,
Paths: []string{"*.go"},
},
},
}
// Remove target file info from top-level targets (delegatee is now responsible)
delete(roles.Targets("targets").Signed.Targets, targetPath)
// Increase expiry (delegators should be less volatile)
roles.Targets("targets").Signed.Expires = helperExpireIn(365)
// Snapshot + Timestamp + Sign + Persist
// -------------------------------------
// In order to publish a new consistent set of metadata, we need to update
// dependent roles (snapshot, timestamp) accordingly, bumping versions of all
// changed metadata.
// Bump targets version
roles.Targets("targets").Signed.Version += 1
// Update snapshot to account for changed and new targets (delegatee) metadata
roles.Snapshot().Signed.Meta["targets.json"] = metadata.MetaFile(roles.Targets("targets").Signed.Version)
roles.Snapshot().Signed.Meta[fmt.Sprintf("%s.json", delegateeName)] = metadata.MetaFile(1)
roles.Snapshot().Signed.Version += 1
// Update timestamp to account for changed snapshot metadata
roles.Timestamp().Signed.Meta["snapshot.json"] = metadata.MetaFile(roles.Snapshot().Signed.Version)
roles.Timestamp().Signed.Version += 1
// Sign and write metadata for all changed roles, i.e. all but root
for _, name := range []string{"targets", "snapshot", "timestamp", delegateeName} {
key := keys[name]
signer, err := signature.LoadSigner(key, crypto.Hash(0))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading a signer failed", err))
}
switch name {
case "targets":
roles.Targets("targets").ClearSignatures()
_, err = roles.Targets("targets").Sign(signer)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing metadata failed", err))
}
filename := fmt.Sprintf("%d.%s.json", roles.Targets("targets").Signed.Version, name)
err = roles.Targets("targets").ToFile(filepath.Join(tmpDir, filename), true)
case "snapshot":
roles.Snapshot().ClearSignatures()
_, err = roles.Snapshot().Sign(signer)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing metadata failed", err))
}
filename := fmt.Sprintf("%d.%s.json", roles.Snapshot().Signed.Version, name)
err = roles.Snapshot().ToFile(filepath.Join(tmpDir, filename), true)
case "timestamp":
roles.Timestamp().ClearSignatures()
_, err = roles.Timestamp().Sign(signer)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing metadata failed", err))
}
filename := fmt.Sprintf("%s.json", name)
err = roles.Timestamp().ToFile(filepath.Join(tmpDir, filename), true)
case delegateeName:
roles.Targets(delegateeName).ClearSignatures()
_, err = roles.Targets(delegateeName).Sign(signer)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing metadata failed", err))
}
filename := fmt.Sprintf("%d.%s.json", roles.Targets(delegateeName).Signed.Version, name)
err = roles.Targets(delegateeName).ToFile(filepath.Join(tmpDir, filename), true)
}
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "saving metadata to file failed", err))
}
}
// Root key rotation (recover from a compromise / key loss)
// ========================================================
// TUF makes it easy to recover from a key compromise in-band. Given the trust
// hierarchy through top-level and targets delegation you can easily
// replace compromised or lost keys for any role using the delegating role, even
// for the root role.
// However, since root authorizes its own keys, it always has to be signed with
// both the threshold of keys from the previous version and the threshold of
// keys from the new version. This establishes a trusted line of continuity.
// In this example we will replace a root key, and sign a new version of root
// with the threshold of old and new keys. Since one of the previous root keys
// remains in place, it can be used to count towards the old and new threshold.
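// Concretely for this example (root threshold is 2): the new root version must
// carry signatures from at least 2 keys listed in the previous root *and* at
// least 2 keys listed in the new root. Below, keys["root"] plus anotherRootKey
// satisfy the old threshold, while anotherRootKey plus newRootKey satisfy the
// new one.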
_, newRootKey, err := ed25519.GenerateKey(nil)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key generation failed", err))
}
oldRootKey, err := metadata.KeyFromPublicKey(keys["root"].Public())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key conversion failed", err))
}
err = roles.Root().Signed.RevokeKey(oldRootKey.ID(), "root")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "revoking key failed", err))
}
// Add new key for root
newRootKeyTUF, err := metadata.KeyFromPublicKey(newRootKey.Public())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "key conversion failed", err))
}
err = roles.Root().Signed.AddKey(newRootKeyTUF, "root")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "adding key to root failed", err))
}
roles.Root().Signed.Version += 1
roles.Root().ClearSignatures()
// Sign root
for _, k := range []ed25519.PrivateKey{keys["root"], anotherRootKey, newRootKey} {
signer, err := signature.LoadSigner(k, crypto.Hash(0))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading a signer failed", err))
}
_, err = roles.Root().Sign(signer)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing root failed", err))
}
}
filename := fmt.Sprintf("%d.%s.json", roles.Root().Signed.Version, "root")
err = roles.Root().ToFile(filepath.Join(tmpDir, filename), true)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "saving root to file failed", err))
}
// Verify again that metadata is signed correctly
// ==============================================
// Verify root
err = roles.Root().VerifyDelegate("root", roles.Root())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying root metadata failed", err))
}
// Verify targets
err = roles.Root().VerifyDelegate("targets", roles.Targets("targets"))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying targets metadata failed", err))
}
// Verify snapshot
err = roles.Root().VerifyDelegate("snapshot", roles.Snapshot())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying snapshot metadata failed", err))
}
// Verify timestamp
err = roles.Root().VerifyDelegate("timestamp", roles.Timestamp())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying timestamp metadata failed", err))
}
// Verify delegatee
err = roles.Targets("targets").VerifyDelegate(delegateeName, roles.Targets(delegateeName))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying delegatee metadata failed", err))
}
// Use a mixture of key types
// ==========================
// Create an RSA key
anotherRootKeyRSA, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "RSA key generation failed", err))
}
anotherKeyRSA, err := metadata.KeyFromPublicKey(anotherRootKeyRSA.Public())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "RSA key conversion failed", err))
}
// Create an ECDSA key
anotherRootKeyECDSA, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "ECDSA key generation failed", err))
}
anotherKeyECDSA, err := metadata.KeyFromPublicKey(anotherRootKeyECDSA.Public())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "ECDSA key conversion failed", err))
}
// Add the RSA key to root keys
err = roles.Root().Signed.AddKey(anotherKeyRSA, "root")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "adding RSA key to root failed", err))
}
// Add the ECDSA key to root keys
err = roles.Root().Signed.AddKey(anotherKeyECDSA, "root")
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "adding ECDSA key to root failed", err))
}
// Clear existing signatures, bump version and threshold
roles.Root().Signed.Roles["root"].Threshold = 4
roles.Root().Signed.Version += 1
roles.Root().ClearSignatures()
// Sign root with existing ed25519 keys
for _, k := range []ed25519.PrivateKey{keys["root"], anotherRootKey, newRootKey} {
signer, err := signature.LoadSigner(k, crypto.Hash(0))
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading a signer failed", err))
}
_, err = roles.Root().Sign(signer)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing root failed", err))
}
}
// Sign root with the new RSA and ECDSA keys
outofbandSignerRSA, err := signature.LoadSigner(anotherRootKeyRSA, crypto.SHA256)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading RSA signer failed", err))
}
outofbandSignerECDSA, err := signature.LoadSigner(anotherRootKeyECDSA, crypto.SHA256)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "loading ECDSA signer failed", err))
}
_, err = roles.Root().Sign(outofbandSignerRSA)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing root failed", err))
}
_, err = roles.Root().Sign(outofbandSignerECDSA)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "signing root failed", err))
}
// Verify that root is signed correctly
// ====================================
err = roles.Root().VerifyDelegate("root", roles.Root())
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "verifying root metadata failed", err))
}
// Save root to file
filename = fmt.Sprintf("%d.%s.json", roles.Root().Signed.Version, "root")
err = roles.Root().ToFile(filepath.Join(tmpDir, filename), true)
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "saving root to file failed", err))
}
fmt.Println("Done! Metadata files location:", tmpDir)
}
// helperExpireIn returns a UTC time offset from now by the given number of days
func helperExpireIn(days int) time.Time {
return time.Now().AddDate(0, 0, days).UTC()
}
// helperGetPathForTarget returns the target path and local path for the given target name
func helperGetPathForTarget(name string) (string, string) {
cwd, err := os.Getwd()
if err != nil {
panic(fmt.Sprintln("basic_repository.go:", "getting cwd failed", err))
}
// _, dir := filepath.Split(cwd)
// return filepath.Join(dir, name), filepath.Join(cwd, name)
return name, filepath.Join(cwd, name)
}
golang-github-theupdateframework-go-tuf-2.0.2/go.mod 0000664 0000000 0000000 00000002066 14706111210 0022443 0 ustar 00root root 0000000 0000000 module github.com/theupdateframework/go-tuf/v2
go 1.21
require (
github.com/go-logr/stdr v1.2.2
github.com/secure-systems-lab/go-securesystemslib v0.8.0
github.com/sigstore/sigstore v1.8.4
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/google/go-containerregistry v0.19.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.20.0 // indirect
google.golang.org/grpc v1.56.3 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
golang-github-theupdateframework-go-tuf-2.0.2/go.sum 0000664 0000000 0000000 00000016611 14706111210 0022471 0 ustar 00root root 0000000 0000000 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk=
go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek=
go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo=
go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
golang-github-theupdateframework-go-tuf-2.0.2/internal/ 0000775 0000000 0000000 00000000000 14706111210 0023145 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/ 0000775 0000000 0000000 00000000000 14706111210 0025205 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/ 0000775 0000000 0000000 00000000000 14706111210 0030415 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore/ 0000775 0000000 0000000 00000000000 14706111210 0032262 5 ustar 00root root 0000000 0000000 delegation_key 0000664 0000000 0000000 00000001466 14706111210 0035120 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore 68593a508472ad3007915379e6b1f3c0@@@@100000@@@@615986af4d1ba89aeadc2f489f89b0e8d46da133a6f75c7b162b8f99f63f86ed@@@@8319255f9856c4f40f9d71bc10e79e5d@@@@1dc7b20f1c668a1f544dc39c7a9fcb3c4a4dd34d1cc8c9d8f779bab026cf0b8e0f46e53bc5ed20bf0e5048b94a5d2ea176e79c12bcc7daa65cd55bf810deebeec5bc903ce9e5316d7dbba88f1a2b51d3f9bc782f8fa9b21dff91609ad0260e21a2039223f816d0fe97ace2e204d0025d327b38d27aa6cd87e85aa8883bfcb6d12f93155d72ffd3c7717a0570cf9811eb6d6a340baa0f27433315d83322c685fec02053ff8c173c4ebf91a258e83402f39546821e3352baa7b246e33b2a573a8ff7b289682407abbcb9184249d4304db68d3bf8e124e94377fd62dde5c4f3b7617d483776345154d047d139b1e559351577da315f54e16153c510159e1908231574bcf49c4f96cafe6530e86a09e9eee47bcff78f2fed2984754c895733938999ff085f9e3532d7174fd76dc09921506dd2137e16ec4926998f5d9df8a8ffb3e6649c71bc32571b2e24357739fa1a56be delegation_key.pub 0000664 0000000 0000000 00000000264 14706111210 0035700 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore {"keyval": {"public": "fcf224e55fa226056adf113ef1eb3d55e308b75b321c8c8316999d8c4fd9e0d9"}, "keytype": "ed25519", "scheme": "ed25519", "keyid_hash_algorithms": ["sha256", "sha512"]} golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore/root_key 0000664 0000000 0000000 00000001566 14706111210 0034050 0 ustar 00root root 0000000 0000000 -----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDydf/VEpxBOCDoxpM6IVhq9i67P9BiVv2zwZSUO/M0RTToAvFv
NgDKXwtnp8LyjVk++wMA1aceMa+pS7vYrKvPIJa7WIT+mwy86/fIdnllJDMw5tmL
r2mE3oBMxOhpEiD2tO+liGacklFNk6nHHorX9S91iqpdRVa3zJw5ALvLdwIDAQAB
AoGBAJlhwoUVb9nmWxNGw86LV7bapDd6qCX96CL2PDsGLdWMTmrTqc5zuE5NkBZz
z2THvISWIJE/l6gHQJv1uBDbMxfquhK40k+GfE/fApVODN8KeBLLRUzYyHNz7KwW
aNF3jY8AbO4HzWpdaFYce5r+YqlWZoaVPR9i6LCW3sZXALyRAkEA/lSVaT0azp55
2GI4Gn+EQQFqFJWEbNwJ8i3FZ4aG+/gnw2WmxJr+2nQcUlLb2cpQCCcMyWxvCfLK
+DapvvgZXwJBAPQNd+liOrKKd1gPR3S6y+D4h1ewj8ii1MHzRtAsCKCRG/e+v+hC
xp77Rc/qtZXKvVTGrccnKqCVAvG7F15rzOkCQQDCswgKn6+0+5c1ssNWbcZWaXnH
NktBdxXaI3Ya8d7GaEwwhtIrcqilnfvMfgg2a23nP9XHIU7EI+2EJXy/aHkrAkBH
wH30u9COFW+pEDTt+M1gQzFncp2TW2w56ZB0O739lywl1osNejRzIWURD+x7MbQg
bJlC6Bz8QVMwRtVECWWhAkAflD6eIJeceDhVHClHB/QwmF8mwR1o63RN7ZFlgel1
kwMt6bPZZ1cyrRoj6Cdi4pyqBssDBuQmbBLWyYuijIwz
-----END RSA PRIVATE KEY----- root_key.pub 0000664 0000000 0000000 00000000417 14706111210 0034550 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore -----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDydf/VEpxBOCDoxpM6IVhq9i67
P9BiVv2zwZSUO/M0RTToAvFvNgDKXwtnp8LyjVk++wMA1aceMa+pS7vYrKvPIJa7
WIT+mwy86/fIdnllJDMw5tmLr2mE3oBMxOhpEiD2tO+liGacklFNk6nHHorX9S91
iqpdRVa3zJw5ALvLdwIDAQAB
-----END PUBLIC KEY----- golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore/root_key2 0000664 0000000 0000000 00000001572 14706111210 0034127 0 ustar 00root root 0000000 0000000 -----BEGIN RSA PRIVATE KEY-----
MIICXwIBAAKBgQC9sG1iNSW+JbnSuITAuWL46YphgqOXU3DhGZp2hM/1FaaRzLlR
HTyH7EvzbKqlaGBs9Caatx6KlN7e8J45vpLO1PkKnLOHdKBuy39Fzxw1dhxJWvTI
0mNDv68JbWxvgRxjEPE7wc24jYubovxiiBD8g2DbOnWEkyt7owECPRlXAwIDAQAB
AoGBAKquh54oqG9yTsRXF8y6g13p9oRLIpxVjmpduWkPlHe5JYpnphBguEitzKGa
k+oGA03GWr44K5kS33/HDvhyjHFXCqniittlVUwbaa4kbJiWi4lQ3K/m8F2DzHJP
s4YaqmzG30v9j9z3nOgLhM7iye65beala72zJnGOXivUAuhRAkEA3KnfY/SFjTbo
rsluVa03hC5KVQm/yDf4wShDnl8drYHnJ1XFkSC0UbBruRyu8JeWE93dAKu9IxdK
WEdHOtxR3wJBANwQwX/wPJ8/+yo4lToyUuN0omx94sK/JuRmvi9dzCbdfQbkgvDa
MEyWc0LNwxDBPYJ2bej/ORGmD+nOJo59h10CQQCCj/x+jvrKcFfCu6qOBRyZGC6h
HFCebgfAektwFIVh2T/lNUndsgUfZIyIjeEwt/Bzts2CDRuu/KPfkeUifaPvAkEA
m9iR8FTl2bGp4cCojcpNwR88V7DfAiP1GxNX5Jt8lJmOjW8O/BrI0bRKdCjb1+XB
9b6BH9x/QexkoKOJ0qc7UQJBAINLHep9QG2b3AEGZ692Z+iyU1Lub7rWNBKsmodh
0x9rwYs0D0EJo0BYozYhExz7ugaSzXW61H26IWbHtsg+5a0=
-----END RSA PRIVATE KEY----- root_key2.pub 0000664 0000000 0000000 00000000417 14706111210 0034632 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore -----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9sG1iNSW+JbnSuITAuWL46Yph
gqOXU3DhGZp2hM/1FaaRzLlRHTyH7EvzbKqlaGBs9Caatx6KlN7e8J45vpLO1PkK
nLOHdKBuy39Fzxw1dhxJWvTI0mNDv68JbWxvgRxjEPE7wc24jYubovxiiBD8g2Db
OnWEkyt7owECPRlXAwIDAQAB
-----END PUBLIC KEY----- snapshot_key 0000664 0000000 0000000 00000001567 14706111210 0034646 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore -----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQCPQoHresXRRRGoinN3bNn+BI23KolXdXLGqYkTvr9AjemUQJxb
qmvZXHboQMAYw8OuBrRNt5Fz20wjsrJwOBEU5U3nHSJI4zYPGckYci0/0Eo2Kjws
5BmIj38qgIfhsH4zyZ4FZZ+GLRn+W3i3wl6SfRMC/HCg0DDwi75faC0vGQIDAQAB
AoGAbPFYt2hf8qqhqRfQgysmA4QW+QnB895+8BCRC5DtA/xnerQ/s33AEkW8rxY+
fxawQjEbAFbup7pHBoaoJ6qbYbKDBSGgZFSEbh40nriX1V0oYb9E+BCAFHE+42Rj
WYYNxXRp7LGoUQqisTsfoR1bvmrLC+9I/tDArHuMudm1slkCQQDOVn9AKTcaBGuQ
Y+JQqoRmi9eMN6XztKIAKQ+P/57BofwlKJDFnwttsvMxRud6rvN1FCnCDM638HNb
I0JDY0JXAkEAsb10uNV+SaWsHJOxfHzwK+uZJV1SkYzpBMizUREHuIyKT4MfpYNw
kn00KpyCvhIp6buwNyYo76TssejYN86UDwJAGi3ZSU+xYQisiQ5TOX7Y+5XEjFLH
KGuDnleXVOLOxqyBrElATQKH1aw9tMPVPLiTxQgA4FD1rVrBmA+aKaifUwJALBp8
yhh/u7qWWIj1c5R07BEL8U+U23UBpSRACo+VQN/uuggpZCKXXmIe/avUbWGIcO0X
rreTVNOxv/utGzvxVQJBAL7Kpqt9d50SL1ndLr2EdqGw8ZB/B2dKMlZf7AWwbk0k
HHdvWfSDYhtvGo3ilLibHLesE/Tq1fm/2aEOds95/Eo=
-----END RSA PRIVATE KEY-----
snapshot_key.pub 0000664 0000000 0000000 00000000516 14706111210 0035424 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore {"keyval": {"public": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCG5DWxCcw4FW2G21RwTmuR7gdkv+ZrjZVOx0KsvJc/51QBxo/Y9xPVeoFF7YrhE8EV6A6b0qsLufIo1E63sQ6kjLOPfIMjag6dYPlmEyGcbxNDokv2elxZk7jS98iBQLxEmJLicrdERmxC2t2OOEQ6ELi5dt+C13QvNJFg4+OaTwIDAQAB"}, "keytype": "ed25519", "scheme": "ed25519", "keyid_hash_algorithms": ["sha256", "sha512"]}
targets_key 0000664 0000000 0000000 00000001566 14706111210 0034457 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore -----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQCjm6HPktvTGsygQ8Gvmu+zydTNe1zqoxLxV7mVRbmsCI4kn7JT
Hc4fmWZwvo7f/Wbto6Xj5HqGJFSlYIGZuTwZqPg3w8wqv8cuPxbmsFSxMoHfzBBI
uJe0FlwXFysojbdhrSUqNL84tlwTFXEhePYrpTNMDn+9T55B0WJYT/VPxwIDAQAB
AoGANYaYRLHWS1WMNq6UMmBtJZPVlDhU6MrbSqwZojWCjj7qSh8ZF0o8AmiMdDxT
wAJGZ17PyiQY1cQTEVvmaqWIfJKvipAcTvkiXFrAxeIf/HYIVfCP9UB8RqhJufsc
XzDQyvZTmJdatHfKe2JV+q42GrsN4VN61wFEed3NuF8NGjECQQDSA5b+N1wMn5X4
G5fxPYjhlwQmK3tlBHIPIVcVAsGOxU9Ry55xLQ8LpfKwJZIt2+LvgBIXf4DZY2u6
GEnyR7epAkEAx267l7XX+9Dh8bHPluQSgH/tDrCp1hUNmyV4XzZCwavI/FaucANa
h8ChpUOSZTq5mR76YaUL7O3Sx8N7L/2x7wJAZDvgYf6sCT5VhnAtCa+T2A+KpGkW
YLVJdt0zwcxp8ylK3UAwo9Wcm7Oda+LSrN6IpkRa3io1pguki9Ix4NfH2QJATsXA
NxZOb1p8RFk1Y6ZGYJcm7Wx+SN8b9rIAL6thBtpxkqoyUHAirAg8UOi1xGJDuOVx
hGwKn9T4MotV9wi/5QJAB+1/2TaUMKjyL5Ca8Fh5SMigrwHp8SnX2vl7HV4hiBXi
0FaVxMPGH94tuFqHQ+q53tiTT1cp6YwcMMgpezTRRA==
-----END RSA PRIVATE KEY----- targets_key.pub 0000664 0000000 0000000 00000000514 14706111210 0035234 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore {"keyval": {"public": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCjm6HPktvTGsygQ8Gvmu+zydTNe1zqoxLxV7mVRbmsCI4kn7JTHc4fmWZwvo7f/Wbto6Xj5HqGJFSlYIGZuTwZqPg3w8wqv8cuPxbmsFSxMoHfzBBIuJe0FlwXFysojbdhrSUqNL84tlwTFXEhePYrpTNMDn+9T55B0WJYT/VPxwIDAQAB"}, "keytype": "ed25519", "scheme": "ed25519", "keyid_hash_algorithms": ["sha256", "sha512"]} timestamp_key 0000664 0000000 0000000 00000001562 14706111210 0035005 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore -----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgHXjYnWGuCIOh5T3XGmgG/RsXWHPTbyu7OImP6O+uHg8hui8C1nY
/mcJdFdxqgl1vKEco/Nwebh2T8L6XbNfcgV9VVstWpeCalZYWi55lZSLe9KixQIA
yg15rNdhN9pcD3OuLmFvslgTx+dTbZ3ZoYMbcb4C5yqvqzcOoCTQMeWbAgMBAAEC
gYAMlDvAUKS7NZOwCIj62FPDTADW2/juhjfOlcg6n7ItWkAG+3G2n5ndwruATSeY
pNCA3H5+DmVeknlGU9LFvgx7dhJMw3WSkq7rImOGbwLN1jCVfwKP0AEEqb7GrtCU
a9lvm2ZFvKj+2VVFS2yifeluDG1Xm10ygq+RDd2lL2g6eQJBAMZrMTUwxWT/Cc0j
Yi7CFPl9V8GkYzLCKRQGR3x4QiNuXpNtQ3D+ivxHieBMEtw6M244PMDC+GpLxAfc
DtiGEl8CQQCYGXeycwkgn2YfH3w1/Mw6TWsdv4rVLPOieiQPrhZbVsBc6NT24MYW
b3c7osW5ypf7lo+xU8E6ylFUyeeVSk5FAkADTAqwSJQvHnHKP9lEz6LLloKbzCB9
2m4WUBhmABWRQyc9Keah/QjQMlwfJwR1Nl5eaX7Q8Sxxj7q9KrHwdSHfAkAS1yTC
kAlTZytJM6c5MMVDe4+HMdDKszTCrYqF/rR6P/a4C4dFxXYEFW6ZjoIbj4LgAThv
aMaIt8L3U8NB9OBZAkA3ke4kilnVnjEyB9ibJ/SbDiUgh7e7M/XDbNQuXwSipFft
keBYEwL4Njms9uwMT4Gl59HyQls7BE2XEoiFjsY1
-----END RSA PRIVATE KEY----- timestamp_key.pub 0000664 0000000 0000000 00000000514 14706111210 0035566 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/keystore {"keyval": {"public": "MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHXjYnWGuCIOh5T3XGmgG/RsXWHPTbyu7OImP6O+uHg8hui8C1nY/mcJdFdxqgl1vKEco/Nwebh2T8L6XbNfcgV9VVstWpeCalZYWi55lZSLe9KixQIAyg15rNdhN9pcD3OuLmFvslgTx+dTbZ3ZoYMbcb4C5yqvqzcOoCTQMeWbAgMBAAE="}, "keytype": "ed25519", "scheme": "ed25519", "keyid_hash_algorithms": ["sha256", "sha512"]} golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/ 0000775 0000000 0000000 00000000000 14706111210 0032634 5 ustar 00root root 0000000 0000000 metadata/ 0000775 0000000 0000000 00000000000 14706111210 0034335 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository 1.root.json 0000664 0000000 0000000 00000003530 14706111210 0036353 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "d5fa855fce82db75ec64283e828cc90517df5edf5cdc57e7958a890d6556f5b7",
"sig": "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee"
}
],
"signed": {
"_type": "root",
"consistent_snapshot": true,
"expires": "2030-08-15T14:30:45.0000001Z",
"keys": {
"0a5842e65e9c8c428354f40708435de6793ac379a275effe40d6358be2de835c": {
"keytype": "ed25519",
"keyval": {
"public": "4e10fe156f07e6f6e1f6fb1579105b7d3e62790b6a62dbf7727b91f82d2bc9db"
},
"scheme": "ed25519"
},
"409fb816e403e0c00646665eac21cb8adfab8e318272ca7589b2d1fc0bccb255": {
"keytype": "ed25519",
"keyval": {
"public": "23e5dc4eb18d5c116e76a92b02e44a7d7279622574457050b85fb8fd9260422c"
},
"scheme": "ed25519"
},
"700464ea12f4cb5f06a7512c75b73c0b6eeb2cd42854b085eed5b3c993607cba": {
"keytype": "ed25519",
"keyval": {
"public": "1603f99998ca46c35c238a2c1a2a015e0f32b38771e4fa5401348ce0a677d63f"
},
"scheme": "ed25519"
},
"d5fa855fce82db75ec64283e828cc90517df5edf5cdc57e7958a890d6556f5b7": {
"keytype": "ed25519",
"keyval": {
"public": "17454b5e7a6594e7f00ceadda10d0267b94d0118b82f541f4f69f0d327c5a41a"
},
"scheme": "ed25519"
}
},
"roles": {
"root": {
"keyids": [
"d5fa855fce82db75ec64283e828cc90517df5edf5cdc57e7958a890d6556f5b7"
],
"threshold": 1
},
"snapshot": {
"keyids": [
"700464ea12f4cb5f06a7512c75b73c0b6eeb2cd42854b085eed5b3c993607cba"
],
"threshold": 1
},
"targets": {
"keyids": [
"409fb816e403e0c00646665eac21cb8adfab8e318272ca7589b2d1fc0bccb255"
],
"threshold": 1
},
"timestamp": {
"keyids": [
"0a5842e65e9c8c428354f40708435de6793ac379a275effe40d6358be2de835c"
],
"threshold": 1
}
},
"spec_version": "1.0.31",
"version": 1,
"test": "true"
}
} role1.json 0000664 0000000 0000000 00000002344 14706111210 0036255 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "c8022fa1e9b9cb239a6b362bbdffa9649e61ad2cb699d2e4bc4fdf7930a0e64a",
"sig": "9408b46569e622a46f1d35d9fa3c10e17a9285631ced4f2c9c2bba2c2842413fcb796db4e81d6f988fc056c21c407fdc3c10441592cf1e837e088f2e2dfd5403"
}
],
"signed": {
"_type": "targets",
"delegations": {
"keys": {
"c8022fa1e9b9cb239a6b362bbdffa9649e61ad2cb699d2e4bc4fdf7930a0e64a": {
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keytype": "ed25519",
"keyval": {
"public": "fcf224e55fa226056adf113ef1eb3d55e308b75b321c8c8316999d8c4fd9e0d9"
},
"scheme": "ed25519"
}
},
"roles": [
{
"keyids": [
"c8022fa1e9b9cb239a6b362bbdffa9649e61ad2cb699d2e4bc4fdf7930a0e64a"
],
"name": "role2",
"paths": [],
"terminating": false,
"threshold": 1
}
]
},
"expires": "2030-01-01T00:00:00Z",
"spec_version": "1.0.0",
"targets": {
"file3.txt": {
"hashes": {
"sha256": "141f740f53781d1ca54b8a50af22cbf74e44c21a998fa2a8a05aaac2c002886b",
"sha512": "ef5beafa16041bcdd2937140afebd485296cd54f7348ecd5a4d035c09759608de467a7ac0eb58753d0242df873c305e8bffad2454aa48f44480f15efae1cacd0"
},
"length": 28
}
},
"version": 1
}
} role2.json 0000664 0000000 0000000 00000000603 14706111210 0036252 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "c8022fa1e9b9cb239a6b362bbdffa9649e61ad2cb699d2e4bc4fdf7930a0e64a",
"sig": "75b196a224fd200e46e738b1216b3316c5384f61083872f8d14b8b0a378b2344e64b1a6f1a89a711206a66a0b199d65ac0e30fe15ddbc4de89fa8ff645f99403"
}
],
"signed": {
"_type": "targets",
"expires": "2030-01-01T00:00:00Z",
"spec_version": "1.0.0",
"targets": {},
"version": 1
}
} root.json 0000664 0000000 0000000 00000005560 14706111210 0036221 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "74b58be26a6ff00ab2eec9b14da29038591a69c212223033f4efdf24489913f2",
"sig": "d0283ac0653e324ce132e47a518f8a1539b59430efe5cdec58ec53f824bec28628b57dd5fb2452bde83fc8f5d11ab0b7350a9bbcbefc7acc6c447785545fa1e36f1352c9e20dd1ebcc3ab16a2a7ff702e32e481ceba88e0f348dc2cddd26ca577445d00c7194e8656d901fd2382c479555af93a64eef48cf79cdff6ecdcd7cb7"
}
],
"signed": {
"_type": "root",
"consistent_snapshot": true,
"expires": "2030-08-15T14:30:45.0000001Z",
"keys": {
"142919f8e933d7045abff3be450070057814da36331d7a22ccade8b35a9e3946": {
"keytype": "rsa",
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgHXjYnWGuCIOh5T3XGmgG/RsXWHP\nTbyu7OImP6O+uHg8hui8C1nY/mcJdFdxqgl1vKEco/Nwebh2T8L6XbNfcgV9VVst\nWpeCalZYWi55lZSLe9KixQIAyg15rNdhN9pcD3OuLmFvslgTx+dTbZ3ZoYMbcb4C\n5yqvqzcOoCTQMeWbAgMBAAE=\n-----END PUBLIC KEY-----\n"
},
"scheme": "rsassa-pss-sha256"
},
"282612f348dcd7fe3f19e0f890e89fad48d45335deeb91deef92873934e6fe6d": {
"keytype": "rsa",
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCjm6HPktvTGsygQ8Gvmu+zydTN\ne1zqoxLxV7mVRbmsCI4kn7JTHc4fmWZwvo7f/Wbto6Xj5HqGJFSlYIGZuTwZqPg3\nw8wqv8cuPxbmsFSxMoHfzBBIuJe0FlwXFysojbdhrSUqNL84tlwTFXEhePYrpTNM\nDn+9T55B0WJYT/VPxwIDAQAB\n-----END PUBLIC KEY-----\n"
},
"scheme": "rsassa-pss-sha256"
},
"74b58be26a6ff00ab2eec9b14da29038591a69c212223033f4efdf24489913f2": {
"keytype": "rsa",
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDydf/VEpxBOCDoxpM6IVhq9i67\nP9BiVv2zwZSUO/M0RTToAvFvNgDKXwtnp8LyjVk++wMA1aceMa+pS7vYrKvPIJa7\nWIT+mwy86/fIdnllJDMw5tmLr2mE3oBMxOhpEiD2tO+liGacklFNk6nHHorX9S91\niqpdRVa3zJw5ALvLdwIDAQAB\n-----END PUBLIC KEY-----\n"
},
"scheme": "rsassa-pss-sha256"
},
"8a14f637b21578cc292a67899df0e46cc160d7fd56e9beae898adb666f4fd9d6": {
"keytype": "rsa",
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCPQoHresXRRRGoinN3bNn+BI23\nKolXdXLGqYkTvr9AjemUQJxbqmvZXHboQMAYw8OuBrRNt5Fz20wjsrJwOBEU5U3n\nHSJI4zYPGckYci0/0Eo2Kjws5BmIj38qgIfhsH4zyZ4FZZ+GLRn+W3i3wl6SfRMC\n/HCg0DDwi75faC0vGQIDAQAB\n-----END PUBLIC KEY-----\n"
},
"scheme": "rsassa-pss-sha256"
}
},
"roles": {
"root": {
"keyids": [
"74b58be26a6ff00ab2eec9b14da29038591a69c212223033f4efdf24489913f2"
],
"threshold": 1
},
"snapshot": {
"keyids": [
"8a14f637b21578cc292a67899df0e46cc160d7fd56e9beae898adb666f4fd9d6"
],
"threshold": 1
},
"targets": {
"keyids": [
"282612f348dcd7fe3f19e0f890e89fad48d45335deeb91deef92873934e6fe6d"
],
"threshold": 1
},
"timestamp": {
"keyids": [
"142919f8e933d7045abff3be450070057814da36331d7a22ccade8b35a9e3946"
],
"threshold": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} snapshot.json 0000664 0000000 0000000 00000001214 14706111210 0037065 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "8a14f637b21578cc292a67899df0e46cc160d7fd56e9beae898adb666f4fd9d6",
"sig": "3075fe9ef3008603eb0531500a93101b8f7eb52b07ce63fb71abaffd5eb20784bcab888abfca8041798b13dd35c6e18ff4a64d536161c4d5e7535f006edec3a46c71684a632269222da82d50bf380e20eb477032e45df0b44af9e1dc46f25cd72f9901b4fc41b90869649b6257a66188b61b83c7295baf16f113e9cc4d39b3a6"
}
],
"signed": {
"_type": "snapshot",
"expires": "2030-08-15T14:30:45.0000001Z",
"meta": {
"role1.json": {
"version": 1
},
"role2.json": {
"version": 1
},
"targets.json": {
"version": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} targets.json 0000664 0000000 0000000 00000002362 14706111210 0036704 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "282612f348dcd7fe3f19e0f890e89fad48d45335deeb91deef92873934e6fe6d",
"sig": "80cd125a4b128c9508df8bc6f71ad2ed9896a9e7afccd53fca9e7dbc2f02db69c3ae712234d3730c929d891fa035bdf059736e7debf62cbac6f0e8d22ab0c5de3b3e47b249eb0d41dea66d9fda9588893cde824a95614129263b6fed72fafb21cd7114e603fe3a30e3871e9eb5b5029e3e9a8353190f1bcb332a81ec211a93eb"
}
],
"signed": {
"_type": "targets",
"delegations": {
"keys": {
"c8022fa1e9b9cb239a6b362bbdffa9649e61ad2cb699d2e4bc4fdf7930a0e64a": {
"keyid_hash_algorithms": [
"sha256",
"sha512"
],
"keytype": "ed25519",
"keyval": {
"public": "fcf224e55fa226056adf113ef1eb3d55e308b75b321c8c8316999d8c4fd9e0d9"
},
"scheme": "ed25519"
}
},
"roles": [
{
"keyids": [
"c8022fa1e9b9cb239a6b362bbdffa9649e61ad2cb699d2e4bc4fdf7930a0e64a"
],
"name": "role1",
"paths": [
"file3.txt"
],
"terminating": false,
"threshold": 1
}
]
},
"expires": "2030-08-15T14:30:45.0000001Z",
"spec_version": "1.0.31",
"targets": {
"file1.txt": {
"hashes": {
"sha256": "65b8c67f51c993d898250f40aa57a317d854900b3a04895464313e48785440da"
},
"length": 31
}
},
"version": 1
}
} timestamp.json 0000664 0000000 0000000 00000001072 14706111210 0037233 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/metadata {
"signatures": [
{
"keyid": "142919f8e933d7045abff3be450070057814da36331d7a22ccade8b35a9e3946",
"sig": "639c9ce3dbb705265b5e9ad6d67fea2b38780c48ff7917e372adace8e50a7a2f054383d5960457a113059be521b8ce7e6d8a5787c600c4850b8c0ed1ae17a931a6bfe794476e7824c6f53df5232561e0a2e146b11dde7889b397c6f8136e2105bbb21b4b59b5addc032a0e755d97e531255f3b458d474184168541e542626e81"
}
],
"signed": {
"_type": "timestamp",
"expires": "2030-08-15T14:30:45.0000001Z",
"meta": {
"snapshot.json": {
"version": 1
}
},
"spec_version": "1.0.31",
"version": 1
}
} golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/targets/0000775 0000000 0000000 00000000000 14706111210 0034305 5 ustar 00root root 0000000 0000000 file1.txt 0000664 0000000 0000000 00000000037 14706111210 0035767 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/targets This is an example target file. file2.txt 0000664 0000000 0000000 00000000047 14706111210 0035771 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/targets This is an another example target file. file3.txt 0000664 0000000 0000000 00000000034 14706111210 0035766 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/repository_data/repository/targets This is role1's target file. golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/setup.go 0000664 0000000 0000000 00000005733 14706111210 0026704 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package testutils
import (
"fmt"
"log"
"os"
"path/filepath"
)
var (
TempDir string
RepoDir string
TargetsDir string
KeystoreDir string
)
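// SetupTestDirs copies the given repository metadata, target files and keystore
// fixtures into a fresh temporary directory tree and records the resulting paths
// in TempDir, RepoDir, TargetsDir and KeystoreDir.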
func SetupTestDirs(repoPath string, targetsPath string, keystorePath string) error {
tmp := os.TempDir()
var err error
TempDir, err = os.MkdirTemp(tmp, "0750")
if err != nil {
return fmt.Errorf("failed to create temporary directory: %w", err)
}
RepoDir = filepath.Join(TempDir, "repository_data", "repository")
absPath, err := filepath.Abs(repoPath)
if err != nil {
return fmt.Errorf("failed to get absolute path: %w", err)
}
err = Copy(absPath, RepoDir)
if err != nil {
return fmt.Errorf("failed to copy metadata to %s: %w", RepoDir, err)
}
TargetsDir = filepath.Join(TempDir, "repository_data", "repository", "targets")
targetsAbsPath, err := filepath.Abs(targetsPath)
if err != nil {
return fmt.Errorf("failed to get absolute targets path: %w", err)
}
err = Copy(targetsAbsPath, TargetsDir)
if err != nil {
return fmt.Errorf("failed to copy metadata to %s: %w", RepoDir, err)
}
KeystoreDir = filepath.Join(TempDir, "keystore")
err = os.Mkdir(KeystoreDir, 0750)
if err != nil {
return fmt.Errorf("failed to create keystore dir %s: %w", KeystoreDir, err)
}
absPath, err = filepath.Abs(keystorePath)
if err != nil {
return fmt.Errorf("failed to get absolute path: %w", err)
}
err = Copy(absPath, KeystoreDir)
if err != nil {
return fmt.Errorf("failed to copy keystore to %s: %w", KeystoreDir, err)
}
return nil
}
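// Copy creates toPath if needed and copies the files directly under fromPath
// into it (non-recursive).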
func Copy(fromPath string, toPath string) error {
err := os.MkdirAll(toPath, 0750)
if err != nil {
return fmt.Errorf("failed to create directory %s: %w", toPath, err)
}
files, err := os.ReadDir(fromPath)
if err != nil {
return fmt.Errorf("failed to read path %s: %w", fromPath, err)
}
for _, file := range files {
data, err := os.ReadFile(filepath.Join(fromPath, file.Name()))
if err != nil {
return fmt.Errorf("failed to read file %s: %w", file.Name(), err)
}
filePath := filepath.Join(toPath, file.Name())
err = os.WriteFile(filePath, data, 0750)
if err != nil {
return fmt.Errorf("failed to write file %s: %w", filePath, err)
}
}
return nil
}
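// Cleanup removes the temporary directory tree created by SetupTestDirs.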
func Cleanup() {
log.Printf("cleaning temporary directory: %s\n", TempDir)
err := os.RemoveAll(TempDir)
if err != nil {
log.Fatalf("failed to cleanup test directories: %v", err)
}
}
golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/simulator/ 0000775 0000000 0000000 00000000000 14706111210 0027224 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/simulator/repository_simulator.go 0000664 0000000 0000000 00000051136 14706111210 0034077 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package simulator
// Test utility to simulate a repository
// RepositorySimulator provides methods to modify repository metadata so that it's
// easy to "publish" new repository versions with modified metadata, while serving
// the versions to client test code.
// RepositorySimulator implements the fetcher.Fetcher interface, so Updaters in tests can use it
// as a way to "download" new metadata from remote: in practice no downloading,
// network connections or even file access happens as RepositorySimulator serves
// everything from memory.
// Metadata and targets "hosted" by the simulator are made available in URL paths
// "/metadata/..." and "/targets/..." respectively.
// Example:
// // Initialize repository with top-level metadata
// sim := simulator.NewRepository()
// // metadata can be modified directly: it is immediately available to clients
// sim.Snapshot.Version += 1
// // As an exception, new root versions require explicit publishing
// sim.Root.Version += 1
// sim.PublishRoot()
// // there are helper functions
// sim.AddTarget("targets", b"content", "targetpath")
// sim.Targets.Version += 1
// sim.UpdateSnapshot()
// """
import (
"bytes"
"crypto"
"crypto/ed25519"
"crypto/sha256"
"fmt"
"log/slog"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
)
var SPEC_VER = "." + metadata.SPECIFICATION_VERSION
type FTMetadata struct {
Name string
Value int
}
type FTTargets struct {
Name string
Value *string
}
// FetchTracker contains actual target data
// and the related target metadata
type FetchTracker struct {
Metadata []FTMetadata
Targets []FTTargets
}
// RepositoryTarget contains actual target data
// and the related target metadata
type RepositoryTarget struct {
Data []byte
TargetFile *metadata.TargetFiles
}
// RepositorySimulator simulates a repository that can be used for testing
type RepositorySimulator struct {
fetcher.Fetcher
MDDelegates map[string]metadata.Metadata[metadata.TargetsType]
SignedRoots [][]byte
Signers map[string]map[string]*signature.Signer
TargetFiles map[string]RepositoryTarget
ComputeMetafileHashesAndLength bool
PrefixTargetsWithHash bool
DumpDir string
DumpVersion int64
FetchTracker FetchTracker
SafeExpiry time.Time
MDTargets *metadata.Metadata[metadata.TargetsType]
MDSnapshot *metadata.Metadata[metadata.SnapshotType]
MDTimestamp *metadata.Metadata[metadata.TimestampType]
MDRoot *metadata.Metadata[metadata.RootType]
LocalDir string
}
// NewRepository initializes a RepositorySimulator with a minimal valid set of top-level metadata
func NewRepository() *RepositorySimulator {
now := time.Now().UTC()
rs := RepositorySimulator{
MDDelegates: map[string]metadata.Metadata[metadata.TargetsType]{},
// Other metadata is signed on-demand (when fetched) but roots must be
// explicitly published with PublishRoot() which maintains this list
SignedRoots: [][]byte{},
// Signers are used on-demand at fetch time to sign metadata
// keys are roles, values are map of {keyid: signer}
Signers: make(map[string]map[string]*signature.Signer),
// Target downloads are served from this map
TargetFiles: make(map[string]RepositoryTarget),
// Whether to compute hashes and length for meta in snapshot/timestamp
ComputeMetafileHashesAndLength: false,
// Enable hash-prefixed target file names
PrefixTargetsWithHash: true,
DumpDir: "",
DumpVersion: 0,
FetchTracker: FetchTracker{
Metadata: []FTMetadata{},
Targets: []FTTargets{},
},
SafeExpiry: now.Truncate(time.Second).AddDate(0, 0, 30),
}
rs.setupMinimalValidRepository()
return &rs
}
func (rs *RepositorySimulator) setupMinimalValidRepository() {
rs.MDTargets = metadata.Targets(rs.SafeExpiry)
rs.MDSnapshot = metadata.Snapshot(rs.SafeExpiry)
rs.MDTimestamp = metadata.Timestamp(rs.SafeExpiry)
rs.MDRoot = metadata.Root(rs.SafeExpiry)
for _, role := range metadata.TOP_LEVEL_ROLE_NAMES {
publicKey, _, signer := CreateKey()
mtdkey, err := metadata.KeyFromPublicKey(*publicKey)
if err != nil {
slog.Error("Repository simulator: key conversion failed while setting repository", "err", err)
os.Exit(1)
}
if err = rs.MDRoot.Signed.AddKey(mtdkey, role); err != nil {
slog.Error("Repository simulator: failed to add key", "err", err)
}
rs.AddSigner(role, mtdkey.ID(), *signer)
}
rs.PublishRoot()
}
func (rs *RepositorySimulator) Root() metadata.RootType {
return rs.MDRoot.Signed
}
func (rs *RepositorySimulator) Timestamp() metadata.TimestampType {
return rs.MDTimestamp.Signed
}
func (rs *RepositorySimulator) Snapshot() metadata.SnapshotType {
return rs.MDSnapshot.Signed
}
func (rs *RepositorySimulator) Targets() metadata.TargetsType {
return rs.MDTargets.Signed
}
// AllTargets returns a channel that yields the signed portion of the top-level
// targets metadata, followed by an entry for each delegated targets role, one at a time
func (rs *RepositorySimulator) AllTargets() <-chan metadata.TargetsType {
ch := make(chan metadata.TargetsType)
go func() {
ch <- rs.MDTargets.Signed
for role, md := range rs.MDDelegates {
targets := metadata.TargetsType{
Type: role,
Version: md.Signed.Version,
Delegations: md.Signed.Delegations,
}
ch <- targets
}
close(ch)
}()
return ch
}
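// CreateKey generates a new ed25519 key pair together with a sigstore signer for it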
func CreateKey() (*ed25519.PublicKey, *ed25519.PrivateKey, *signature.Signer) {
public, private, err := ed25519.GenerateKey(nil)
if err != nil {
slog.Error("Failed to generate key", "err", err)
}
signer, err := signature.LoadSigner(private, crypto.Hash(0))
if err != nil {
slog.Error("failed to load signer", "err", err)
}
return &public, &private, &signer
}
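// AddSigner registers a signer for the given role under the given key ID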
func (rs *RepositorySimulator) AddSigner(role string, keyID string, signer signature.Signer) {
if _, ok := rs.Signers[role]; !ok {
rs.Signers[role] = make(map[string]*signature.Signer)
}
rs.Signers[role][keyID] = &signer
}
// RotateKeys removes all keys for role, then adds a threshold of new keys
func (rs *RepositorySimulator) RotateKeys(role string) {
rs.MDRoot.Signed.Roles[role].KeyIDs = []string{}
for k := range rs.Signers[role] {
delete(rs.Signers[role], k)
}
for i := 0; i < rs.MDRoot.Signed.Roles[role].Threshold; i++ {
publicKey, _, signer := CreateKey()
mtdkey, err := metadata.KeyFromPublicKey(*publicKey)
if err != nil {
slog.Error("Repository simulator: key conversion failed while rotating keys", "err", err)
os.Exit(1)
}
if err = rs.MDRoot.Signed.AddKey(mtdkey, role); err != nil {
slog.Error("Repository simulator: failed to add key", "err", err)
}
rs.AddSigner(role, mtdkey.ID(), *signer)
}
}
// PublishRoot signs and stores a new serialized version of root
func (rs *RepositorySimulator) PublishRoot() {
rs.MDRoot.ClearSignatures()
for _, signer := range rs.Signers[metadata.ROOT] {
if _, err := rs.MDRoot.Sign(*signer); err != nil {
slog.Error("Repository simulator: failed to sign root", "err", err)
}
}
mtd, err := rs.MDRoot.MarshalJSON()
if err != nil {
slog.Error("Failed to marshal metadata while publishing root", "err", err)
}
rs.SignedRoots = append(rs.SignedRoots, mtd)
slog.Info("Published root", "version", rs.MDRoot.Signed.Version)
}
func lastIndex(str string, delimiter string) (string, string, string) {
// TODO: check if contained and lengths
spl := strings.Split(str, delimiter)
res := strings.SplitAfterN(str, delimiter, len(spl)-1)
return res[0], delimiter, res[1]
}
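// partition splits s on delimiter and returns the (possibly empty) version prefix
// and the remaining role name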
func partition(s string, delimiter string) (string, string) {
splitted := strings.Split(s, delimiter)
version := ""
role := ""
switch len(splitted) {
case 1:
role = splitted[0]
case 2:
version = splitted[0]
role = splitted[1]
case 3:
version = splitted[0]
if splitted[1] == "" && splitted[2] == "" {
role = "."
}
case 4:
version = splitted[0]
if splitted[1] == "" && splitted[2] == "" && splitted[3] == "" {
role = ".."
}
}
return version, role
}
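// DownloadFile implements fetcher.Fetcher for the simulator: it serves the
// requested URL path from memory and errors if the result exceeds maxLength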
func (rs *RepositorySimulator) DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error) {
data, err := rs.fetch(urlPath)
if err != nil {
return data, err
}
if len(data) > int(maxLength) {
err = &metadata.ErrDownloadLengthMismatch{
Msg: fmt.Sprintf("Downloaded %d bytes exceeding the maximum allowed length of %d", len(data), maxLength),
}
}
return data, err
}
func IsWindowsPath(path string) bool {
match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path)
return match
}
func trimPrefix(path string, prefix string) (string, error) {
var toTrim string
if IsWindowsPath(path) {
toTrim = path
} else {
parsedURL, e := url.Parse(path)
if e != nil {
return "", e
}
toTrim = parsedURL.Path
}
return strings.TrimPrefix(toTrim, prefix), nil
}
func hasPrefix(path, prefix string) bool {
return strings.HasPrefix(filepath.ToSlash(path), prefix)
}
func hasSuffix(path, prefix string) bool {
return strings.HasSuffix(filepath.ToSlash(path), prefix)
}
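// fetch routes a simulator URL path to either in-memory metadata ("/metadata/...")
// or in-memory target content ("/targets/...")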
func (rs *RepositorySimulator) fetch(urlPath string) ([]byte, error) {
path, err := trimPrefix(urlPath, rs.LocalDir)
if err != nil {
return nil, err
}
if hasPrefix(path, "/metadata/") && hasSuffix(path, ".json") {
fileName := path[len("/metadata/"):]
verAndName := fileName[:len(path)-len("/metadata/")-len(".json")]
versionStr, role := partition(verAndName, ".")
var version int
var err error
if role == metadata.ROOT || (rs.MDRoot.Signed.ConsistentSnapshot && verAndName != metadata.TIMESTAMP) {
version, err = strconv.Atoi(versionStr)
if err != nil {
slog.Error("Repository simulator: downloading file: failed to convert version", "err", err)
}
} else {
role = verAndName
version = -1
}
return rs.FetchMetadata(role, &version)
} else if hasPrefix(path, "/targets/") {
targetPath := path[len("/targets/"):]
dirParts, sep, prefixedFilename := lastIndex(targetPath, string(filepath.Separator))
var filename string
prefix := ""
filename = prefixedFilename
if rs.MDRoot.Signed.ConsistentSnapshot && rs.PrefixTargetsWithHash {
prefix, filename = partition(prefixedFilename, ".")
}
targetPath = filepath.Join(dirParts, sep, filename)
target, err := rs.FetchTarget(targetPath, prefix)
if err != nil {
slog.Error("Failed to fetch target", "err", err)
}
return target, err
}
return nil, nil
}
// FetchTarget returns data for 'targetPath', verifying 'targetHash' if it is non-empty.
// If the hash is empty, consistent snapshot prefixing is not in use
func (rs *RepositorySimulator) FetchTarget(targetPath string, targetHash string) ([]byte, error) {
rs.FetchTracker.Targets = append(rs.FetchTracker.Targets, FTTargets{Name: targetPath, Value: &targetHash})
repoTarget, ok := rs.TargetFiles[targetPath]
if !ok {
return nil, fmt.Errorf("no target %s", targetPath)
}
if targetHash != "" && !contains(repoTarget.TargetFile.Hashes, []byte(targetHash)) {
return nil, fmt.Errorf("hash mismatch for %s", targetPath)
}
slog.Info("Fetched target", "path", targetPath)
return repoTarget.Data, nil
}
func contains(hashes map[string]metadata.HexBytes, targetHash []byte) bool {
for _, value := range hashes {
if bytes.Equal(value, targetHash) {
return true
}
}
return false
}
// FetchMetadata returns signed metadata for 'role', using 'version' if it is given.
// If version is nil, non-versioned metadata is requested
func (rs *RepositorySimulator) FetchMetadata(role string, version *int) ([]byte, error) {
rs.FetchTracker.Metadata = append(rs.FetchTracker.Metadata, FTMetadata{Name: role, Value: *version})
// Decode role for the metadata
// role, _ = strconv.Unquote(role)
if role == metadata.ROOT {
// Return a version previously serialized in PublishRoot()
if version == nil || *version > len(rs.SignedRoots) && *version > 0 {
slog.Error("Unknown root version", "version", *version)
return []byte{}, &metadata.ErrDownloadHTTP{StatusCode: 404}
}
slog.Info("Fetched root", "version", version)
return rs.SignedRoots[*version-1], nil
}
// Sign and serialize the requested metadata
if role == metadata.TIMESTAMP {
return signMetadata(role, rs.MDTimestamp, rs)
} else if role == metadata.SNAPSHOT {
return signMetadata(role, rs.MDSnapshot, rs)
} else if role == metadata.TARGETS {
return signMetadata(role, rs.MDTargets, rs)
} else {
md, ok := rs.MDDelegates[role]
if !ok {
slog.Error("Unknown role", "role", role)
return []byte{}, &metadata.ErrDownloadHTTP{StatusCode: 404}
}
return signMetadata(role, &md, rs)
}
}
func signMetadata[T metadata.Roles](role string, md *metadata.Metadata[T], rs *RepositorySimulator) ([]byte, error) {
md.Signatures = []metadata.Signature{}
for _, signer := range rs.Signers[role] {
// TODO: check if a bool argument should be added to Sign as in python-tuf
// Not appending only for a local repo example !!! missing type for signers
if _, err := md.Sign(*signer); err != nil {
slog.Error("Repository simulator: failed to sign metadata", "err", err)
}
}
// TODO: test if the version is the correct one
// log.Printf("fetched %s v%d with %d sigs", role, md.GetVersion(), len(rs.Signers[role]))
mtd, err := md.MarshalJSON()
if err != nil {
slog.Error("Failed to marshal metadata while signing for role", "role", role, "err", err)
}
return mtd, err
}
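// computeHashesAndLength serializes the metadata for role and returns its
// sha256 hash and length in bytes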
func (rs *RepositorySimulator) computeHashesAndLength(role string) (map[string]metadata.HexBytes, int) {
noVersion := -1
data, err := rs.FetchMetadata(role, &noVersion)
if err != nil {
slog.Error("Failed to fetch metadata", "err", err)
}
digest := sha256.Sum256(data)
hashes := map[string]metadata.HexBytes{"sha256": digest[:]}
return hashes, len(data)
}
// UpdateTimestamp bumps the timestamp version and assigns the current snapshot
// version to the snapshot meta entry
func (rs *RepositorySimulator) UpdateTimestamp() {
hashes := make(map[string]metadata.HexBytes)
length := 0
if rs.ComputeMetafileHashesAndLength {
hashes, length = rs.computeHashesAndLength(metadata.SNAPSHOT)
}
rs.MDTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] = &metadata.MetaFiles{
Length: int64(length),
Hashes: hashes,
Version: rs.MDSnapshot.Signed.Version,
}
rs.MDTimestamp.Signed.Version += 1
}
// UpdateSnapshot updates snapshot, assigns targets versions
// and updates timestamp
func (rs *RepositorySimulator) UpdateSnapshot() {
for target := range rs.AllTargets() {
hashes := make(map[string]metadata.HexBytes)
length := 0
if rs.ComputeMetafileHashesAndLength {
hashes, length = rs.computeHashesAndLength(target.Type)
}
rs.MDSnapshot.Signed.Meta[fmt.Sprintf("%s.json", target.Type)] = &metadata.MetaFiles{
Length: int64(length),
Hashes: hashes,
Version: target.Version,
}
}
rs.MDSnapshot.Signed.Version += 1
rs.UpdateTimestamp()
}
// getDelegator returns the TargetsType object corresponding to the given delegator name
func (rs *RepositorySimulator) getDelegator(delegatorName string) *metadata.TargetsType {
if delegatorName == metadata.TARGETS {
return &rs.MDTargets.Signed
}
delegation := rs.MDDelegates[delegatorName]
return &delegation.Signed
}
// AddTarget creates a target from data and adds it to the TargetFiles.
func (rs *RepositorySimulator) AddTarget(role string, data []byte, path string) {
targets := rs.getDelegator(role)
target, err := metadata.TargetFile().FromBytes(path, data, "sha256")
if err != nil {
slog.Error("Failed to add target", "path", path, "err", err)
os.Exit(1)
}
targets.Targets[path] = target
rs.TargetFiles[path] = RepositoryTarget{
Data: data,
TargetFile: target,
}
}
// AddDelegation adds delegated target role to the repository
func (rs *RepositorySimulator) AddDelegation(delegatorName string, role metadata.DelegatedRole, targets metadata.TargetsType) {
delegator := rs.getDelegator(delegatorName)
if delegator.Delegations != nil && delegator.Delegations.SuccinctRoles != nil {
slog.Error("Can't add a role when SuccinctRoles is used")
os.Exit(1)
}
// Create delegation
if delegator.Delegations == nil {
delegator.Delegations = &metadata.Delegations{
Keys: map[string]*metadata.Key{},
Roles: []metadata.DelegatedRole{},
}
}
// Put delegation last by default
delegator.Delegations.Roles = append(delegator.Delegations.Roles, role)
// By default add one new key for the role
publicKey, _, signer := CreateKey()
mdkey, err := metadata.KeyFromPublicKey(*publicKey)
if err != nil {
slog.Error("Repository simulator: key conversion failed while adding delegation", "err", err)
os.Exit(1)
}
if err = delegator.AddKey(mdkey, role.Name); err != nil {
slog.Error("Repository simulator: failed to add key", "err", err)
}
rs.AddSigner(role.Name, mdkey.ID(), *signer)
if _, ok := rs.MDDelegates[role.Name]; !ok {
rs.MDDelegates[role.Name] = metadata.Metadata[metadata.TargetsType]{
Signed: targets,
UnrecognizedFields: map[string]interface{}{},
}
}
}
// AddSuccinctRoles adds succinct roles info to a delegator with name "delegatorName".
//
// Note that for each delegated role represented by succinct roles an empty
// Targets instance is created
func (rs *RepositorySimulator) AddSuccinctRoles(delegatorName string, bitLength int, namePrefix string) {
delegator := rs.getDelegator(delegatorName)
if delegator.Delegations != nil && delegator.Delegations.Roles != nil {
slog.Error("Can't add a SuccinctRoles when delegated roles are used")
os.Exit(1)
}
publicKey, _, signer := CreateKey()
mdkey, err := metadata.KeyFromPublicKey(*publicKey)
if err != nil {
slog.Error("Repository simulator: key conversion failed while adding succinct roles", "err", err)
os.Exit(1)
}
succinctRoles := &metadata.SuccinctRoles{
KeyIDs: []string{},
Threshold: 1,
BitLength: bitLength,
NamePrefix: namePrefix,
}
delegator.Delegations = &metadata.Delegations{Roles: nil, SuccinctRoles: succinctRoles}
// Add targets metadata for all bins
for _, delegatedName := range succinctRoles.GetRoles() {
rs.MDDelegates[delegatedName] = metadata.Metadata[metadata.TargetsType]{
Signed: metadata.TargetsType{
Expires: rs.SafeExpiry,
},
}
rs.AddSigner(delegatedName, mdkey.ID(), *signer)
}
if err = delegator.AddKey(mdkey, metadata.TARGETS); err != nil {
slog.Error("Repository simulator: failed to add key", "err", err)
}
}
// Write dumps current repository metadata to rs.DumpDir
// This is a debugging tool: dumping repository state before running
// Updater refresh may be useful while debugging a test.
func (rs *RepositorySimulator) Write() {
if rs.DumpDir == "" {
rs.DumpDir = os.TempDir()
slog.Info("Repository Simulator dumps into tmp dir", "path", rs.DumpDir)
}
rs.DumpVersion += 1
destDir := filepath.Join(rs.DumpDir, strconv.Itoa(int(rs.DumpVersion)))
if err := os.MkdirAll(destDir, os.ModePerm); err != nil {
slog.Error("Repository simulator: failed to create dir", "err", err)
}
for ver := 1; ver < len(rs.SignedRoots)+1; ver++ {
f, _ := os.Create(filepath.Join(destDir, fmt.Sprintf("%d.root.json", ver)))
defer f.Close()
meta, err := rs.FetchMetadata(metadata.ROOT, &ver)
if err != nil {
slog.Error("Failed to fetch metadata", "err", err)
}
if _, err = f.Write(meta); err != nil {
slog.Error("Repository simulator: failed to write signed roots", "err", err)
}
}
noVersion := -1
for _, role := range []string{metadata.TIMESTAMP, metadata.SNAPSHOT, metadata.TARGETS} {
f, _ := os.Create(filepath.Join(destDir, fmt.Sprintf("%s.json", role)))
defer f.Close()
meta, err := rs.FetchMetadata(role, &noVersion)
if err != nil {
slog.Error("Failed to fetch metadata", "err", err)
}
if _, err = f.Write(meta); err != nil {
slog.Error("Repository simulator: failed to write signed roots", "err", err)
}
}
for role := range rs.MDDelegates {
quotedRole := url.PathEscape(role)
f, _ := os.Create(filepath.Join(destDir, fmt.Sprintf("%s.json", quotedRole)))
defer f.Close()
meta, err := rs.FetchMetadata(role, &noVersion)
if err != nil {
slog.Error("Failed to fetch metadata", "err", err)
}
if _, err = f.Write(meta); err != nil {
slog.Error("Repository simulator: failed to write signed roots", "err", err)
}
}
}
repository_simulator_setup.go 0000664 0000000 0000000 00000004727 14706111210 0035244 0 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/internal/testutils/simulator // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package simulator
import (
"log/slog"
"os"
"path/filepath"
"time"
)
var (
MetadataURL = "https://jku.github.io/tuf-demo/metadata"
TargetsURL = "https://jku.github.io/tuf-demo/targets"
MetadataDir string
RootBytes []byte
PastDateTime time.Time
Sim *RepositorySimulator
metadataPath = "/metadata"
targetsPath = "/targets"
LocalDir string
DumpDir string
)
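// InitLocalEnv creates a temporary directory with empty metadata/ and targets/
// subdirectories and stores its path in LocalDir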
func InitLocalEnv() error {
tmp := os.TempDir()
tmpDir, err := os.MkdirTemp(tmp, "0750")
if err != nil {
slog.Error("Failed to create temporary directory", "err", err)
os.Exit(1)
}
if err = os.Mkdir(filepath.Join(tmpDir, metadataPath), 0750); err != nil {
slog.Error("Repository simulator: failed to create dir", "err", err)
}
if err = os.Mkdir(filepath.Join(tmpDir, targetsPath), 0750); err != nil {
slog.Error("Repository simulator: failed to create dir", "err", err)
}
LocalDir = tmpDir
return nil
}
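// InitMetadataDir creates a fresh RepositorySimulator, writes its initial signed
// root.json into a local metadata directory and returns the simulator together
// with the metadata and targets directory paths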
func InitMetadataDir() (*RepositorySimulator, string, string, error) {
if err := InitLocalEnv(); err != nil {
slog.Error("Failed to initialize environment", "err", err)
os.Exit(1)
}
metadataDir := filepath.Join(LocalDir, metadataPath)
sim := NewRepository()
f, err := os.Create(filepath.Join(metadataDir, "root.json"))
if err != nil {
slog.Error("Failed to create root", "err", err)
os.Exit(1)
}
defer f.Close()
if _, err = f.Write(sim.SignedRoots[0]); err != nil {
slog.Error("Repository simulator setup: failed to write signed roots", "err", err)
}
targetsDir := filepath.Join(LocalDir, targetsPath)
sim.LocalDir = LocalDir
return sim, metadataDir, targetsDir, err
}
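// GetRootBytes reads the trusted root.json from the given local metadata directory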
func GetRootBytes(localMetadataDir string) ([]byte, error) {
return os.ReadFile(filepath.Join(localMetadataDir, "root.json"))
}
func RepositoryCleanup(tmpDir string) {
slog.Info("Cleaning temporary directory", "dir", tmpDir)
os.RemoveAll(tmpDir)
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/ 0000775 0000000 0000000 00000000000 14706111210 0023111 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/config/ 0000775 0000000 0000000 00000000000 14706111210 0024356 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/config/config.go 0000664 0000000 0000000 00000005466 14706111210 0026165 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package config
import (
"net/url"
"os"
"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
)
type UpdaterConfig struct {
// TUF configuration
MaxRootRotations int64
MaxDelegations int
RootMaxLength int64
TimestampMaxLength int64
SnapshotMaxLength int64
TargetsMaxLength int64
// Updater configuration
Fetcher fetcher.Fetcher
LocalTrustedRoot []byte
LocalMetadataDir string
LocalTargetsDir string
RemoteMetadataURL string
RemoteTargetsURL string
DisableLocalCache bool
PrefixTargetsWithHash bool
// UnsafeLocalMode only uses the metadata as written on disk;
// if the metadata is incomplete, calling updater.Refresh will fail
UnsafeLocalMode bool
}
// New creates a new UpdaterConfig instance used by the Updater to
// store configuration
func New(remoteURL string, rootBytes []byte) (*UpdaterConfig, error) {
// Default URL for target files - /targets
targetsURL, err := url.JoinPath(remoteURL, "targets")
if err != nil {
return nil, err
}
return &UpdaterConfig{
// TUF configuration
MaxRootRotations: 256,
MaxDelegations: 32,
RootMaxLength: 512000, // bytes
TimestampMaxLength: 16384, // bytes
SnapshotMaxLength: 2000000, // bytes
TargetsMaxLength: 5000000, // bytes
// Updater configuration
Fetcher: &fetcher.DefaultFetcher{}, // use the default built-in download fetcher
LocalTrustedRoot: rootBytes, // trusted root.json
RemoteMetadataURL: remoteURL, // URL of where the TUF metadata is
RemoteTargetsURL: targetsURL, // URL of where the target files should be downloaded from
DisableLocalCache: false, // enable local caching of trusted metadata
PrefixTargetsWithHash: true, // use hash-prefixed target files with consistent snapshots
UnsafeLocalMode: false,
}, nil
}
func (cfg *UpdaterConfig) EnsurePathsExist() error {
if cfg.DisableLocalCache {
return nil
}
for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} {
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return err
}
}
return nil
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/config/config_test.go 0000664 0000000 0000000 00000010465 14706111210 0027217 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package config
import (
"net/url"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
)
func TestNewUpdaterConfig(t *testing.T) {
// setup testing table (tt) and create subtest for each entry
for _, tt := range []struct {
name string
desc string
remoteURL string
rootBytes []byte
config *UpdaterConfig
wantErr error
}{
{
name: "success",
desc: "No errors expected",
remoteURL: "somepath",
rootBytes: []byte("somerootbytes"),
config: &UpdaterConfig{
MaxRootRotations: 256,
MaxDelegations: 32,
RootMaxLength: 512000,
TimestampMaxLength: 16384,
SnapshotMaxLength: 2000000,
TargetsMaxLength: 5000000,
Fetcher: &fetcher.DefaultFetcher{},
LocalTrustedRoot: []byte("somerootbytes"),
RemoteMetadataURL: "somepath",
RemoteTargetsURL: "somepath/targets",
DisableLocalCache: false,
PrefixTargetsWithHash: true,
},
wantErr: nil,
},
{
name: "invalid character in URL",
desc: "Invalid ASCII control sequence in input",
remoteURL: string([]byte{0x7f}),
rootBytes: []byte("somerootbytes"),
config: nil,
wantErr: &url.Error{}, // just make sure this is non-nil, url pkg has no exported errors
},
} {
t.Run(tt.name, func(t *testing.T) {
// this will only be printed if run in verbose mode or if test fails
t.Logf("Desc: %s", tt.desc)
// run the function under test
updaterConfig, err := New(tt.remoteURL, tt.rootBytes)
// special case if we expect no error
if tt.wantErr == nil {
assert.NoErrorf(t, err, "expected no error but got %v", err)
assert.EqualExportedValuesf(t, *tt.config, *updaterConfig, "expected %#+v but got %#+v", tt.config, updaterConfig)
return
}
// compare the error with our expected error
assert.Nilf(t, updaterConfig, "expected nil but got %#+v", updaterConfig)
assert.Errorf(t, err, "expected %v but got %v", tt.wantErr, err)
})
}
}
func TestEnsurePathsExist(t *testing.T) {
// setup testing table (tt) and create subtest for each entry
for _, tt := range []struct {
name string
desc string
config *UpdaterConfig
setup func(t *testing.T, cfg *UpdaterConfig)
wantErr error
}{
{
name: "success",
desc: "No errors expected",
config: &UpdaterConfig{
DisableLocalCache: false,
},
setup: func(t *testing.T, cfg *UpdaterConfig) {
t.Helper()
tmp := t.TempDir()
cfg.LocalTargetsDir = filepath.Join(tmp, "targets")
cfg.LocalMetadataDir = filepath.Join(tmp, "metadata")
},
wantErr: nil,
},
{
name: "path not exist",
desc: "No local directories error",
config: &UpdaterConfig{
DisableLocalCache: false,
},
setup: func(t *testing.T, cfg *UpdaterConfig) {
t.Helper()
},
wantErr: os.ErrNotExist,
},
{
name: "no local cache",
desc: "Test if method no-op works",
config: &UpdaterConfig{
DisableLocalCache: true,
},
setup: func(t *testing.T, cfg *UpdaterConfig) {
t.Helper()
},
wantErr: nil,
},
} {
t.Run(tt.name, func(t *testing.T) {
// this will only be printed if run in verbose mode or if test fails
t.Logf("Desc: %s", tt.desc)
// run special test setup in case it is needed for any subtest
tt.setup(t, tt.config)
// run the method under test
err := tt.config.EnsurePathsExist()
// special case if we expect no error
if tt.wantErr == nil {
assert.NoErrorf(t, err, "expected no error but got %v", err)
return
}
// compare the error with our expected error
assert.ErrorIsf(t, err, tt.wantErr, "expected %v but got %v", tt.wantErr, err)
})
}
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/errors.go 0000664 0000000 0000000 00000013566 14706111210 0024767 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"fmt"
)
// Define TUF error types used inside the new modern implementation.
// The names chosen for TUF error types should start with 'Err' except where
// there is a good reason not to, and the reason should be provided in those cases.
// Repository errors
// ErrRepository - an error with a repository's state, such as a missing file.
// It covers all errors that originate on the repository side, as seen from
// the perspective of users of the metadata API or client
type ErrRepository struct {
Msg string
}
func (e *ErrRepository) Error() string {
return fmt.Sprintf("repository error: %s", e.Msg)
}
func (e *ErrRepository) Is(target error) bool {
_, ok := target.(*ErrRepository)
return ok
}
// ErrUnsignedMetadata - An error about metadata object with insufficient threshold of signatures
type ErrUnsignedMetadata struct {
Msg string
}
func (e *ErrUnsignedMetadata) Error() string {
return fmt.Sprintf("unsigned metadata error: %s", e.Msg)
}
// ErrUnsignedMetadata is a subset of ErrRepository
func (e *ErrUnsignedMetadata) Is(target error) bool {
if _, ok := target.(*ErrUnsignedMetadata); ok {
return true
}
if _, ok := target.(*ErrRepository); ok {
return true
}
return false
}
// ErrBadVersionNumber - An error for metadata that contains an invalid version number
type ErrBadVersionNumber struct {
Msg string
}
func (e *ErrBadVersionNumber) Error() string {
return fmt.Sprintf("bad version number error: %s", e.Msg)
}
// ErrBadVersionNumber is a subset of ErrRepository
func (e *ErrBadVersionNumber) Is(target error) bool {
if _, ok := target.(*ErrBadVersionNumber); ok {
return true
}
if _, ok := target.(*ErrRepository); ok {
return true
}
return false
}
// ErrEqualVersionNumber - An error for metadata containing a previously verified version number
type ErrEqualVersionNumber struct {
Msg string
}
func (e *ErrEqualVersionNumber) Error() string {
return fmt.Sprintf("equal version number error: %s", e.Msg)
}
// ErrEqualVersionNumber is a subset of both ErrRepository and ErrBadVersionNumber
func (e *ErrEqualVersionNumber) Is(target error) bool {
if _, ok := target.(*ErrEqualVersionNumber); ok {
return true
}
if _, ok := target.(*ErrBadVersionNumber); ok {
return true
}
if _, ok := target.(*ErrRepository); ok {
return true
}
return false
}
// ErrExpiredMetadata - Indicate that a TUF Metadata file has expired
type ErrExpiredMetadata struct {
Msg string
}
func (e *ErrExpiredMetadata) Error() string {
return fmt.Sprintf("expired metadata error: %s", e.Msg)
}
// ErrExpiredMetadata is a subset of ErrRepository
func (e *ErrExpiredMetadata) Is(target error) bool {
if _, ok := target.(*ErrExpiredMetadata); ok {
return true
}
if _, ok := target.(*ErrRepository); ok {
return true
}
return false
}
// ErrLengthOrHashMismatch - An error while checking the length and hash values of an object
type ErrLengthOrHashMismatch struct {
Msg string
}
func (e *ErrLengthOrHashMismatch) Error() string {
return fmt.Sprintf("length/hash verification error: %s", e.Msg)
}
// ErrLengthOrHashMismatch is a subset of ErrRepository
func (e *ErrLengthOrHashMismatch) Is(target error) bool {
if _, ok := target.(*ErrLengthOrHashMismatch); ok {
return true
}
if _, ok := target.(*ErrRepository); ok {
return true
}
return false
}
// Download errors
// ErrDownload - An error occurred while attempting to download a file
type ErrDownload struct {
Msg string
}
func (e *ErrDownload) Error() string {
return fmt.Sprintf("download error: %s", e.Msg)
}
func (e *ErrDownload) Is(target error) bool {
_, ok := target.(*ErrDownload)
return ok
}
// ErrDownloadLengthMismatch - Indicate that a mismatch of lengths was seen while downloading a file
type ErrDownloadLengthMismatch struct {
Msg string
}
func (e *ErrDownloadLengthMismatch) Error() string {
return fmt.Sprintf("download length mismatch error: %s", e.Msg)
}
// ErrDownloadLengthMismatch is a subset of ErrDownload
func (e *ErrDownloadLengthMismatch) Is(target error) bool {
if _, ok := target.(*ErrDownloadLengthMismatch); ok {
return true
}
if _, ok := target.(*ErrDownload); ok {
return true
}
return false
}
// ErrDownloadHTTP - Returned by Fetcher interface implementations for HTTP errors
type ErrDownloadHTTP struct {
StatusCode int
URL string
}
func (e *ErrDownloadHTTP) Error() string {
return fmt.Sprintf("failed to download %s, http status code: %d", e.URL, e.StatusCode)
}
// ErrDownloadHTTP is a subset of ErrDownload
func (e *ErrDownloadHTTP) Is(target error) bool {
if _, ok := target.(*ErrDownloadHTTP); ok {
return true
}
if _, ok := target.(*ErrDownload); ok {
return true
}
return false
}
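// A hedged usage sketch from a caller's perspective: because the Is methods above
// treat the specific download errors as subsets of ErrDownload, callers can match
// the broader class with the standard library, e.g.
//   if errors.Is(err, &metadata.ErrDownload{}) { /* any download failure */ }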
// ValueError
type ErrValue struct {
Msg string
}
func (e *ErrValue) Error() string {
return fmt.Sprintf("value error: %s", e.Msg)
}
func (e *ErrValue) Is(err error) bool {
_, ok := err.(*ErrValue)
return ok
}
// TypeError
type ErrType struct {
Msg string
}
func (e *ErrType) Error() string {
return fmt.Sprintf("type error: %s", e.Msg)
}
func (e *ErrType) Is(err error) bool {
_, ok := err.(*ErrType)
return ok
}
// RuntimeError
type ErrRuntime struct {
Msg string
}
func (e *ErrRuntime) Error() string {
return fmt.Sprintf("runtime error: %s", e.Msg)
}
func (e *ErrRuntime) Is(err error) bool {
_, ok := err.(*ErrRuntime)
return ok
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/fetcher/ 0000775 0000000 0000000 00000000000 14706111210 0024531 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/fetcher/fetcher.go 0000664 0000000 0000000 00000005714 14706111210 0026507 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package fetcher
import (
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
// Fetcher interface
type Fetcher interface {
DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error)
}
// DefaultFetcher implements Fetcher
type DefaultFetcher struct {
httpUserAgent string
}
func (d *DefaultFetcher) SetHTTPUserAgent(httpUserAgent string) {
d.httpUserAgent = httpUserAgent
}
// DownloadFile downloads a file from urlPath and returns an error if the download
// fails, the response length exceeds maxLength, or the timeout is reached.
func (d *DefaultFetcher) DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error) {
client := &http.Client{Timeout: timeout}
req, err := http.NewRequest("GET", urlPath, nil)
if err != nil {
return nil, err
}
// Use in case of multiple sessions.
if d.httpUserAgent != "" {
req.Header.Set("User-Agent", d.httpUserAgent)
}
// Execute the request.
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
// Handle HTTP status codes.
if res.StatusCode != http.StatusOK {
return nil, &metadata.ErrDownloadHTTP{StatusCode: res.StatusCode, URL: urlPath}
}
var length int64
// Get content length from header (might not be accurate, -1 or not set).
if header := res.Header.Get("Content-Length"); header != "" {
length, err = strconv.ParseInt(header, 10, 0)
if err != nil {
return nil, err
}
// Error if the reported size is greater than what is expected.
if length > maxLength {
return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)}
}
}
// Although the size has been checked above, use a LimitReader in case
// the reported size is inaccurate, or size is -1 which indicates an
// unknown length. We read maxLength + 1 in order to check if the read data
// surpassed our set limit.
data, err := io.ReadAll(io.LimitReader(res.Body, maxLength+1))
if err != nil {
return nil, err
}
// Error if the reported size is greater than what is expected.
length = int64(len(data))
if length > maxLength {
return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)}
}
return data, nil
}
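// A minimal usage sketch from a client's perspective (hedged; the URL, length limit
// and timeout below are placeholders):
//   f := &fetcher.DefaultFetcher{}
//   f.SetHTTPUserAgent("go-tuf-example")
//   data, err := f.DownloadFile("https://example.com/metadata/1.root.json", 512000, 15*time.Second)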
golang-github-theupdateframework-go-tuf-2.0.2/metadata/fetcher/fetcher_test.go 0000664 0000000 0000000 00000035156 14706111210 0027551 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package fetcher
import (
"net/url"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
func TestDownLoadFile(t *testing.T) {
for _, tt := range []struct {
name string
desc string
url string
maxLength int64
timeout time.Duration
data []byte
wantErr error
}{
{
name: "success",
desc: "No errors expected",
url: "https://jku.github.io/tuf-demo/metadata/1.root.json",
maxLength: 512000,
timeout: 15 * time.Second,
data: []byte{123, 10, 32, 34, 115, 105, 103, 110, 97, 116, 117, 114, 101, 115, 34, 58, 32, 91, 10, 32, 32, 123, 10, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 52, 99, 53, 54, 100, 101, 53, 98, 54, 50, 102, 100, 48, 54, 52, 102, 99, 57, 52, 52, 53, 51, 98, 54, 56, 48, 100, 102, 100, 51, 102, 97, 102, 54, 97, 48, 49, 98, 97, 97, 97, 98, 51, 98, 101, 97, 99, 50, 57, 54, 57, 50, 48, 102, 48, 99, 99, 102, 97, 50, 50, 55, 55, 53, 34, 44, 10, 32, 32, 32, 34, 115, 105, 103, 34, 58, 32, 34, 57, 54, 57, 98, 100, 101, 99, 51, 54, 100, 54, 102, 51, 100, 99, 53, 57, 99, 49, 55, 50, 48, 50, 97, 56, 53, 50, 56, 98, 98, 51, 53, 54, 97, 54, 101, 97, 53, 52, 100, 55, 99, 99, 57, 54, 98, 98, 51, 55, 49, 101, 101, 101, 52, 56, 101, 50, 52, 48, 49, 57, 50, 98, 99, 97, 99, 100, 53, 48, 53, 49, 51, 56, 56, 50, 52, 53, 49, 52, 52, 97, 97, 99, 97, 49, 48, 51, 57, 100, 51, 101, 98, 55, 48, 54, 50, 101, 48, 56, 55, 54, 55, 57, 53, 101, 56, 49, 101, 49, 100, 53, 54, 54, 102, 56, 100, 101, 100, 50, 99, 50, 56, 52, 97, 101, 101, 48, 102, 34, 10, 32, 32, 125, 44, 10, 32, 32, 123, 10, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 52, 53, 97, 57, 53, 55, 55, 99, 97, 52, 56, 51, 102, 51, 53, 56, 98, 100, 97, 52, 97, 50, 49, 97, 102, 57, 51, 98, 55, 54, 54, 48, 98, 56, 50, 98, 100, 57, 99, 48, 101, 49, 57, 51, 48, 97, 54, 98, 55, 100, 53, 50, 49, 98, 52, 50, 56, 57, 55, 97, 48, 102, 97, 51, 34, 44, 10, 32, 32, 32, 34, 115, 105, 103, 34, 58, 32, 34, 101, 100, 102, 97, 102, 51, 99, 53, 51, 56, 97, 48, 50, 51, 101, 55, 99, 102, 53, 98, 50, 54, 51, 97, 101, 52, 101, 54, 51, 99, 51, 51, 99, 57, 52, 97, 50, 98, 102, 99, 57, 102, 101, 56, 48, 56, 53, 57, 99, 52, 57, 51, 52, 100, 52, 97, 54, 54, 98, 48, 49, 53, 98, 54, 53, 98, 57, 48, 49, 101, 99, 53, 100, 53, 50, 57, 48, 101, 97, 53, 50, 52, 51, 51, 57, 101, 54, 97, 52, 48, 98, 53, 98, 56, 100, 98, 56, 97, 57, 53, 54, 49, 102, 51, 99, 49, 48, 51, 101, 50, 97, 101, 56, 55, 98, 57, 101, 101, 48, 51, 50, 97, 57, 101, 51, 48, 48, 49, 34, 10, 32, 32, 125, 10, 32, 93, 44, 10, 32, 34, 115, 105, 103, 110, 101, 100, 34, 58, 32, 123, 10, 32, 32, 34, 95, 116, 121, 112, 101, 34, 58, 32, 34, 114, 111, 111, 116, 34, 44, 10, 32, 32, 34, 99, 111, 110, 115, 105, 115, 116, 101, 110, 116, 95, 115, 110, 97, 112, 115, 104, 111, 116, 34, 58, 32, 116, 114, 117, 101, 44, 10, 32, 32, 34, 101, 120, 112, 105, 114, 101, 115, 34, 58, 32, 34, 50, 48, 50, 49, 45, 48, 55, 45, 49, 56, 84, 49, 51, 58, 51, 55, 58, 51, 56, 90, 34, 44, 10, 32, 32, 34, 107, 101, 121, 115, 34, 58, 32, 123, 10, 32, 32, 32, 34, 51, 56, 54, 48, 48, 56, 50, 48, 102, 49, 49, 97, 53, 102, 55, 100, 55, 102, 102, 52, 50, 101, 54, 100, 102, 99, 57, 98, 48, 51, 102, 100, 54, 48, 50, 55, 50, 97, 51, 98, 101, 54, 102, 56, 57, 53, 100, 97, 50, 100, 56, 56, 50, 99, 101, 97, 56, 98, 98, 49, 101, 50, 48, 102, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 51, 56, 54, 48, 48, 56, 50, 48, 102, 49, 49, 97, 53, 102, 55, 100, 55, 102, 102, 52, 50, 101, 54, 100, 102, 99, 57, 98, 48, 51, 102, 100, 54, 48, 50, 55, 50, 97, 51, 98, 101, 54, 102, 56, 57, 53, 100, 97, 50, 100, 56, 56, 50, 99, 101, 97, 56, 98, 98, 49, 101, 50, 48, 102, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 116, 121, 112, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 118, 97, 108, 34, 58, 32, 123, 10, 32, 32, 32, 32, 32, 34, 112, 117, 98, 108, 105, 99, 34, 58, 32, 34, 53, 48, 102, 52, 56, 54, 53, 57, 54, 54, 53, 98, 51, 101, 101, 98, 50, 50, 100, 52, 57, 51, 55, 52, 101, 49, 56, 
51, 49, 57, 55, 101, 101, 102, 56, 101, 52, 50, 56, 55, 54, 97, 53, 99, 98, 57, 48, 57, 99, 57, 49, 97, 98, 55, 55, 101, 52, 50, 98, 49, 101, 99, 99, 54, 34, 10, 32, 32, 32, 32, 125, 44, 10, 32, 32, 32, 32, 34, 115, 99, 104, 101, 109, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 52, 53, 97, 57, 53, 55, 55, 99, 97, 52, 56, 51, 102, 51, 53, 56, 98, 100, 97, 52, 97, 50, 49, 97, 102, 57, 51, 98, 55, 54, 54, 48, 98, 56, 50, 98, 100, 57, 99, 48, 101, 49, 57, 51, 48, 97, 54, 98, 55, 100, 53, 50, 49, 98, 52, 50, 56, 57, 55, 97, 48, 102, 97, 51, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 52, 53, 97, 57, 53, 55, 55, 99, 97, 52, 56, 51, 102, 51, 53, 56, 98, 100, 97, 52, 97, 50, 49, 97, 102, 57, 51, 98, 55, 54, 54, 48, 98, 56, 50, 98, 100, 57, 99, 48, 101, 49, 57, 51, 48, 97, 54, 98, 55, 100, 53, 50, 49, 98, 52, 50, 56, 57, 55, 97, 48, 102, 97, 51, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 116, 121, 112, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 118, 97, 108, 34, 58, 32, 123, 10, 32, 32, 32, 32, 32, 34, 112, 117, 98, 108, 105, 99, 34, 58, 32, 34, 49, 56, 101, 98, 50, 52, 56, 51, 49, 57, 54, 98, 55, 97, 97, 50, 53, 102, 97, 102, 98, 56, 49, 50, 55, 54, 99, 55, 48, 52, 102, 55, 57, 48, 51, 99, 99, 57, 98, 49, 101, 51, 52, 99, 97, 100, 99, 52, 101, 97, 102, 54, 55, 55, 98, 55, 97, 54, 55, 52, 100, 54, 102, 53, 34, 10, 32, 32, 32, 32, 125, 44, 10, 32, 32, 32, 32, 34, 115, 99, 104, 101, 109, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 52, 99, 53, 54, 100, 101, 53, 98, 54, 50, 102, 100, 48, 54, 52, 102, 99, 57, 52, 52, 53, 51, 98, 54, 56, 48, 100, 102, 100, 51, 102, 97, 102, 54, 97, 48, 49, 98, 97, 97, 97, 98, 51, 98, 101, 97, 99, 50, 57, 54, 57, 50, 48, 102, 48, 99, 99, 102, 97, 50, 50, 55, 55, 53, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 52, 99, 53, 54, 100, 101, 53, 98, 54, 50, 102, 100, 48, 54, 52, 102, 99, 57, 52, 52, 53, 51, 98, 54, 56, 48, 100, 102, 100, 51, 102, 97, 102, 54, 97, 48, 49, 98, 97, 97, 97, 98, 51, 98, 101, 97, 99, 50, 57, 54, 57, 50, 48, 102, 48, 99, 99, 102, 97, 50, 50, 55, 55, 53, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 116, 121, 112, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 118, 97, 108, 34, 58, 32, 123, 10, 32, 32, 32, 32, 32, 34, 112, 117, 98, 108, 105, 99, 34, 58, 32, 34, 57, 50, 49, 101, 99, 99, 56, 54, 101, 101, 57, 49, 102, 100, 100, 51, 97, 53, 53, 49, 52, 48, 50, 51, 100, 102, 49, 57, 99, 100, 56, 53, 57, 49, 53, 57, 52, 54, 55, 55, 54, 52, 102, 54, 48, 102, 99, 52, 49, 101, 49, 101, 101, 97, 99, 56, 53, 48, 51, 53, 49, 49, 54, 49, 34, 10, 32, 32, 32, 32, 125, 44, 10, 32, 32, 32, 32, 34, 115, 99, 104, 101, 109, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 56, 102, 51, 99, 50, 55, 57, 52, 102, 50, 52, 52, 50, 54, 48, 49, 52, 102, 99, 50, 54, 97, 100, 99, 98, 98, 56, 101, 102, 100, 57, 52, 52, 49, 49, 102, 99, 101, 56, 56, 49, 102, 97, 54, 48, 102, 99, 56, 55, 50, 53, 97, 56, 57, 57, 49, 49, 53, 55, 53, 48, 101, 102, 97, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 56, 102, 51, 99, 50, 55, 57, 52, 102, 50, 52, 52, 50, 54, 48, 49, 52, 102, 99, 50, 54, 97, 100, 99, 98, 98, 56, 101, 102, 100, 57, 52, 52, 49, 49, 102, 99, 101, 56, 56, 49, 102, 97, 54, 48, 102, 99, 56, 55, 50, 53, 
97, 56, 57, 57, 49, 49, 53, 55, 53, 48, 101, 102, 97, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 116, 121, 112, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 118, 97, 108, 34, 58, 32, 123, 10, 32, 32, 32, 32, 32, 34, 112, 117, 98, 108, 105, 99, 34, 58, 32, 34, 56, 57, 53, 55, 54, 57, 49, 55, 100, 49, 54, 48, 50, 56, 52, 51, 56, 52, 97, 52, 55, 55, 53, 57, 101, 101, 99, 49, 102, 99, 48, 102, 53, 98, 55, 52, 54, 99, 97, 51, 100, 102, 97, 100, 56, 49, 51, 101, 101, 51, 48, 56, 55, 53, 99, 51, 50, 98, 97, 99, 51, 54, 57, 99, 34, 10, 32, 32, 32, 32, 125, 44, 10, 32, 32, 32, 32, 34, 115, 99, 104, 101, 109, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 57, 100, 55, 56, 53, 52, 51, 98, 53, 48, 56, 102, 57, 57, 97, 57, 53, 97, 51, 99, 51, 49, 102, 97, 100, 51, 99, 102, 102, 101, 102, 48, 54, 52, 52, 49, 51, 52, 102, 49, 97, 48, 50, 56, 98, 51, 48, 53, 48, 49, 97, 99, 99, 49, 50, 48, 53, 56, 99, 55, 99, 51, 101, 56, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 34, 58, 32, 34, 57, 100, 55, 56, 53, 52, 51, 98, 53, 48, 56, 102, 57, 57, 97, 57, 53, 97, 51, 99, 51, 49, 102, 97, 100, 51, 99, 102, 102, 101, 102, 48, 54, 52, 52, 49, 51, 52, 102, 49, 97, 48, 50, 56, 98, 51, 48, 53, 48, 49, 97, 99, 99, 49, 50, 48, 53, 56, 99, 55, 99, 51, 101, 56, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 116, 121, 112, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 44, 10, 32, 32, 32, 32, 34, 107, 101, 121, 118, 97, 108, 34, 58, 32, 123, 10, 32, 32, 32, 32, 32, 34, 112, 117, 98, 108, 105, 99, 34, 58, 32, 34, 48, 52, 101, 102, 51, 51, 53, 54, 102, 98, 53, 99, 100, 48, 48, 57, 55, 53, 100, 102, 99, 101, 57, 102, 56, 102, 52, 50, 100, 53, 98, 49, 50, 98, 55, 98, 56, 51, 102, 56, 98, 97, 49, 53, 99, 50, 101, 57, 56, 102, 100, 48, 52, 49, 53, 49, 52, 99, 55, 52, 98, 101, 98, 50, 34, 10, 32, 32, 32, 32, 125, 44, 10, 32, 32, 32, 32, 34, 115, 99, 104, 101, 109, 101, 34, 58, 32, 34, 101, 100, 50, 53, 53, 49, 57, 34, 10, 32, 32, 32, 125, 10, 32, 32, 125, 44, 10, 32, 32, 34, 114, 111, 108, 101, 115, 34, 58, 32, 123, 10, 32, 32, 32, 34, 114, 111, 111, 116, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 115, 34, 58, 32, 91, 10, 32, 32, 32, 32, 32, 34, 52, 53, 97, 57, 53, 55, 55, 99, 97, 52, 56, 51, 102, 51, 53, 56, 98, 100, 97, 52, 97, 50, 49, 97, 102, 57, 51, 98, 55, 54, 54, 48, 98, 56, 50, 98, 100, 57, 99, 48, 101, 49, 57, 51, 48, 97, 54, 98, 55, 100, 53, 50, 49, 98, 52, 50, 56, 57, 55, 97, 48, 102, 97, 51, 34, 44, 10, 32, 32, 32, 32, 32, 34, 52, 99, 53, 54, 100, 101, 53, 98, 54, 50, 102, 100, 48, 54, 52, 102, 99, 57, 52, 52, 53, 51, 98, 54, 56, 48, 100, 102, 100, 51, 102, 97, 102, 54, 97, 48, 49, 98, 97, 97, 97, 98, 51, 98, 101, 97, 99, 50, 57, 54, 57, 50, 48, 102, 48, 99, 99, 102, 97, 50, 50, 55, 55, 53, 34, 10, 32, 32, 32, 32, 93, 44, 10, 32, 32, 32, 32, 34, 116, 104, 114, 101, 115, 104, 111, 108, 100, 34, 58, 32, 50, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 115, 110, 97, 112, 115, 104, 111, 116, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 115, 34, 58, 32, 91, 10, 32, 32, 32, 32, 32, 34, 57, 100, 55, 56, 53, 52, 51, 98, 53, 48, 56, 102, 57, 57, 97, 57, 53, 97, 51, 99, 51, 49, 102, 97, 100, 51, 99, 102, 102, 101, 102, 48, 54, 52, 52, 49, 51, 52, 102, 49, 97, 48, 50, 56, 98, 51, 48, 53, 48, 49, 97, 99, 99, 49, 50, 48, 53, 56, 99, 55, 99, 51, 101, 56, 34, 10, 32, 32, 32, 32, 93, 44, 10, 32, 32, 32, 32, 34, 116, 104, 114, 101, 115, 104, 111, 108, 100, 
34, 58, 32, 49, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 116, 97, 114, 103, 101, 116, 115, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 115, 34, 58, 32, 91, 10, 32, 32, 32, 32, 32, 34, 56, 102, 51, 99, 50, 55, 57, 52, 102, 50, 52, 52, 50, 54, 48, 49, 52, 102, 99, 50, 54, 97, 100, 99, 98, 98, 56, 101, 102, 100, 57, 52, 52, 49, 49, 102, 99, 101, 56, 56, 49, 102, 97, 54, 48, 102, 99, 56, 55, 50, 53, 97, 56, 57, 57, 49, 49, 53, 55, 53, 48, 101, 102, 97, 34, 10, 32, 32, 32, 32, 93, 44, 10, 32, 32, 32, 32, 34, 116, 104, 114, 101, 115, 104, 111, 108, 100, 34, 58, 32, 49, 10, 32, 32, 32, 125, 44, 10, 32, 32, 32, 34, 116, 105, 109, 101, 115, 116, 97, 109, 112, 34, 58, 32, 123, 10, 32, 32, 32, 32, 34, 107, 101, 121, 105, 100, 115, 34, 58, 32, 91, 10, 32, 32, 32, 32, 32, 34, 51, 56, 54, 48, 48, 56, 50, 48, 102, 49, 49, 97, 53, 102, 55, 100, 55, 102, 102, 52, 50, 101, 54, 100, 102, 99, 57, 98, 48, 51, 102, 100, 54, 48, 50, 55, 50, 97, 51, 98, 101, 54, 102, 56, 57, 53, 100, 97, 50, 100, 56, 56, 50, 99, 101, 97, 56, 98, 98, 49, 101, 50, 48, 102, 34, 10, 32, 32, 32, 32, 93, 44, 10, 32, 32, 32, 32, 34, 116, 104, 114, 101, 115, 104, 111, 108, 100, 34, 58, 32, 49, 10, 32, 32, 32, 125, 10, 32, 32, 125, 44, 10, 32, 32, 34, 115, 112, 101, 99, 95, 118, 101, 114, 115, 105, 111, 110, 34, 58, 32, 34, 49, 46, 48, 46, 49, 57, 34, 44, 10, 32, 32, 34, 118, 101, 114, 115, 105, 111, 110, 34, 58, 32, 49, 44, 10, 32, 32, 34, 120, 45, 116, 117, 102, 114, 101, 112, 111, 45, 101, 120, 112, 105, 114, 121, 45, 112, 101, 114, 105, 111, 100, 34, 58, 32, 56, 54, 52, 48, 48, 10, 32, 125, 10, 125},
wantErr: nil,
},
{
name: "invalid url",
desc: "URL does not exist",
url: "https://somebadtufrepourl.com/metadata/",
data: nil,
wantErr: &url.Error{},
},
{
name: "invalid url format",
desc: "URL is malformed",
url: string([]byte{0x7f}),
data: nil,
wantErr: &url.Error{},
},
{
name: "invalid path",
desc: "Path does not exist",
url: "https://jku.github.io/tuf-demo/metadata/badPath.json",
data: nil,
wantErr: &metadata.ErrDownloadHTTP{},
},
{
name: "data too long",
desc: "Returned data is longer than maxLength",
url: "https://jku.github.io/tuf-demo/metadata/1.root.json",
maxLength: 1,
data: nil,
wantErr: &metadata.ErrDownloadLengthMismatch{},
},
} {
t.Run(tt.name, func(t *testing.T) {
// this will only be printed if run in verbose mode or if test fails
t.Logf("Desc: %s", tt.desc)
// run the function under test
fetcher := DefaultFetcher{httpUserAgent: "Metadata_Unit_Test/1.0"}
data, err := fetcher.DownloadFile(tt.url, tt.maxLength, tt.timeout)
// special case if we expect no error
if tt.wantErr == nil {
assert.NoErrorf(t, err, "expected no error but got %v", err)
return
}
// compare the error and data with our expected error and data
assert.Equal(t, tt.data, data, "fetched data did not match")
assert.IsTypef(t, tt.wantErr, err, "expected %v but got %v", tt.wantErr, err)
})
}
}
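// A minimal usage sketch for the fetcher exercised above; the user agent,
// 512000-byte limit and 15-second timeout are illustrative values rather than
// library defaults.
//
//	fetcher := DefaultFetcher{httpUserAgent: "Example/1.0"}
//	data, err := fetcher.DownloadFile("https://jku.github.io/tuf-demo/metadata/1.root.json", 512000, 15*time.Second)
//	if err != nil {
//		// a *url.Error, ErrDownloadHTTP or ErrDownloadLengthMismatch ends up here
//	}
//	_ = data // raw bytes of the fetched metadata file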
golang-github-theupdateframework-go-tuf-2.0.2/metadata/keys.go 0000664 0000000 0000000 00000010220 14706111210 0024406 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/hex"
"fmt"
"github.com/secure-systems-lab/go-securesystemslib/cjson"
"github.com/sigstore/sigstore/pkg/cryptoutils"
)
const (
KeyTypeEd25519 = "ed25519"
KeyTypeECDSA_SHA2_P256_COMPAT = "ecdsa"
KeyTypeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256"
KeyTypeRSASSA_PSS_SHA256 = "rsa"
KeySchemeEd25519 = "ed25519"
KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256"
KeySchemeECDSA_SHA2_P384 = "ecdsa-sha2-nistp384"
KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256"
)
// ToPublicKey generates a crypto.PublicKey from the metadata Key type
func (k *Key) ToPublicKey() (crypto.PublicKey, error) {
switch k.Type {
case KeyTypeRSASSA_PSS_SHA256:
publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey))
if err != nil {
return nil, err
}
rsaKey, ok := publicKey.(*rsa.PublicKey)
if !ok {
return nil, fmt.Errorf("invalid rsa public key")
}
// done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357
if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil {
return nil, err
}
return rsaKey, nil
case KeyTypeECDSA_SHA2_P256, KeyTypeECDSA_SHA2_P256_COMPAT: // handle "ecdsa" as well, since python-tuf/securesystemslib keys use it as the keytype instead of https://theupdateframework.github.io/specification/latest/index.html#keytype-ecdsa-sha2-nistp256
publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey))
if err != nil {
return nil, err
}
ecdsaKey, ok := publicKey.(*ecdsa.PublicKey)
if !ok {
return nil, fmt.Errorf("invalid ecdsa public key")
}
// done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357
if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil {
return nil, err
}
return ecdsaKey, nil
case KeyTypeEd25519:
publicKey, err := hex.DecodeString(k.Value.PublicKey)
if err != nil {
return nil, err
}
ed25519Key := ed25519.PublicKey(publicKey)
// done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357
if _, err := x509.MarshalPKIXPublicKey(ed25519Key); err != nil {
return nil, err
}
return ed25519Key, nil
}
return nil, fmt.Errorf("unsupported public key type")
}
// KeyFromPublicKey generates a metadata Key from a crypto.PublicKey
func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) {
key := &Key{}
switch k := k.(type) {
case *rsa.PublicKey:
key.Type = KeyTypeRSASSA_PSS_SHA256
key.Scheme = KeySchemeRSASSA_PSS_SHA256
pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k)
if err != nil {
return nil, err
}
key.Value.PublicKey = string(pemKey)
case *ecdsa.PublicKey:
key.Type = KeyTypeECDSA_SHA2_P256
key.Scheme = KeySchemeECDSA_SHA2_P256
pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k)
if err != nil {
return nil, err
}
key.Value.PublicKey = string(pemKey)
case ed25519.PublicKey:
key.Type = KeyTypeEd25519
key.Scheme = KeySchemeEd25519
key.Value.PublicKey = hex.EncodeToString(k)
default:
return nil, fmt.Errorf("unsupported public key type")
}
return key, nil
}
// ID returns the keyID value for the given Key
func (k *Key) ID() string {
// the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key
if k.id == "" {
data, err := cjson.EncodeCanonical(k)
if err != nil {
panic(fmt.Errorf("error creating key ID: %w", err))
}
digest := sha256.Sum256(data)
k.id = hex.EncodeToString(digest[:])
}
return k.id
}
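// A short sketch of what ID() computes, assuming key is a populated *Key:
// the canonical JSON encoding of the key is hashed with SHA-256 and the
// digest is hex-encoded.
//
//	data, _ := cjson.EncodeCanonical(key)
//	digest := sha256.Sum256(data)
//	fmt.Println(hex.EncodeToString(digest[:]) == key.ID()) // expected: true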
golang-github-theupdateframework-go-tuf-2.0.2/metadata/logger.go 0000664 0000000 0000000 00000002371 14706111210 0024722 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
var log Logger = DiscardLogger{}
// Logger partially implements the go-logr/logr interface:
// https://github.com/go-logr/logr/blob/master/logr.go
type Logger interface {
// Info logs a non-error message with key/value pairs
Info(msg string, kv ...any)
// Error logs an error with a given message and key/value pairs.
Error(err error, msg string, kv ...any)
}
type DiscardLogger struct{}
func (d DiscardLogger) Info(msg string, kv ...any) {
}
func (d DiscardLogger) Error(err error, msg string, kv ...any) {
}
func SetLogger(logger Logger) {
log = logger
}
func GetLogger() Logger {
return log
}
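// A minimal sketch of installing a real logger, mirroring the stdr-based
// setup used in the package tests; any value satisfying Logger works.
// stdlog is the standard "log" package and stdr is github.com/go-logr/stdr.
//
//	logger := stdr.New(stdlog.New(os.Stdout, "tuf", stdlog.LstdFlags))
//	SetLogger(logger)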
golang-github-theupdateframework-go-tuf-2.0.2/metadata/logger_test.go 0000664 0000000 0000000 00000002365 14706111210 0025764 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
stdlog "log"
"os"
"testing"
"github.com/go-logr/stdr"
"github.com/stretchr/testify/assert"
)
func TestSetLogger(t *testing.T) {
// This function is just a simple setter, no need for testing table
testLogger := stdr.New(stdlog.New(os.Stdout, "test", stdlog.LstdFlags))
SetLogger(testLogger)
assert.Equal(t, testLogger, log, "setting package global logger was unsuccessful")
}
func TestGetLogger(t *testing.T) {
// This function is just a simple getter, no need for testing table
testLogger := GetLogger()
assert.Equal(t, log, testLogger, "function did not return current logger")
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/marshal.go 0000664 0000000 0000000 00000033105 14706111210 0025071 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"encoding/hex"
"encoding/json"
"errors"
)
// The following marshal/unmarshal methods override the default behavior for each TUF type
// in order to support unrecognized fields
func (signed RootType) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(signed.UnrecognizedFields) != 0 {
copyMapValues(signed.UnrecognizedFields, dict)
}
dict["_type"] = signed.Type
dict["spec_version"] = signed.SpecVersion
dict["consistent_snapshot"] = signed.ConsistentSnapshot
dict["version"] = signed.Version
dict["expires"] = signed.Expires
dict["keys"] = signed.Keys
dict["roles"] = signed.Roles
return json.Marshal(dict)
}
func (signed *RootType) UnmarshalJSON(data []byte) error {
type Alias RootType
var s Alias
if err := json.Unmarshal(data, &s); err != nil {
return err
}
*signed = RootType(s)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "_type")
delete(dict, "spec_version")
delete(dict, "consistent_snapshot")
delete(dict, "version")
delete(dict, "expires")
delete(dict, "keys")
delete(dict, "roles")
signed.UnrecognizedFields = dict
return nil
}
func (signed SnapshotType) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(signed.UnrecognizedFields) != 0 {
copyMapValues(signed.UnrecognizedFields, dict)
}
dict["_type"] = signed.Type
dict["spec_version"] = signed.SpecVersion
dict["version"] = signed.Version
dict["expires"] = signed.Expires
dict["meta"] = signed.Meta
return json.Marshal(dict)
}
func (signed *SnapshotType) UnmarshalJSON(data []byte) error {
type Alias SnapshotType
var s Alias
if err := json.Unmarshal(data, &s); err != nil {
return err
}
*signed = SnapshotType(s)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "_type")
delete(dict, "spec_version")
delete(dict, "version")
delete(dict, "expires")
delete(dict, "meta")
signed.UnrecognizedFields = dict
return nil
}
func (signed TimestampType) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(signed.UnrecognizedFields) != 0 {
copyMapValues(signed.UnrecognizedFields, dict)
}
dict["_type"] = signed.Type
dict["spec_version"] = signed.SpecVersion
dict["version"] = signed.Version
dict["expires"] = signed.Expires
dict["meta"] = signed.Meta
return json.Marshal(dict)
}
func (signed *TimestampType) UnmarshalJSON(data []byte) error {
type Alias TimestampType
var s Alias
if err := json.Unmarshal(data, &s); err != nil {
return err
}
*signed = TimestampType(s)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "_type")
delete(dict, "spec_version")
delete(dict, "version")
delete(dict, "expires")
delete(dict, "meta")
signed.UnrecognizedFields = dict
return nil
}
func (signed TargetsType) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(signed.UnrecognizedFields) != 0 {
copyMapValues(signed.UnrecognizedFields, dict)
}
dict["_type"] = signed.Type
dict["spec_version"] = signed.SpecVersion
dict["version"] = signed.Version
dict["expires"] = signed.Expires
dict["targets"] = signed.Targets
if signed.Delegations != nil {
dict["delegations"] = signed.Delegations
}
return json.Marshal(dict)
}
func (signed *TargetsType) UnmarshalJSON(data []byte) error {
type Alias TargetsType
var s Alias
if err := json.Unmarshal(data, &s); err != nil {
return err
}
*signed = TargetsType(s)
// populate the path field for each target
for name, targetFile := range signed.Targets {
targetFile.Path = name
}
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "_type")
delete(dict, "spec_version")
delete(dict, "version")
delete(dict, "expires")
delete(dict, "targets")
delete(dict, "delegations")
signed.UnrecognizedFields = dict
return nil
}
func (signed MetaFiles) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(signed.UnrecognizedFields) != 0 {
copyMapValues(signed.UnrecognizedFields, dict)
}
// length and hashes are optional
if signed.Length != 0 {
dict["length"] = signed.Length
}
if len(signed.Hashes) != 0 {
dict["hashes"] = signed.Hashes
}
dict["version"] = signed.Version
return json.Marshal(dict)
}
func (signed *MetaFiles) UnmarshalJSON(data []byte) error {
type Alias MetaFiles
var s Alias
if err := json.Unmarshal(data, &s); err != nil {
return err
}
*signed = MetaFiles(s)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "length")
delete(dict, "hashes")
delete(dict, "version")
signed.UnrecognizedFields = dict
return nil
}
func (signed TargetFiles) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(signed.UnrecognizedFields) != 0 {
copyMapValues(signed.UnrecognizedFields, dict)
}
dict["length"] = signed.Length
dict["hashes"] = signed.Hashes
if signed.Custom != nil {
dict["custom"] = signed.Custom
}
return json.Marshal(dict)
}
func (signed *TargetFiles) UnmarshalJSON(data []byte) error {
type Alias TargetFiles
var s Alias
if err := json.Unmarshal(data, &s); err != nil {
return err
}
*signed = TargetFiles(s)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "length")
delete(dict, "hashes")
delete(dict, "custom")
signed.UnrecognizedFields = dict
return nil
}
func (key Key) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(key.UnrecognizedFields) != 0 {
copyMapValues(key.UnrecognizedFields, dict)
}
dict["keytype"] = key.Type
dict["scheme"] = key.Scheme
dict["keyval"] = key.Value
return json.Marshal(dict)
}
func (key *Key) UnmarshalJSON(data []byte) error {
type Alias Key
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
// nolint
*key = Key(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "keytype")
delete(dict, "scheme")
delete(dict, "keyval")
key.UnrecognizedFields = dict
return nil
}
func (meta Metadata[T]) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(meta.UnrecognizedFields) != 0 {
copyMapValues(meta.UnrecognizedFields, dict)
}
dict["signed"] = meta.Signed
dict["signatures"] = meta.Signatures
return json.Marshal(dict)
}
func (meta *Metadata[T]) UnmarshalJSON(data []byte) error {
tmp := any(new(T))
var m map[string]any
if err := json.Unmarshal(data, &m); err != nil {
return err
}
switch tmp.(type) {
case *RootType:
dict := struct {
Signed RootType `json:"signed"`
Signatures []Signature `json:"signatures"`
}{}
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
var i interface{} = dict.Signed
meta.Signed = i.(T)
meta.Signatures = dict.Signatures
case *SnapshotType:
dict := struct {
Signed SnapshotType `json:"signed"`
Signatures []Signature `json:"signatures"`
}{}
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
var i interface{} = dict.Signed
meta.Signed = i.(T)
meta.Signatures = dict.Signatures
case *TimestampType:
dict := struct {
Signed TimestampType `json:"signed"`
Signatures []Signature `json:"signatures"`
}{}
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
var i interface{} = dict.Signed
meta.Signed = i.(T)
meta.Signatures = dict.Signatures
case *TargetsType:
dict := struct {
Signed TargetsType `json:"signed"`
Signatures []Signature `json:"signatures"`
}{}
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
var i interface{} = dict.Signed
meta.Signed = i.(T)
meta.Signatures = dict.Signatures
default:
return &ErrValue{Msg: "unrecognized metadata type"}
}
delete(m, "signed")
delete(m, "signatures")
meta.UnrecognizedFields = m
return nil
}
func (s Signature) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(s.UnrecognizedFields) != 0 {
copyMapValues(s.UnrecognizedFields, dict)
}
dict["keyid"] = s.KeyID
dict["sig"] = s.Signature
return json.Marshal(dict)
}
func (s *Signature) UnmarshalJSON(data []byte) error {
type Alias Signature
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
*s = Signature(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "keyid")
delete(dict, "sig")
s.UnrecognizedFields = dict
return nil
}
func (kv KeyVal) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(kv.UnrecognizedFields) != 0 {
copyMapValues(kv.UnrecognizedFields, dict)
}
dict["public"] = kv.PublicKey
return json.Marshal(dict)
}
func (kv *KeyVal) UnmarshalJSON(data []byte) error {
type Alias KeyVal
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
*kv = KeyVal(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "public")
kv.UnrecognizedFields = dict
return nil
}
func (role Role) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(role.UnrecognizedFields) != 0 {
copyMapValues(role.UnrecognizedFields, dict)
}
dict["keyids"] = role.KeyIDs
dict["threshold"] = role.Threshold
return json.Marshal(dict)
}
func (role *Role) UnmarshalJSON(data []byte) error {
type Alias Role
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
*role = Role(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "keyids")
delete(dict, "threshold")
role.UnrecognizedFields = dict
return nil
}
func (d Delegations) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(d.UnrecognizedFields) != 0 {
copyMapValues(d.UnrecognizedFields, dict)
}
// only one is allowed
dict["keys"] = d.Keys
if d.Roles != nil {
dict["roles"] = d.Roles
} else if d.SuccinctRoles != nil {
dict["succinct_roles"] = d.SuccinctRoles
}
return json.Marshal(dict)
}
func (d *Delegations) UnmarshalJSON(data []byte) error {
type Alias Delegations
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
*d = Delegations(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "keys")
delete(dict, "roles")
delete(dict, "succinct_roles")
d.UnrecognizedFields = dict
return nil
}
func (role DelegatedRole) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(role.UnrecognizedFields) != 0 {
copyMapValues(role.UnrecognizedFields, dict)
}
dict["name"] = role.Name
dict["keyids"] = role.KeyIDs
dict["threshold"] = role.Threshold
dict["terminating"] = role.Terminating
// make sure we have only one of the two (per spec)
if role.Paths != nil && role.PathHashPrefixes != nil {
return nil, &ErrValue{Msg: "failed to marshal: not allowed to have both \"paths\" and \"path_hash_prefixes\" present"}
}
if role.Paths != nil {
dict["paths"] = role.Paths
} else if role.PathHashPrefixes != nil {
dict["path_hash_prefixes"] = role.PathHashPrefixes
}
return json.Marshal(dict)
}
func (role *DelegatedRole) UnmarshalJSON(data []byte) error {
type Alias DelegatedRole
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
*role = DelegatedRole(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "name")
delete(dict, "keyids")
delete(dict, "threshold")
delete(dict, "terminating")
delete(dict, "paths")
delete(dict, "path_hash_prefixes")
role.UnrecognizedFields = dict
return nil
}
func (role SuccinctRoles) MarshalJSON() ([]byte, error) {
dict := map[string]any{}
if len(role.UnrecognizedFields) != 0 {
copyMapValues(role.UnrecognizedFields, dict)
}
dict["keyids"] = role.KeyIDs
dict["threshold"] = role.Threshold
dict["bit_length"] = role.BitLength
dict["name_prefix"] = role.NamePrefix
return json.Marshal(dict)
}
func (role *SuccinctRoles) UnmarshalJSON(data []byte) error {
type Alias SuccinctRoles
var a Alias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
*role = SuccinctRoles(a)
var dict map[string]any
if err := json.Unmarshal(data, &dict); err != nil {
return err
}
delete(dict, "keyids")
delete(dict, "threshold")
delete(dict, "bit_length")
delete(dict, "name_prefix")
role.UnrecognizedFields = dict
return nil
}
func (b *HexBytes) UnmarshalJSON(data []byte) error {
if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("tuf: invalid JSON hex bytes")
}
res := make([]byte, hex.DecodedLen(len(data)-2))
_, err := hex.Decode(res, data[1:len(data)-1])
if err != nil {
return err
}
*b = res
return nil
}
func (b HexBytes) MarshalJSON() ([]byte, error) {
res := make([]byte, hex.EncodedLen(len(b))+2)
res[0] = '"'
res[len(res)-1] = '"'
hex.Encode(res[1:], b)
return res, nil
}
func (b HexBytes) String() string {
return hex.EncodeToString(b)
}
// copyMapValues copies the values of the src map to dst
func copyMapValues(src, dst map[string]any) {
for k, v := range src {
dst[k] = v
}
}
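// A small round-trip sketch of the behaviour these methods provide: fields the
// library does not recognize survive decode and re-encode. The "foo" field is
// hypothetical.
//
//	raw := []byte(`{"keyid": "abc", "sig": "deadbeef", "foo": "bar"}`)
//	var s Signature
//	if err := json.Unmarshal(raw, &s); err != nil {
//		// handle malformed JSON
//	}
//	out, _ := json.Marshal(s)
//	// out still contains "foo": "bar" next to "keyid" and "sig"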
golang-github-theupdateframework-go-tuf-2.0.2/metadata/metadata.go 0000664 0000000 0000000 00000071346 14706111210 0025233 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"bytes"
"crypto"
"crypto/hmac"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"io"
"math"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"time"
"github.com/secure-systems-lab/go-securesystemslib/cjson"
"github.com/sigstore/sigstore/pkg/signature"
)
// Root returns a new metadata instance of type Root
func Root(expires ...time.Time) *Metadata[RootType] {
// expire now if there's nothing set
if len(expires) == 0 {
expires = []time.Time{time.Now().UTC()}
}
// populate Roles
roles := map[string]*Role{}
for _, r := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} {
roles[r] = &Role{
KeyIDs: []string{},
Threshold: 1,
}
}
log.Info("Created metadata", "type", ROOT)
return &Metadata[RootType]{
Signed: RootType{
Type: ROOT,
SpecVersion: SPECIFICATION_VERSION,
Version: 1,
Expires: expires[0],
Keys: map[string]*Key{},
Roles: roles,
ConsistentSnapshot: true,
},
Signatures: []Signature{},
}
}
// Snapshot returns a new metadata instance of type Snapshot
func Snapshot(expires ...time.Time) *Metadata[SnapshotType] {
// expire now if there's nothing set
if len(expires) == 0 {
expires = []time.Time{time.Now().UTC()}
}
log.Info("Created metadata", "type", SNAPSHOT)
return &Metadata[SnapshotType]{
Signed: SnapshotType{
Type: SNAPSHOT,
SpecVersion: SPECIFICATION_VERSION,
Version: 1,
Expires: expires[0],
Meta: map[string]*MetaFiles{
"targets.json": {
Version: 1,
},
},
},
Signatures: []Signature{},
}
}
// Timestamp returns a new metadata instance of type Timestamp
func Timestamp(expires ...time.Time) *Metadata[TimestampType] {
// expire now if there's nothing set
if len(expires) == 0 {
expires = []time.Time{time.Now().UTC()}
}
log.Info("Created metadata", "type", TIMESTAMP)
return &Metadata[TimestampType]{
Signed: TimestampType{
Type: TIMESTAMP,
SpecVersion: SPECIFICATION_VERSION,
Version: 1,
Expires: expires[0],
Meta: map[string]*MetaFiles{
"snapshot.json": {
Version: 1,
},
},
},
Signatures: []Signature{},
}
}
// Targets returns a new metadata instance of type Targets
func Targets(expires ...time.Time) *Metadata[TargetsType] {
// expire now if there's nothing set
if len(expires) == 0 {
expires = []time.Time{time.Now().UTC()}
}
log.Info("Created metadata", "type", TARGETS)
return &Metadata[TargetsType]{
Signed: TargetsType{
Type: TARGETS,
SpecVersion: SPECIFICATION_VERSION,
Version: 1,
Expires: expires[0],
Targets: map[string]*TargetFiles{},
},
Signatures: []Signature{},
}
}
// TargetFile returns a new metadata instance of type TargetFiles
func TargetFile() *TargetFiles {
return &TargetFiles{
Length: 0,
Hashes: Hashes{},
}
}
// MetaFile returns a new metadata instance of type MetaFiles
func MetaFile(version int64) *MetaFiles {
if version < 1 {
// attempting to set incorrect version
log.Info("Attempting to set incorrect version for MetaFile", "version", version)
version = 1
}
return &MetaFiles{
Length: 0,
Hashes: Hashes{},
Version: version,
}
}
// FromFile loads metadata from a file
func (meta *Metadata[T]) FromFile(name string) (*Metadata[T], error) {
in, err := os.Open(name)
if err != nil {
return nil, err
}
defer in.Close()
data, err := io.ReadAll(in)
if err != nil {
return nil, err
}
m, err := fromBytes[T](data)
if err != nil {
return nil, err
}
*meta = *m
log.Info("Loaded metadata from file", "name", name)
return meta, nil
}
// FromBytes deserializes metadata from bytes
func (meta *Metadata[T]) FromBytes(data []byte) (*Metadata[T], error) {
m, err := fromBytes[T](data)
if err != nil {
return nil, err
}
*meta = *m
log.Info("Loaded metadata from bytes")
return meta, nil
}
// ToBytes serializes metadata to bytes
func (meta *Metadata[T]) ToBytes(pretty bool) ([]byte, error) {
log.Info("Writing metadata to bytes")
if pretty {
return json.MarshalIndent(*meta, "", "\t")
}
return json.Marshal(*meta)
}
// ToFile saves metadata to a file
func (meta *Metadata[T]) ToFile(name string, pretty bool) error {
log.Info("Writing metadata to file", "name", name)
data, err := meta.ToBytes(pretty)
if err != nil {
return err
}
return os.WriteFile(name, data, 0644)
}
// Sign creates a signature over Signed and appends it to Signatures
func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) {
// encode the Signed part to canonical JSON so signatures are consistent
payload, err := cjson.EncodeCanonical(meta.Signed)
if err != nil {
return nil, err
}
// sign the Signed part
sb, err := signer.SignMessage(bytes.NewReader(payload))
if err != nil {
return nil, &ErrUnsignedMetadata{Msg: "problem signing metadata"}
}
// get the signer's PublicKey
publ, err := signer.PublicKey()
if err != nil {
return nil, err
}
// convert to TUF Key type to get keyID
key, err := KeyFromPublicKey(publ)
if err != nil {
return nil, err
}
// build signature
sig := &Signature{
KeyID: key.ID(),
Signature: sb,
}
// update the Signatures part
meta.Signatures = append(meta.Signatures, *sig)
// return the new signature
log.Info("Signed metadata with key", "ID", key.ID())
return sig, nil
}
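// A brief signing sketch, assuming sigstore's signature.LoadSigner helper and a
// freshly generated ECDSA P-256 key (crypto/ecdsa, crypto/elliptic and
// crypto/rand imports assumed); error handling is elided.
//
//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//	signer, _ := signature.LoadSigner(priv, crypto.SHA256)
//	root := Root(time.Now().AddDate(1, 0, 0))
//	sig, _ := root.Sign(signer)
//	_ = sig.KeyID // ID of the key that produced this signature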
// VerifyDelegate verifies that delegatedMetadata is signed with the required
// threshold of keys for the delegated role delegatedRole
func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata any) error {
i := any(meta)
signingKeys := map[string]bool{}
var keys map[string]*Key
var roleKeyIDs []string
var roleThreshold int
log.Info("Verifying", "role", delegatedRole)
// collect keys, keyIDs and threshold based on delegator type
switch i := i.(type) {
// Root delegator
case *Metadata[RootType]:
keys = i.Signed.Keys
if role, ok := (*i).Signed.Roles[delegatedRole]; ok {
roleKeyIDs = role.KeyIDs
roleThreshold = role.Threshold
} else {
// the delegated role was not found, no need to proceed
return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)}
}
// Targets delegator
case *Metadata[TargetsType]:
if i.Signed.Delegations == nil {
return &ErrValue{Msg: "no delegations found"}
}
keys = i.Signed.Delegations.Keys
if i.Signed.Delegations.Roles != nil {
found := false
for _, v := range i.Signed.Delegations.Roles {
if v.Name == delegatedRole {
found = true
roleKeyIDs = v.KeyIDs
roleThreshold = v.Threshold
break
}
}
// the delegated role was not found, no need to proceed
if !found {
return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)}
}
} else if i.Signed.Delegations.SuccinctRoles != nil {
roleKeyIDs = i.Signed.Delegations.SuccinctRoles.KeyIDs
roleThreshold = i.Signed.Delegations.SuccinctRoles.Threshold
}
default:
return &ErrType{Msg: "call is valid only on delegator metadata (should be either root or targets)"}
}
// if there are no keyIDs for that role it means there's no delegation found
if len(roleKeyIDs) == 0 {
return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)}
}
// loop through each role keyID
for _, keyID := range roleKeyIDs {
key, ok := keys[keyID]
if !ok {
return &ErrValue{Msg: fmt.Sprintf("key with ID %s not found in %s keyids", keyID, delegatedRole)}
}
sign := Signature{}
var payload []byte
// convert to a PublicKey type
publicKey, err := key.ToPublicKey()
if err != nil {
return err
}
// use corresponding hash function for key type
hash := crypto.Hash(0)
if key.Type != KeyTypeEd25519 {
switch key.Scheme {
case KeySchemeECDSA_SHA2_P256:
hash = crypto.SHA256
case KeySchemeECDSA_SHA2_P384:
hash = crypto.SHA384
default:
hash = crypto.SHA256
}
}
// load a verifier based on that key
verifier, err := signature.LoadVerifier(publicKey, hash)
if err != nil {
return err
}
// collect the signature for that key and build the payload we'll verify
// based on the Signed part of the delegated metadata
switch d := delegatedMetadata.(type) {
case *Metadata[RootType]:
for _, signature := range d.Signatures {
if signature.KeyID == keyID {
sign = signature
}
}
payload, err = cjson.EncodeCanonical(d.Signed)
if err != nil {
return err
}
case *Metadata[SnapshotType]:
for _, signature := range d.Signatures {
if signature.KeyID == keyID {
sign = signature
}
}
payload, err = cjson.EncodeCanonical(d.Signed)
if err != nil {
return err
}
case *Metadata[TimestampType]:
for _, signature := range d.Signatures {
if signature.KeyID == keyID {
sign = signature
}
}
payload, err = cjson.EncodeCanonical(d.Signed)
if err != nil {
return err
}
case *Metadata[TargetsType]:
for _, signature := range d.Signatures {
if signature.KeyID == keyID {
sign = signature
}
}
payload, err = cjson.EncodeCanonical(d.Signed)
if err != nil {
return err
}
default:
return &ErrType{Msg: "unknown delegated metadata type"}
}
// verify if the signature for that payload corresponds to the given key
if err := verifier.VerifySignature(bytes.NewReader(sign.Signature), bytes.NewReader(payload)); err != nil {
// failed to verify the metadata with that key ID
log.Info("Failed to verify %s with key ID %s", delegatedRole, keyID)
} else {
// save the verified keyID only if verification passed
signingKeys[keyID] = true
log.Info("Verified with key", "role", delegatedRole, "ID", keyID)
}
}
// check if the amount of valid signatures is enough
if len(signingKeys) < roleThreshold {
log.Info("Verifying failed, not enough signatures", "role", delegatedRole, "got", len(signingKeys), "want", roleThreshold)
return &ErrUnsignedMetadata{Msg: fmt.Sprintf("Verifying %s failed, not enough signatures, got %d, want %d", delegatedRole, len(signingKeys), roleThreshold)}
}
log.Info("Verified successfully", "role", delegatedRole)
return nil
}
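// A short verification sketch: a trusted root checks that a newly fetched
// timestamp carries enough valid timestamp-role signatures. Both root and
// timestampMeta are assumed to have been loaded with FromBytes or FromFile.
//
//	if err := root.VerifyDelegate(TIMESTAMP, timestampMeta); err != nil {
//		// unknown role/key or not enough valid signatures
//	}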
// IsExpired returns true if metadata is expired.
// It checks if referenceTime is after Signed.Expires
func (signed *RootType) IsExpired(referenceTime time.Time) bool {
return referenceTime.After(signed.Expires)
}
// IsExpired returns true if metadata is expired.
// It checks if referenceTime is after Signed.Expires
func (signed *SnapshotType) IsExpired(referenceTime time.Time) bool {
return referenceTime.After(signed.Expires)
}
// IsExpired returns true if metadata is expired.
// It checks if referenceTime is after Signed.Expires
func (signed *TimestampType) IsExpired(referenceTime time.Time) bool {
return referenceTime.After(signed.Expires)
}
// IsExpired returns true if metadata is expired.
// It checks if referenceTime is after Signed.Expires
func (signed *TargetsType) IsExpired(referenceTime time.Time) bool {
return referenceTime.After(signed.Expires)
}
// VerifyLengthHashes checks whether the MetaFiles data matches its corresponding
// length and hashes
func (f *MetaFiles) VerifyLengthHashes(data []byte) error {
// hashes and length are optional for MetaFiles
if len(f.Hashes) > 0 {
err := verifyHashes(data, f.Hashes)
if err != nil {
return err
}
}
if f.Length != 0 {
err := verifyLength(data, f.Length)
if err != nil {
return err
}
}
return nil
}
// VerifyLengthHashes checks whether the TargetFiles data matches its corresponding
// length and hashes
func (f *TargetFiles) VerifyLengthHashes(data []byte) error {
err := verifyHashes(data, f.Hashes)
if err != nil {
return err
}
err = verifyLength(data, f.Length)
if err != nil {
return err
}
return nil
}
// Equal checks whether the source target file matches another
func (source *TargetFiles) Equal(expected TargetFiles) bool {
if source.Length == expected.Length && source.Hashes.Equal(expected.Hashes) {
return true
}
return false
}
// FromFile generates a TargetFiles from a file
func (t *TargetFiles) FromFile(localPath string, hashes ...string) (*TargetFiles, error) {
log.Info("Generating target file from file", "path", localPath)
// open file
in, err := os.Open(localPath)
if err != nil {
return nil, err
}
defer in.Close()
// read file
data, err := io.ReadAll(in)
if err != nil {
return nil, err
}
return t.FromBytes(localPath, data, hashes...)
}
// FromBytes generates a TargetFiles from bytes
func (t *TargetFiles) FromBytes(localPath string, data []byte, hashes ...string) (*TargetFiles, error) {
log.Info("Generating target file from bytes", "path", localPath)
var hasher hash.Hash
targetFile := &TargetFiles{
Hashes: map[string]HexBytes{},
}
// use default hash algorithm if not set
if len(hashes) == 0 {
hashes = []string{"sha256"}
}
// calculate length
length, err := io.Copy(io.Discard, bytes.NewReader(data))
if err != nil {
return nil, err
}
targetFile.Length = length
for _, v := range hashes {
switch v {
case "sha256":
hasher = sha256.New()
case "sha512":
hasher = sha512.New()
default:
return nil, &ErrValue{Msg: fmt.Sprintf("failed generating TargetFile - unsupported hashing algorithm - %s", v)}
}
_, err := hasher.Write(data)
if err != nil {
return nil, err
}
targetFile.Hashes[v] = hasher.Sum(nil)
}
targetFile.Path = localPath
return targetFile, nil
}
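// A quick sketch of generating target metadata for an in-memory artifact with
// both supported hash algorithms; the target path is illustrative.
//
//	tf, err := TargetFile().FromBytes("files/hello.txt", []byte("hello world"), "sha256", "sha512")
//	if err != nil {
//		// only sha256 and sha512 are accepted here
//	}
//	_ = tf.Length // 11
//	_ = tf.Hashes // digests keyed by algorithm name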
// ClearSignatures clears Signatures
func (meta *Metadata[T]) ClearSignatures() {
log.Info("Cleared signatures")
meta.Signatures = []Signature{}
}
// IsDelegatedPath determines whether the given "targetFilepath" is in one of
// the paths that "DelegatedRole" is trusted to provide
func (role *DelegatedRole) IsDelegatedPath(targetFilepath string) (bool, error) {
if len(role.Paths) > 0 {
// standard delegations
for _, pathPattern := range role.Paths {
// A delegated role path may be an explicit path or glob
// pattern (Unix shell-style wildcards).
if isTargetInPathPattern(targetFilepath, pathPattern) {
return true, nil
}
}
} else if len(role.PathHashPrefixes) > 0 {
// hash bin delegations - calculate the hash of the filepath to determine in which bin to find the target.
targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
for _, pathHashPrefix := range role.PathHashPrefixes {
if strings.HasPrefix(base64.URLEncoding.EncodeToString(targetFilepathHash[:]), pathHashPrefix) {
return true, nil
}
}
}
return false, nil
}
// isTargetInPathPattern determines whether "targetpath" matches "pathpattern".
func isTargetInPathPattern(targetpath string, pathpattern string) bool {
// We need to make sure that targetpath and pathpattern are pointing to
// the same directory, as fnmatch doesn't treat "/" as a special symbol.
targetParts := strings.Split(targetpath, "/")
patternParts := strings.Split(pathpattern, "/")
if len(targetParts) != len(patternParts) {
return false
}
// Every part in the pathpattern could include a glob pattern, that's why
// each of the target and pathpattern parts should match.
for i := 0; i < len(targetParts); i++ {
if ok, _ := filepath.Match(patternParts[i], targetParts[i]); !ok {
return false
}
}
return true
}
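// Worked examples of the rule above: both path and pattern are split on "/"
// and matched segment by segment, so a single "*" never crosses a directory
// boundary.
//
//	isTargetInPathPattern("foo/bar.txt", "foo/*.txt")     // true
//	isTargetInPathPattern("foo/baz/bar.txt", "foo/*.txt") // false: depth differs
//	isTargetInPathPattern("foo/bar.txt", "*/*.txt")       // true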
// GetRolesForTarget returns the names and terminating status of all
// delegated roles that are responsible for targetFilepath
// Note the result should be an ordered list, ref. https://github.com/theupdateframework/go-tuf/security/advisories/GHSA-4f8r-qqr9-fq8j
func (role *Delegations) GetRolesForTarget(targetFilepath string) []RoleResult {
var res []RoleResult
// Standard delegations
if role.Roles != nil {
for _, r := range role.Roles {
ok, err := r.IsDelegatedPath(targetFilepath)
if err == nil && ok {
res = append(res, RoleResult{Name: r.Name, Terminating: r.Terminating})
}
}
} else if role.SuccinctRoles != nil {
// SuccinctRoles delegations
res = role.SuccinctRoles.GetRolesForTarget(targetFilepath)
}
// We preserve the same order as the actual roles list
return res
}
// GetRolesForTarget calculates the name of the delegated role responsible for "targetFilepath".
// The target at path "targetFilepath" is assigned to a bin by casting
// the left-most "BitLength" of bits of the file path hash digest to
// int, using it as bin index between 0 and “2**BitLength-1”.
func (role *SuccinctRoles) GetRolesForTarget(targetFilepath string) []RoleResult {
// calculate the suffixLen value based on the total number of bins in
// hex. If bit_length = 10 then numberOfBins = 1024 and bin names will
// have a suffix between "000" and "3ff" in hex; suffixLen will be 3,
// meaning the third bin will have the suffix "003"
numberOfBins := math.Pow(2, float64(role.BitLength))
// suffixLen is calculated based on "numberOfBins - 1" as the name
// of the last bin contains the number "numberOfBins -1" as a suffix.
suffixLen := len(strconv.FormatInt(int64(numberOfBins-1), 16))
targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
// we can't ever need more than 4 bytes (32 bits)
hashBytes := targetFilepathHash[:4]
// right shift hash bytes, so that we only have the leftmost
// bit_length bits that we care about
shiftValue := 32 - role.BitLength
binNumber := binary.BigEndian.Uint32(hashBytes) >> shiftValue
// add zero padding if necessary and cast to hex the suffix
suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
// we consider all succinct_roles as terminating.
// for more information, read TAP 15.
return []RoleResult{{Name: fmt.Sprintf("%s-%s", role.NamePrefix, suffix), Terminating: true}}
}
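// A worked example of the bin arithmetic above with BitLength = 10 and a
// hypothetical target path whose SHA-256 digest starts with 0xab 0xcd 0x12 0x34:
//
//	numberOfBins = 2^10 = 1024, so suffixLen = len("3ff") = 3
//	binNumber    = 0xabcd1234 >> (32 - 10) = 0x2af
//	role name    = NamePrefix + "-2af"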
// GetRoles returns the names of all different delegated roles
func (role *SuccinctRoles) GetRoles() []string {
res := []string{}
suffixLen, numberOfBins := role.GetSuffixLen()
for binNumber := 0; binNumber < numberOfBins; binNumber++ {
suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix))
}
return res
}
func (role *SuccinctRoles) GetSuffixLen() (int, int) {
numberOfBins := int(math.Pow(2, float64(role.BitLength)))
return len(strconv.FormatInt(int64(numberOfBins-1), 16)), numberOfBins
}
// IsDelegatedRole returns whether the given roleName is in one of
// the delegated roles that “SuccinctRoles“ represents
func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool {
suffixLen, numberOfBins := role.GetSuffixLen()
expectedPrefix := fmt.Sprintf("%s-", role.NamePrefix)
// check if the roleName prefix is what we would expect
if !strings.HasPrefix(roleName, expectedPrefix) {
return false
}
// check if the roleName suffix length is what we would expect
suffix := roleName[len(expectedPrefix):]
if len(suffix) != suffixLen {
return false
}
// make sure suffix is hex value and get bin number
value, err := strconv.ParseInt(suffix, 16, 64)
if err != nil {
return false
}
// check if the bin we calculated is indeed within the range of what we support
return (value >= 0) && (value < int64(numberOfBins))
}
// AddKey adds a new signing key for role "role".
// key: Signing key to be added for "role".
// role: Name of the role for which "key" is added.
func (signed *RootType) AddKey(key *Key, role string) error {
// verify role is present
if _, ok := signed.Roles[role]; !ok {
return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
}
// add keyID to role
if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) {
signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID())
}
// update Keys
signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
return nil
}
// RevokeKey revokes key "keyID" from "role" and updates the Keys store.
// keyID: Identifier of the key to be removed for "role".
// role: Name of the role for which a signing key is removed.
func (signed *RootType) RevokeKey(keyID, role string) error {
// verify role is present
if _, ok := signed.Roles[role]; !ok {
return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
}
// verify keyID is present for given role
if !slices.Contains(signed.Roles[role].KeyIDs, keyID) {
return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
}
// remove keyID from role
filteredKeyIDs := []string{}
for _, k := range signed.Roles[role].KeyIDs {
if k != keyID {
filteredKeyIDs = append(filteredKeyIDs, k)
}
}
// overwrite the old keyID slice
signed.Roles[role].KeyIDs = filteredKeyIDs
// check if keyID is used by other roles too
for _, r := range signed.Roles {
if slices.Contains(r.KeyIDs, keyID) {
return nil
}
}
// delete the keyID from Keys if it's not used anywhere else
delete(signed.Keys, keyID)
return nil
}
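// A small key-rotation sketch on root metadata, assuming oldKey and newKey are
// *Key values built with KeyFromPublicKey; role thresholds are left untouched.
//
//	if err := root.Signed.AddKey(newKey, TARGETS); err != nil {
//		// the role name is unknown
//	}
//	if err := root.Signed.RevokeKey(oldKey.ID(), TARGETS); err != nil {
//		// the key was not in use by that role
//	}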
// AddKey adds a new signing key for delegated role "role".
// key: Signing key to be added for "role".
// role: Name of the delegated role for which "key" is added.
// If SuccinctRoles is used then the "role" argument can be ignored.
func (signed *TargetsType) AddKey(key *Key, role string) error {
// check if Delegations are even present
if signed.Delegations == nil {
return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
}
// standard delegated roles
if signed.Delegations.Roles != nil {
// loop through all delegated roles
isDelegatedRole := false
for i, d := range signed.Delegations.Roles {
// if role is found
if d.Name == role {
isDelegatedRole = true
// add key if keyID is not already part of keyIDs for that role
if !slices.Contains(d.KeyIDs, key.ID()) {
signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID())
signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
return nil
}
log.Info("Delegated role already has keyID", "role", role, "ID", key.ID())
}
}
if !isDelegatedRole {
return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
}
} else if signed.Delegations.SuccinctRoles != nil {
// add key if keyID is not already part of keyIDs for the SuccinctRoles role
if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) {
signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID())
signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
return nil
}
log.Info("SuccinctRoles role already has keyID", "ID", key.ID())
}
signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
return nil
}
// RevokeKey revokes key "keyID" from delegated role "role" and updates the delegations key store.
// keyID: Identifier of the key to be removed for "role".
// role: Name of the delegated role for which a signing key is removed.
func (signed *TargetsType) RevokeKey(keyID string, role string) error {
// check if Delegations are even present
if signed.Delegations == nil {
return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
}
// standard delegated roles
if signed.Delegations.Roles != nil {
// loop through all delegated roles
for i, d := range signed.Delegations.Roles {
// if role is found
if d.Name == role {
// check if keyID is present in keyIDs for that role
if !slices.Contains(d.KeyIDs, keyID) {
return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
}
// remove keyID from role
filteredKeyIDs := []string{}
for _, k := range signed.Delegations.Roles[i].KeyIDs {
if k != keyID {
filteredKeyIDs = append(filteredKeyIDs, k)
}
}
// overwrite the old keyID slice for that role
signed.Delegations.Roles[i].KeyIDs = filteredKeyIDs
// check if keyID is used by other roles too
for _, r := range signed.Delegations.Roles {
if slices.Contains(r.KeyIDs, keyID) {
return nil
}
}
// delete the keyID from Keys if it's not used anywhere else
delete(signed.Delegations.Keys, keyID)
return nil
}
}
// we haven't found the delegated role
return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
} else if signed.Delegations.SuccinctRoles != nil {
// check if keyID is used by SuccinctRoles role
if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) {
return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by SuccinctRoles", keyID)}
}
// remove keyID from the SuccinctRoles role
filteredKeyIDs := []string{}
for _, k := range signed.Delegations.SuccinctRoles.KeyIDs {
if k != keyID {
filteredKeyIDs = append(filteredKeyIDs, k)
}
}
// overwrite the old keyID slice for SuccinctRoles role
signed.Delegations.SuccinctRoles.KeyIDs = filteredKeyIDs
// delete the keyID from Keys since it can not be used anywhere else
delete(signed.Delegations.Keys, keyID)
return nil
}
return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
}
// Equal checks whether one hash set equals another
func (source Hashes) Equal(expected Hashes) bool {
hashChecked := false
for typ, hash := range expected {
if h, ok := source[typ]; ok {
// hash type match found
hashChecked = true
if !hmac.Equal(h, hash) {
// hash values don't match
return false
}
}
}
return hashChecked
}
// verifyLength verifies if the passed data has the corresponding length
func verifyLength(data []byte, length int64) error {
observed, err := io.Copy(io.Discard, bytes.NewReader(data))
if err != nil {
return err
}
if length != observed {
return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("length verification failed - expected %d, got %d", length, observed)}
}
return nil
}
// verifyHashes verifies that the hashes of the passed data match the expected hashes
func verifyHashes(data []byte, hashes Hashes) error {
var hasher hash.Hash
for k, v := range hashes {
switch k {
case "sha256":
hasher = sha256.New()
case "sha512":
hasher = sha512.New()
default:
return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - unknown hashing algorithm - %s", k)}
}
hasher.Write(data)
if hex.EncodeToString(v) != hex.EncodeToString(hasher.Sum(nil)) {
return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - mismatch for algorithm %s", k)}
}
}
return nil
}
// fromBytes returns a *Metadata[T] object from bytes and verifies
// that the data corresponds to the caller struct type
func fromBytes[T Roles](data []byte) (*Metadata[T], error) {
meta := &Metadata[T]{}
// verify that the type we used to create the object is the same as the type of the metadata file
if err := checkType[T](data); err != nil {
return nil, err
}
// if all is okay, unmarshal meta to the desired Metadata[T] type
if err := json.Unmarshal(data, meta); err != nil {
return nil, err
}
// Make sure signature key IDs are unique
if err := checkUniqueSignatures(*meta); err != nil {
return nil, err
}
return meta, nil
}
// checkUniqueSignatures verifies if the signature key IDs are unique for that metadata
func checkUniqueSignatures[T Roles](meta Metadata[T]) error {
signatures := []string{}
for _, sig := range meta.Signatures {
if slices.Contains(signatures, sig.KeyID) {
return &ErrValue{Msg: fmt.Sprintf("multiple signatures found for key ID %s", sig.KeyID)}
}
signatures = append(signatures, sig.KeyID)
}
return nil
}
// checkType verifies if the generic type used to create the object is the same as the type of the metadata file in bytes
func checkType[T Roles](data []byte) error {
var m map[string]any
i := any(new(T))
if err := json.Unmarshal(data, &m); err != nil {
return err
}
signedType := m["signed"].(map[string]any)["_type"].(string)
switch i.(type) {
case *RootType:
if ROOT != signedType {
return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", ROOT, signedType)}
}
case *SnapshotType:
if SNAPSHOT != signedType {
return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", SNAPSHOT, signedType)}
}
case *TimestampType:
if TIMESTAMP != signedType {
return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TIMESTAMP, signedType)}
}
case *TargetsType:
if TARGETS != signedType {
return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TARGETS, signedType)}
}
default:
return &ErrValue{Msg: fmt.Sprintf("unrecognized metadata type - %s", signedType)}
}
// all okay
return nil
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/metadata_api_test.go 0000664 0000000 0000000 00000111707 14706111210 0027117 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"bytes"
"crypto"
"crypto/sha256"
"encoding/json"
"fmt"
"io/fs"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/internal/testutils"
)
func TestMain(m *testing.M) {
repoPath := "../internal/testutils/repository_data/repository/metadata"
targetsPath := "../internal/testutils/repository_data/repository/targets"
keystorePath := "../internal/testutils/repository_data/keystore"
err := testutils.SetupTestDirs(repoPath, targetsPath, keystorePath)
defer testutils.Cleanup()
if err != nil {
log.Error(err, "failed to setup test dirs")
os.Exit(1)
}
m.Run()
}
func TestGenericRead(t *testing.T) {
// Assert that it chokes correctly on an unknown metadata type
badMetadata := "{\"signed\": {\"_type\": \"bad-metadata\"}}"
_, err := Root().FromBytes([]byte(badMetadata))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type root, got - bad-metadata"})
_, err = Snapshot().FromBytes([]byte(badMetadata))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type snapshot, got - bad-metadata"})
_, err = Targets().FromBytes([]byte(badMetadata))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type targets, got - bad-metadata"})
_, err = Timestamp().FromBytes([]byte(badMetadata))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type timestamp, got - bad-metadata"})
badMetadataPath := filepath.Join(testutils.RepoDir, "bad-metadata.json")
err = os.WriteFile(badMetadataPath, []byte(badMetadata), 0644)
assert.NoError(t, err)
assert.FileExists(t, badMetadataPath)
_, err = Root().FromFile(badMetadataPath)
assert.ErrorIs(t, err, &ErrValue{"expected metadata type root, got - bad-metadata"})
_, err = Snapshot().FromFile(badMetadataPath)
assert.ErrorIs(t, err, &ErrValue{"expected metadata type snapshot, got - bad-metadata"})
_, err = Targets().FromFile(badMetadataPath)
assert.ErrorIs(t, err, &ErrValue{"expected metadata type targets, got - bad-metadata"})
_, err = Timestamp().FromFile(badMetadataPath)
assert.ErrorIs(t, err, &ErrValue{"expected metadata type timestamp, got - bad-metadata"})
err = os.RemoveAll(badMetadataPath)
assert.NoError(t, err)
assert.NoFileExists(t, badMetadataPath)
}
func TestGenericReadFromMismatchingRoles(t *testing.T) {
// Test failing to load other roles from root metadata
_, err := Snapshot().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type snapshot, got - root"})
_, err = Timestamp().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type timestamp, got - root"})
_, err = Targets().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type targets, got - root"})
// Test failing to load other roles from targets metadata
_, err = Snapshot().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type snapshot, got - targets"})
_, err = Timestamp().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type timestamp, got - targets"})
_, err = Root().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type root, got - targets"})
// Test failing to load other roles from timestamp metadata
_, err = Snapshot().FromFile(filepath.Join(testutils.RepoDir, "timestamp.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type snapshot, got - timestamp"})
_, err = Targets().FromFile(filepath.Join(testutils.RepoDir, "timestamp.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type targets, got - timestamp"})
_, err = Root().FromFile(filepath.Join(testutils.RepoDir, "timestamp.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type root, got - timestamp"})
// Test failing to load other roles from snapshot metadata
_, err = Targets().FromFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type targets, got - snapshot"})
_, err = Timestamp().FromFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type timestamp, got - snapshot"})
_, err = Root().FromFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.ErrorIs(t, err, &ErrValue{"expected metadata type root, got - snapshot"})
}
func TestMDReadWriteFileExceptions(t *testing.T) {
// Test writing to a file with bad filename
badMetadataPath := filepath.Join(testutils.RepoDir, "bad-metadata.json")
_, err := Root().FromFile(badMetadataPath)
expectedErr := fs.PathError{
Op: "open",
Path: badMetadataPath,
Err: fs.ErrNotExist,
}
assert.ErrorIs(t, err, expectedErr.Err)
// Test serializing to a file with bad filename
root := Root(fixedExpire)
err = root.ToFile("", false)
expectedErr = fs.PathError{
Op: "open",
Path: "",
Err: fs.ErrNotExist,
}
assert.ErrorIs(t, err, expectedErr.Err)
}
func TestCompareFromBytesFromFileToBytes(t *testing.T) {
rootPath := filepath.Join(testutils.RepoDir, "root.json")
rootBytesWant, err := os.ReadFile(rootPath)
assert.NoError(t, err)
root, err := Root().FromFile(rootPath)
assert.NoError(t, err)
rootBytesActual, err := root.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, rootBytesWant, rootBytesActual)
targetsPath := filepath.Join(testutils.RepoDir, "targets.json")
targetsBytesWant, err := os.ReadFile(targetsPath)
assert.NoError(t, err)
targets, err := Targets().FromFile(targetsPath)
assert.NoError(t, err)
targetsBytesActual, err := targets.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, targetsBytesWant, targetsBytesActual)
snapshotPath := filepath.Join(testutils.RepoDir, "snapshot.json")
snapshotBytesWant, err := os.ReadFile(snapshotPath)
assert.NoError(t, err)
snapshot, err := Snapshot().FromFile(snapshotPath)
assert.NoError(t, err)
snapshotBytesActual, err := snapshot.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, snapshotBytesWant, snapshotBytesActual)
timestampPath := filepath.Join(testutils.RepoDir, "timestamp.json")
timestampBytesWant, err := os.ReadFile(timestampPath)
assert.NoError(t, err)
timestamp, err := Timestamp().FromFile(timestampPath)
assert.NoError(t, err)
timestampBytesActual, err := timestamp.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, timestampBytesWant, timestampBytesActual)
}
func TestRootReadWriteReadCompare(t *testing.T) {
src := filepath.Join(testutils.RepoDir, "root.json")
srcRoot, err := Root().FromFile(src)
assert.NoError(t, err)
dst := src + ".tmp"
err = srcRoot.ToFile(dst, false)
assert.NoError(t, err)
dstRoot, err := Root().FromFile(dst)
assert.NoError(t, err)
srcBytes, err := srcRoot.ToBytes(false)
assert.NoError(t, err)
dstBytes, err := dstRoot.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, srcBytes, dstBytes)
err = os.RemoveAll(dst)
assert.NoError(t, err)
}
func TestSnapshotReadWriteReadCompare(t *testing.T) {
path1 := filepath.Join(testutils.RepoDir, "snapshot.json")
snaphot1, err := Snapshot().FromFile(path1)
assert.NoError(t, err)
path2 := path1 + ".tmp"
err = snaphot1.ToFile(path2, false)
assert.NoError(t, err)
snapshot2, err := Snapshot().FromFile(path2)
assert.NoError(t, err)
bytes1, err := snaphot1.ToBytes(false)
assert.NoError(t, err)
bytes2, err := snapshot2.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, bytes1, bytes2)
err = os.RemoveAll(path2)
assert.NoError(t, err)
}
func TestTargetsReadWriteReadCompare(t *testing.T) {
path1 := filepath.Join(testutils.RepoDir, "targets.json")
targets1, err := Targets().FromFile(path1)
assert.NoError(t, err)
path2 := path1 + ".tmp"
err = targets1.ToFile(path2, false)
assert.NoError(t, err)
targets2, err := Targets().FromFile(path2)
assert.NoError(t, err)
bytes1, err := targets1.ToBytes(false)
assert.NoError(t, err)
bytes2, err := targets2.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, bytes1, bytes2)
err = os.RemoveAll(path2)
assert.NoError(t, err)
}
func TestTimestampReadWriteReadCompare(t *testing.T) {
path1 := filepath.Join(testutils.RepoDir, "timestamp.json")
timestamp1, err := Timestamp().FromFile(path1)
assert.NoError(t, err)
path2 := path1 + ".tmp"
err = timestamp1.ToFile(path2, false)
assert.NoError(t, err)
timestamp2, err := Timestamp().FromFile(path2)
assert.NoError(t, err)
bytes1, err := timestamp1.ToBytes(false)
assert.NoError(t, err)
bytes2, err := timestamp2.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, bytes1, bytes2)
err = os.RemoveAll(path2)
assert.NoError(t, err)
}
func TestToFromBytes(t *testing.T) {
// ROOT
rootPath := filepath.Join(testutils.RepoDir, "root.json")
data, err := os.ReadFile(rootPath)
assert.NoError(t, err)
root, err := Root().FromBytes(data)
assert.NoError(t, err)
// Verify that FromBytes/ToBytes round-tripping doesn't change the content
// for two cases for the serializer: noncompact and compact.
// Case 1: test noncompact by overriding the default serializer.
rootBytesWant, err := root.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, data, rootBytesWant)
// Case 2: test compact by using the default serializer.
root2, err := Root().FromBytes(rootBytesWant)
assert.NoError(t, err)
rootBytesActual, err := root2.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, rootBytesWant, rootBytesActual)
// SNAPSHOT
data, err = os.ReadFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.NoError(t, err)
snapshot, err := Snapshot().FromBytes(data)
assert.NoError(t, err)
// Case 1: test noncompact by overriding the default serializer.
snapshotBytesWant, err := snapshot.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, data, snapshotBytesWant)
// Case 2: test compact by using the default serializer.
snapshot2, err := Snapshot().FromBytes(snapshotBytesWant)
assert.NoError(t, err)
snapshotBytesActual, err := snapshot2.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, snapshotBytesWant, snapshotBytesActual)
// TARGETS
data, err = os.ReadFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.NoError(t, err)
targets, err := Targets().FromBytes(data)
assert.NoError(t, err)
// Case 1: test noncompact by overriding the default serializer.
targetsBytesWant, err := targets.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, data, targetsBytesWant)
// Case 2: test compact by using the default serializer.
targets2, err := Targets().FromBytes(targetsBytesWant)
assert.NoError(t, err)
targetsBytesActual, err := targets2.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, targetsBytesWant, targetsBytesActual)
// TIMESTAMP
data, err = os.ReadFile(filepath.Join(testutils.RepoDir, "timestamp.json"))
assert.NoError(t, err)
timestamp, err := Timestamp().FromBytes(data)
assert.NoError(t, err)
// Case 1: test noncompact by overriding the default serializer.
timestampBytesWant, err := timestamp.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, data, timestampBytesWant)
// Case 2: test compact by using the default serializer.
timestamp2, err := Timestamp().FromBytes(timestampBytesWant)
assert.NoError(t, err)
timestampBytesActual, err := timestamp2.ToBytes(true)
assert.NoError(t, err)
assert.Equal(t, timestampBytesWant, timestampBytesActual)
}
func TestSignVerify(t *testing.T) {
root, err := Root().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.NoError(t, err)
// Locate the public keys we need from root
assert.NotEmpty(t, root.Signed.Roles[TARGETS].KeyIDs)
targetsKeyID := root.Signed.Roles[TARGETS].KeyIDs[0]
assert.NotEmpty(t, root.Signed.Roles[SNAPSHOT].KeyIDs)
snapshotKeyID := root.Signed.Roles[SNAPSHOT].KeyIDs[0]
assert.NotEmpty(t, root.Signed.Roles[TIMESTAMP].KeyIDs)
timestampKeyID := root.Signed.Roles[TIMESTAMP].KeyIDs[0]
// Load sample metadata (targets) and assert ...
targets, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.NoError(t, err)
sig, _ := getSignatureByKeyID(targets.Signatures, targetsKeyID)
data, err := targets.Signed.MarshalJSON()
assert.NoError(t, err)
// ... it has a single existing signature,
assert.Equal(t, 1, len(targets.Signatures))
// ... which is valid for the correct key.
targetsKey := root.Signed.Keys[targetsKeyID]
targetsPublicKey, err := targetsKey.ToPublicKey()
assert.NoError(t, err)
targetsHash := crypto.SHA256
targetsVerifier, err := signature.LoadVerifier(targetsPublicKey, targetsHash)
assert.NoError(t, err)
err = targetsVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
assert.NoError(t, err)
// ... and invalid for an unrelated key
snapshotKey := root.Signed.Keys[snapshotKeyID]
snapshotPublicKey, err := snapshotKey.ToPublicKey()
assert.NoError(t, err)
snapshotHash := crypto.SHA256
snapshotVerifier, err := signature.LoadVerifier(snapshotPublicKey, snapshotHash)
assert.NoError(t, err)
err = snapshotVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
assert.ErrorContains(t, err, "crypto/rsa: verification error")
// Append a new signature with the unrelated key and assert that ...
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "snapshot_key"), crypto.SHA256, cryptoutils.SkipPassword)
assert.NoError(t, err)
snapshotSig, err := targets.Sign(signer)
assert.NoError(t, err)
// ... there are now two signatures, and
assert.Equal(t, 2, len(targets.Signatures))
// ... both are valid for the corresponding keys.
err = targetsVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
assert.NoError(t, err)
err = snapshotVerifier.VerifySignature(bytes.NewReader(snapshotSig.Signature), bytes.NewReader(data))
assert.NoError(t, err)
// ... the returned (appended) signature is for snapshot key
assert.Equal(t, snapshotSig.KeyID, snapshotKeyID)
// Clear all signatures and add a new signature with the unrelated key and assert that ...
signer, err = signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "timestamp_key"), crypto.SHA256, cryptoutils.SkipPassword)
assert.NoError(t, err)
targets.ClearSignatures()
assert.Equal(t, 0, len(targets.Signatures))
timestampSig, err := targets.Sign(signer)
assert.NoError(t, err)
// ... there now is only one signature,
assert.Equal(t, 1, len(targets.Signatures))
// ... valid for that key.
timestampKey := root.Signed.Keys[timestampKeyID]
timestampPublicKey, err := timestampKey.ToPublicKey()
assert.NoError(t, err)
timestampHash := crypto.SHA256
timestampVerifier, err := signature.LoadVerifier(timestampPublicKey, timestampHash)
assert.NoError(t, err)
err = timestampVerifier.VerifySignature(bytes.NewReader(timestampSig.Signature), bytes.NewReader(data))
assert.NoError(t, err)
err = targetsVerifier.VerifySignature(bytes.NewReader(timestampSig.Signature), bytes.NewReader(data))
assert.ErrorContains(t, err, "crypto/rsa: verification error")
}
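// Reduced to a sketch, the sign-then-verify flow exercised above looks like
// the following (error handling elided; keyPath and pub are placeholders for
// a PEM-encoded private key file and the matching public key from root):
//
//	signer, _ := signature.LoadSignerFromPEMFile(keyPath, crypto.SHA256, cryptoutils.SkipPassword)
//	sig, _ := meta.Sign(signer)             // appends to and returns the new Signature
//	payload, _ := meta.Signed.MarshalJSON() // the signed portion that was actually signed
//	verifier, _ := signature.LoadVerifier(pub, crypto.SHA256)
//	err := verifier.VerifySignature(bytes.NewReader(sig.Signature), bytes.NewReader(payload))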
func TestKeyVerifyFailures(t *testing.T) {
root, err := Root().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.NoError(t, err)
// Locate the timestamp public key we need from root
assert.NotEmpty(t, root.Signed.Roles[TIMESTAMP].KeyIDs)
timestampKeyID := root.Signed.Roles[TIMESTAMP].KeyIDs[0]
// Load sample metadata (timestamp)
timestamp, err := Timestamp().FromFile(filepath.Join(testutils.RepoDir, "timestamp.json"))
assert.NoError(t, err)
timestampSig, _ := getSignatureByKeyID(timestamp.Signatures, timestampKeyID)
data, err := timestamp.Signed.MarshalJSON()
assert.NoError(t, err)
// Test failure on an unknown key type
// Originally this test also covered an unknown scheme, but in this
// implementation scheme changes do not affect any further functionality
timestampKey := root.Signed.Keys[timestampKeyID]
ttype := timestampKey.Type
timestampKey.Type = "foo"
timestampPublicKey, err := timestampKey.ToPublicKey()
assert.Error(t, err, "unsupported public key type")
timestampHash := crypto.SHA256
timestampVerifier, err := signature.LoadVerifier(timestampPublicKey, timestampHash)
assert.Error(t, err, "unsupported public key type")
assert.Nil(t, timestampVerifier)
timestampKey.Type = ttype
timestampPublicKey, err = timestampKey.ToPublicKey()
assert.NoError(t, err)
timestampHash = crypto.SHA256
timestampVerifier, err = signature.LoadVerifier(timestampPublicKey, timestampHash)
assert.NoError(t, err)
err = timestampVerifier.VerifySignature(bytes.NewReader(timestampSig), bytes.NewReader(data))
assert.NoError(t, err)
timestampKey.Type = ttype
// Test failure on broken public key data
public := timestampKey.Value.PublicKey
timestampKey.Value.PublicKey = "ffff"
timestampBrokenPublicKey, err := timestampKey.ToPublicKey()
assert.ErrorContains(t, err, "PEM decoding failed")
timestampHash = crypto.SHA256
timestampNilVerifier, err := signature.LoadVerifier(timestampBrokenPublicKey, timestampHash)
assert.ErrorContains(t, err, "unsupported public key type")
assert.Nil(t, timestampNilVerifier)
timestampKey.Value.PublicKey = public
// Test failure with invalid signature
sigData := []byte("foo")
h32 := sha256.Sum256(sigData)
incorrectTimestampSig := h32[:]
err = timestampVerifier.VerifySignature(bytes.NewReader(incorrectTimestampSig), bytes.NewReader(data))
assert.ErrorContains(t, err, "crypto/rsa: verification error")
// Test failure with valid but incorrect signature
anotherSig := root.Signatures[0]
h32 = sha256.Sum256([]byte(anotherSig.Signature.String()))
incorrectValidTimestampSig := h32[:]
err = timestampVerifier.VerifySignature(bytes.NewReader(incorrectValidTimestampSig), bytes.NewReader(data))
assert.ErrorContains(t, err, "crypto/rsa: verification error")
}
func TestMetadataSignedIsExpired(t *testing.T) {
// Use of Snapshot is arbitrary; we're just testing the shared Signed
// behavior with real data
snapshot, err := Snapshot().FromFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.NoError(t, err)
assert.Equal(t, time.Date(2030, 8, 15, 14, 30, 45, 100, time.UTC), snapshot.Signed.Expires)
// Test IsExpired with reference time provided
// In the Go implementation IsExpired tests >= rather than only >,
// which results in snapshot.Signed.Expires IsExpired check
// being false by default, so we skip the default assertion
isExpired := snapshot.Signed.IsExpired(snapshot.Signed.Expires.Add(time.Microsecond))
assert.True(t, isExpired)
isExpired = snapshot.Signed.IsExpired(snapshot.Signed.Expires.Add(-time.Microsecond))
assert.False(t, isExpired)
}
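// A minimal restatement of the two cases asserted above (offsets are the
// test's own):
//
//	exp := snapshot.Signed.Expires
//	snapshot.Signed.IsExpired(exp.Add(time.Microsecond))  // true: reference time is past Expires
//	snapshot.Signed.IsExpired(exp.Add(-time.Microsecond)) // false: still before Expires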
func TestMetadataVerifyDelegate(t *testing.T) {
root, err := Root().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.NoError(t, err)
snapshot, err := Snapshot().FromFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.NoError(t, err)
targets, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.NoError(t, err)
role1, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "role1.json"))
assert.NoError(t, err)
role2, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "role2.json"))
assert.NoError(t, err)
// Test the expected delegation tree
err = root.VerifyDelegate(ROOT, root)
assert.NoError(t, err)
err = root.VerifyDelegate(SNAPSHOT, snapshot)
assert.NoError(t, err)
err = root.VerifyDelegate(TARGETS, targets)
assert.NoError(t, err)
err = targets.VerifyDelegate("role1", role1)
assert.NoError(t, err)
err = role1.VerifyDelegate("role2", role2)
assert.NoError(t, err)
// Only root and targets can verify delegates
err = snapshot.VerifyDelegate(SNAPSHOT, snapshot)
assert.ErrorIs(t, err, &ErrType{"call is valid only on delegator metadata (should be either root or targets)"})
// Verify fails for roles that are not delegated by delegator
err = root.VerifyDelegate("role1", role1)
assert.ErrorIs(t, err, &ErrValue{"no delegation found for role1"})
err = targets.VerifyDelegate(TARGETS, targets)
assert.ErrorIs(t, err, &ErrValue{"no delegation found for targets"})
// Verify fails when delegator has no delegations
err = role2.VerifyDelegate("role1", role1)
assert.ErrorIs(t, err, &ErrValue{"no delegations found"})
// Verify fails when delegate content is modified
expires := snapshot.Signed.Expires
snapshot.Signed.Expires = snapshot.Signed.Expires.Add(time.Hour * 24)
err = root.VerifyDelegate(SNAPSHOT, snapshot)
assert.ErrorIs(t, err, &ErrUnsignedMetadata{"Verifying snapshot failed, not enough signatures, got 0, want 1"})
snapshot.Signed.Expires = expires
// Verify fails with verification error
// (in this case signature is malformed)
keyID := root.Signed.Roles[SNAPSHOT].KeyIDs[0]
goodSig, idx := getSignatureByKeyID(snapshot.Signatures, keyID)
assert.NotEmpty(t, goodSig)
snapshot.Signatures[idx].Signature = []byte("foo")
err = root.VerifyDelegate(SNAPSHOT, snapshot)
assert.ErrorIs(t, err, &ErrUnsignedMetadata{"Verifying snapshot failed, not enough signatures, got 0, want 1"})
snapshot.Signatures[idx].Signature = goodSig
// Verify fails if roles keys do not sign the metadata
err = root.VerifyDelegate(TIMESTAMP, snapshot)
assert.ErrorIs(t, err, &ErrUnsignedMetadata{"Verifying timestamp failed, not enough signatures, got 0, want 1"})
// Add a key to snapshot role, make sure the new sig fails to verify
tsKeyID := root.Signed.Roles[TIMESTAMP].KeyIDs[0]
err = root.Signed.AddKey(root.Signed.Keys[tsKeyID], SNAPSHOT)
assert.NoError(t, err)
newSig := Signature{
KeyID: tsKeyID,
Signature: []byte(strings.Repeat("ff", 64)),
}
snapshot.Signatures = append(snapshot.Signatures, newSig)
// Verify succeeds if threshold is reached even if some signatures
// fail to verify
err = root.VerifyDelegate(SNAPSHOT, snapshot)
assert.NoError(t, err)
// Verify fails if threshold of signatures is not reached
root.Signed.Roles[SNAPSHOT].Threshold = 2
err = root.VerifyDelegate(SNAPSHOT, snapshot)
assert.ErrorIs(t, err, &ErrUnsignedMetadata{"Verifying snapshot failed, not enough signatures, got 1, want 2"})
// Verify succeeds when we correct the new signature and reach the
// threshold of 2 keys
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "timestamp_key"), crypto.SHA256, cryptoutils.SkipPassword)
assert.NoError(t, err)
_, err = snapshot.Sign(signer)
assert.NoError(t, err)
err = root.VerifyDelegate(SNAPSHOT, snapshot)
assert.NoError(t, err)
}
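// Hedged summary of the delegator-side check exercised above: VerifyDelegate
// counts how many of the delegated role's listed keys produced a valid
// signature over the delegate's signed portion and compares that count with
// the role's threshold, e.g. (continuing with the objects loaded in this test)
//
//	root.Signed.Roles[SNAPSHOT].Threshold = 2
//	err := root.VerifyDelegate(SNAPSHOT, snapshot) // ErrUnsignedMetadata until two listed keys have signed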
func TestRootAddKeyAndRevokeKey(t *testing.T) {
root, err := Root().FromFile(filepath.Join(testutils.RepoDir, "root.json"))
assert.NoError(t, err)
// Create a new key
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "root_key2"), crypto.SHA256, cryptoutils.SkipPassword)
assert.NoError(t, err)
key, err := signer.PublicKey()
assert.NoError(t, err)
rootKey2, err := KeyFromPublicKey(key)
assert.NoError(t, err)
// Assert that root does not contain the new key
assert.NotContains(t, root.Signed.Roles[ROOT].KeyIDs, rootKey2.id)
assert.NotContains(t, root.Signed.Keys, rootKey2.id)
// Add new root key
err = root.Signed.AddKey(rootKey2, ROOT)
assert.NoError(t, err)
// Assert that key is added
assert.Contains(t, root.Signed.Roles[ROOT].KeyIDs, rootKey2.id)
assert.Contains(t, root.Signed.Keys, rootKey2.id)
// Confirm that the newly added key does not break
// the object serialization
_, err = root.Signed.MarshalJSON()
assert.NoError(t, err)
// Try adding the same key again and assert its ignored.
preAddKeyIDs := make([]string, len(root.Signed.Roles[ROOT].KeyIDs))
copy(preAddKeyIDs, root.Signed.Roles[ROOT].KeyIDs)
err = root.Signed.AddKey(rootKey2, ROOT)
assert.NoError(t, err)
assert.Equal(t, preAddKeyIDs, root.Signed.Roles[ROOT].KeyIDs)
// Add the same key to targets role as well
err = root.Signed.AddKey(rootKey2, TARGETS)
assert.NoError(t, err)
// Add the same key to a nonexistent role.
err = root.Signed.AddKey(rootKey2, "nosuchrole")
assert.ErrorIs(t, err, &ErrValue{"role nosuchrole doesn't exist"})
// Remove the key from root role (targets role still uses it)
err = root.Signed.RevokeKey(rootKey2.id, ROOT)
assert.NoError(t, err)
assert.NotContains(t, root.Signed.Roles[ROOT].KeyIDs, rootKey2.id)
assert.Contains(t, root.Signed.Keys, rootKey2.id)
// Remove the key from targets as well
err = root.Signed.RevokeKey(rootKey2.id, TARGETS)
assert.NoError(t, err)
assert.NotContains(t, root.Signed.Roles[ROOT].KeyIDs, rootKey2.id)
assert.NotContains(t, root.Signed.Keys, rootKey2.id)
err = root.Signed.RevokeKey("nosuchkey", ROOT)
assert.ErrorIs(t, err, &ErrValue{"key with id nosuchkey is not used by root"})
err = root.Signed.RevokeKey(rootKey2.id, "nosuchrole")
assert.ErrorIs(t, err, &ErrValue{"role nosuchrole doesn't exist"})
}
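// Key rotation in a nutshell, sketched only from the calls used above
// (signer loading and error handling elided; pub and oldKeyID are placeholders):
//
//	newKey, _ := KeyFromPublicKey(pub)        // pub is a supported crypto.PublicKey
//	_ = root.Signed.AddKey(newKey, ROOT)      // registers the key and adds it to the role
//	_ = root.Signed.RevokeKey(oldKeyID, ROOT) // removes it from the role; the Keys entry goes once unused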
func TestTargetsKeyAPI(t *testing.T) {
targets, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.NoError(t, err)
delegatedRole := DelegatedRole{
Name: "role2",
Paths: []string{"fn3", "fn4"},
KeyIDs: []string{},
Terminating: false,
Threshold: 1,
}
targets.Signed.Delegations.Roles = append(targets.Signed.Delegations.Roles, delegatedRole)
key := &Key{
Type: "ed25519",
Value: KeyVal{PublicKey: "edcd0a32a07dce33f7c7873aaffbff36d20ea30787574ead335eefd337e4dacd"},
Scheme: "ed25519",
}
// Assert that delegated role "role1" does not contain the new key
assert.Equal(t, "role1", targets.Signed.Delegations.Roles[0].Name)
assert.NotContains(t, targets.Signed.Delegations.Roles[0].KeyIDs, key.id)
err = targets.Signed.AddKey(key, "role1")
assert.NoError(t, err)
// Assert that the new key is added to the delegated role "role1"
assert.Contains(t, targets.Signed.Delegations.Roles[0].KeyIDs, key.id)
// Try adding the same key again and assert its ignored.
pastKeyIDs := make([]string, len(targets.Signed.Delegations.Roles[0].KeyIDs))
copy(pastKeyIDs, targets.Signed.Delegations.Roles[0].KeyIDs)
err = targets.Signed.AddKey(key, "role1")
assert.NoError(t, err)
assert.Equal(t, pastKeyIDs, targets.Signed.Delegations.Roles[0].KeyIDs)
// Try adding a key to a delegated role that doesn't exist
err = targets.Signed.AddKey(key, "nosuchrole")
assert.ErrorIs(t, err, &ErrValue{"delegated role nosuchrole doesn't exist"})
// Add the same key to "role2" as well
err = targets.Signed.AddKey(key, "role2")
assert.NoError(t, err)
// Remove the key from "role1" role ("role2" still uses it)
err = targets.Signed.RevokeKey(key.id, "role1")
assert.NoError(t, err)
// Assert that delegated role "role1" doesn't contain the key.
assert.Equal(t, "role1", targets.Signed.Delegations.Roles[0].Name)
assert.Equal(t, "role2", targets.Signed.Delegations.Roles[1].Name)
assert.NotContains(t, targets.Signed.Delegations.Roles[0].KeyIDs, key.id)
assert.Contains(t, targets.Signed.Delegations.Roles[1].KeyIDs, key.id)
// Remove the key from "role2" as well
err = targets.Signed.RevokeKey(key.id, "role2")
assert.NoError(t, err)
assert.NotContains(t, targets.Signed.Delegations.Roles[1].KeyIDs, key.id)
// Try remove key not used by "role1"
err = targets.Signed.RevokeKey(key.id, "role1")
assert.ErrorIs(t, err, &ErrValue{fmt.Sprintf("key with id %s is not used by role1", key.id)})
// Try removing a key from a delegated role that doesn't exist
err = targets.Signed.RevokeKey(key.id, "nosuchrole")
assert.ErrorIs(t, err, &ErrValue{"delegated role nosuchrole doesn't exist"})
// Remove delegations as a whole
targets.Signed.Delegations = nil
// Test that calling AddKey and RevokeKey returns an error
// and that Delegations is still nil after each of the API calls
err = targets.Signed.AddKey(key, "role1")
assert.ErrorIs(t, err, &ErrValue{"delegated role role1 doesn't exist"})
err = targets.Signed.RevokeKey(key.id, "role1")
assert.ErrorIs(t, err, &ErrValue{"delegated role role1 doesn't exist"})
assert.Nil(t, targets.Signed.Delegations)
}
func TestTargetsKeyAPIWithSuccinctRoles(t *testing.T) {
targets, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.NoError(t, err)
// Remove delegated roles
assert.NotNil(t, targets.Signed.Delegations)
assert.NotNil(t, targets.Signed.Delegations.Roles)
targets.Signed.Delegations.Roles = nil
targets.Signed.Delegations.Keys = map[string]*Key{}
// Add succinct roles information
targets.Signed.Delegations.SuccinctRoles = &SuccinctRoles{
KeyIDs: []string{},
Threshold: 1,
BitLength: 8,
NamePrefix: "foo",
}
assert.Equal(t, 0, len(targets.Signed.Delegations.Keys))
assert.Equal(t, 0, len(targets.Signed.Delegations.SuccinctRoles.KeyIDs))
// Add a key to succinct_roles and verify it's saved.
key := &Key{
Type: "ed25519",
Value: KeyVal{PublicKey: "edcd0a32a07dce33f7c7873aaffbff36d20ea30787574ead335eefd337e4dacd"},
Scheme: "ed25519",
}
err = targets.Signed.AddKey(key, "foo")
assert.NoError(t, err)
assert.Contains(t, targets.Signed.Delegations.Keys, key.id)
assert.Contains(t, targets.Signed.Delegations.SuccinctRoles.KeyIDs, key.id)
assert.Equal(t, 1, len(targets.Signed.Delegations.Keys))
// Try adding the same key again and verify that nothing is added.
err = targets.Signed.AddKey(key, "foo")
assert.NoError(t, err)
assert.Equal(t, 1, len(targets.Signed.Delegations.Keys))
// Remove the key and verify it's not stored anymore.
err = targets.Signed.RevokeKey(key.id, "foo")
assert.NoError(t, err)
assert.NotContains(t, targets.Signed.Delegations.Keys, key.id)
assert.NotContains(t, targets.Signed.Delegations.SuccinctRoles.KeyIDs, key.id)
assert.Equal(t, 0, len(targets.Signed.Delegations.Keys))
// Try removing it again.
err = targets.Signed.RevokeKey(key.id, "foo")
assert.ErrorIs(t, err, &ErrValue{fmt.Sprintf("key with id %s is not used by SuccinctRoles", key.id)})
}
func TestLengthAndHashValidation(t *testing.T) {
// Test metadata files' hash and length verification.
// Use timestamp to get a MetaFile object and snapshot
// for untrusted metadata file to verify.
timestamp, err := Timestamp().FromFile(filepath.Join(testutils.RepoDir, "timestamp.json"))
assert.NoError(t, err)
snapshotMetafile := timestamp.Signed.Meta["snapshot.json"]
assert.NotNil(t, snapshotMetafile)
snapshotData, err := os.ReadFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.NoError(t, err)
h32 := sha256.Sum256(snapshotData)
h := h32[:]
snapshotMetafile.Hashes = map[string]HexBytes{
"sha256": h,
}
snapshotMetafile.Length = 652
data, err := os.ReadFile(filepath.Join(testutils.RepoDir, "snapshot.json"))
assert.NoError(t, err)
err = snapshotMetafile.VerifyLengthHashes(data)
assert.NoError(t, err)
// test exceptions
originalLength := snapshotMetafile.Length
snapshotMetafile.Length = 2345
err = snapshotMetafile.VerifyLengthHashes(data)
assert.ErrorIs(t, err, &ErrLengthOrHashMismatch{fmt.Sprintf("length verification failed - expected %d, got %d", 2345, originalLength)})
snapshotMetafile.Length = originalLength
originalHashSHA256 := snapshotMetafile.Hashes["sha256"]
snapshotMetafile.Hashes["sha256"] = []byte("incorrecthash")
err = snapshotMetafile.VerifyLengthHashes(data)
assert.ErrorIs(t, err, &ErrLengthOrHashMismatch{"hash verification failed - mismatch for algorithm sha256"})
snapshotMetafile.Hashes["sha256"] = originalHashSHA256
snapshotMetafile.Hashes["unsupported-alg"] = []byte("72c5cabeb3e8079545a5f4d2b067f8e35f18a0de3c2b00d3cb8d05919c19c72d")
err = snapshotMetafile.VerifyLengthHashes(data)
assert.ErrorIs(t, err, &ErrLengthOrHashMismatch{"hash verification failed - unknown hashing algorithm - unsupported-alg"})
// test optional length and hashes
snapshotMetafile.Length = 0
snapshotMetafile.Hashes = nil
err = snapshotMetafile.VerifyLengthHashes(data)
assert.NoError(t, err)
// Test target files' hash and length verification
targets, err := Targets().FromFile(filepath.Join(testutils.RepoDir, "targets.json"))
assert.NoError(t, err)
targetFile := targets.Signed.Targets["file1.txt"]
targetFileData, err := os.ReadFile(filepath.Join(testutils.TargetsDir, targetFile.Path))
assert.NoError(t, err)
// test exceptions
originalLength = targetFile.Length
targetFile.Length = 2345
err = targetFile.VerifyLengthHashes(targetFileData)
assert.ErrorIs(t, err, &ErrLengthOrHashMismatch{fmt.Sprintf("length verification failed - expected %d, got %d", 2345, originalLength)})
targetFile.Length = originalLength
targetFile.Hashes["sha256"] = []byte("incorrecthash")
err = targetFile.VerifyLengthHashes(targetFileData)
assert.ErrorIs(t, err, &ErrLengthOrHashMismatch{"hash verification failed - mismatch for algorithm sha256"})
}
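// A minimal consumer-side sketch of the verification shown above
// (downloadedSnapshotBytes is a placeholder for whatever bytes were fetched):
//
//	meta := timestamp.Signed.Meta["snapshot.json"]
//	if err := meta.VerifyLengthHashes(downloadedSnapshotBytes); err != nil {
//		// length or hash mismatch: reject the downloaded metadata
//	}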
func TestTargetFileFromFile(t *testing.T) {
// Test with an existing file and valid hash algorithm
targetFilePath := filepath.Join(testutils.TargetsDir, "file1.txt")
targetFileFromFile, err := TargetFile().FromFile(targetFilePath, "sha256")
assert.NoError(t, err)
targetFileData, err := os.ReadFile(targetFilePath)
assert.NoError(t, err)
err = targetFileFromFile.VerifyLengthHashes(targetFileData)
assert.NoError(t, err)
// Test with mismatching target file data
mismatchingTargetFilePath := filepath.Join(testutils.TargetsDir, "file2.txt")
mismatchingTargetFileData, err := os.ReadFile(mismatchingTargetFilePath)
assert.NoError(t, err)
err = targetFileFromFile.VerifyLengthHashes(mismatchingTargetFileData)
assert.ErrorIs(t, err, &ErrLengthOrHashMismatch{"hash verification failed - mismatch for algorithm sha256"})
// Test with an unsupported algorithm
_, err = TargetFile().FromFile(targetFilePath, "123")
assert.ErrorIs(t, err, &ErrValue{"failed generating TargetFile - unsupported hashing algorithm - 123"})
}
func TestTargetFileCustom(t *testing.T) {
// Test creating TargetFile and accessing custom.
targetFile := TargetFile()
customJSON := json.RawMessage([]byte(`{"foo":"bar"}`))
targetFile.Custom = &customJSON
custom, err := targetFile.Custom.MarshalJSON()
assert.NoError(t, err)
assert.Equal(t, "{\"foo\":\"bar\"}", string(custom))
}
func TestTargetFileFromBytes(t *testing.T) {
data := []byte("Inline test content")
path := filepath.Join(testutils.TargetsDir, "file1.txt")
// Test with a valid hash algorithm
targetFileFromData, err := TargetFile().FromBytes(path, data, "sha256")
assert.NoError(t, err)
err = targetFileFromData.VerifyLengthHashes(data)
assert.NoError(t, err)
// Test with no algorithms specified
targetFileFromDataWithNoAlg, err := TargetFile().FromBytes(path, data)
assert.NoError(t, err)
err = targetFileFromDataWithNoAlg.VerifyLengthHashes(data)
assert.NoError(t, err)
}
func TestIsDelegatedRole(t *testing.T) {
// Test path matches
role := &DelegatedRole{
Name: "",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"a/path", "otherpath", "a/path", "*/?ath"},
}
nonMatching, err := role.IsDelegatedPath("a/non-matching-path")
assert.NoError(t, err)
assert.False(t, nonMatching)
matching, err := role.IsDelegatedPath("a/path")
assert.NoError(t, err)
assert.True(t, matching)
// Test path hash prefix matches: sha256 sum of "a/path" is 927b0ecf9...
role = &DelegatedRole{
Name: "",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
PathHashPrefixes: []string{"knsOz5xYT", "other prefix", "knsOz5xYT", "knsOz", "kn"},
}
nonMatching, err = role.IsDelegatedPath("a/non-matching-path")
assert.NoError(t, err)
assert.False(t, nonMatching)
matching, err = role.IsDelegatedPath("a/path")
assert.NoError(t, err)
assert.True(t, matching)
}
func TestIsDelegatedRoleInSuccinctRoles(t *testing.T) {
succinctRoles := &SuccinctRoles{
KeyIDs: []string{},
Threshold: 1,
BitLength: 5,
NamePrefix: "bin",
}
falseRoleNameExamples := []string{
"foo",
"bin-",
"bin-s",
"bin-0t",
"bin-20",
"bin-100",
}
for _, roleName := range falseRoleNameExamples {
res := succinctRoles.IsDelegatedRole(roleName)
assert.False(t, res)
}
// Delegated role name suffixes are in hex format.
trueNameExamples := []string{"bin-00", "bin-0f", "bin-1f"}
for _, roleName := range trueNameExamples {
res := succinctRoles.IsDelegatedRole(roleName)
assert.True(t, res)
}
}
func TestGetRolesInSuccinctRoles(t *testing.T) {
succinctRoles := &SuccinctRoles{
KeyIDs: []string{},
Threshold: 1,
BitLength: 16,
NamePrefix: "bin",
}
// bin names are in hex format and 4 hex digits are enough to represent
// all bins between 0 and 2^16 - 1 meaning suffix_len must be 4
expectedSuffixLength := 4
suffixLen, _ := succinctRoles.GetSuffixLen()
assert.Equal(t, expectedSuffixLength, suffixLen)
allRoles := succinctRoles.GetRoles()
for binNumber, roleName := range allRoles {
// This adds zero-padding if binNumber is represented by a hex
// number with fewer digits than expectedSuffixLength.
expectedBinSuffix := fmt.Sprintf("%0"+strconv.Itoa(expectedSuffixLength)+"x", binNumber)
assert.Equal(t, fmt.Sprintf("bin-%s", expectedBinSuffix), roleName)
}
}
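// For intuition (numbers follow the BitLength used above): with BitLength 16
// there are 2^16 bins, the hex suffix length is 16/4 = 4, and GetRoles yields
// names "bin-0000" through "bin-ffff". Per the succinct hashed-bins design, a
// target path is expected to land in the bin selected by the leading
// BitLength bits of the SHA-256 hash of the path.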
golang-github-theupdateframework-go-tuf-2.0.2/metadata/metadata_test.go 0000664 0000000 0000000 00000057203 14706111210 0026266 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"crypto/ed25519"
"crypto/sha256"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var testRootBytes = []byte("{\"signatures\":[{\"keyid\":\"roothash\",\"sig\":\"1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee\"}],\"signed\":{\"_type\":\"root\",\"consistent_snapshot\":true,\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"keys\":{\"roothash\":{\"keytype\":\"ed25519\",\"keyval\":{\"public\":\"pubrootval\"},\"scheme\":\"ed25519\"},\"snapshothash\":{\"keytype\":\"ed25519\",\"keyval\":{\"public\":\"pubsval\"},\"scheme\":\"ed25519\"},\"targetshash\":{\"keytype\":\"ed25519\",\"keyval\":{\"public\":\"pubtrval\"},\"scheme\":\"ed25519\"},\"timestamphash\":{\"keytype\":\"ed25519\",\"keyval\":{\"public\":\"pubtmval\"},\"scheme\":\"ed25519\"}},\"roles\":{\"root\":{\"keyids\":[\"roothash\"],\"threshold\":1},\"snapshot\":{\"keyids\":[\"snapshothash\"],\"threshold\":1},\"targets\":{\"keyids\":[\"targetshash\"],\"threshold\":1},\"timestamp\":{\"keyids\":[\"timestamphash\"],\"threshold\":1}},\"spec_version\":\"1.0.31\",\"version\":1}}")
const TEST_REPOSITORY_DATA = "../internal/testutils/repository_data/repository/metadata"
var fixedExpire = time.Date(2030, 8, 15, 14, 30, 45, 100, time.UTC)
func getSignatureByKeyID(signatures []Signature, keyID string) (HexBytes, int) {
for i, sig := range signatures {
if sig.KeyID == keyID {
return sig.Signature, i
}
}
return []byte{}, 0
}
func TestDefaultValuesRoot(t *testing.T) {
// without setting expiration
meta := Root()
assert.NotNil(t, meta)
assert.GreaterOrEqual(t, []time.Time{time.Now().UTC()}[0], meta.Signed.Expires)
// setting expiration
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Root(expire)
assert.NotNil(t, meta)
assert.Equal(t, expire, meta.Signed.Expires)
// Type
assert.Equal(t, ROOT, meta.Signed.Type)
// SpecVersion
assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion)
// Version
assert.Equal(t, int64(1), meta.Signed.Version)
// Threshold and KeyIDs for Roles
for _, role := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} {
assert.Equal(t, 1, meta.Signed.Roles[role].Threshold)
assert.Equal(t, []string{}, meta.Signed.Roles[role].KeyIDs)
}
// Keys
assert.Equal(t, map[string]*Key{}, meta.Signed.Keys)
// Consistent snapshot
assert.True(t, meta.Signed.ConsistentSnapshot)
// Signatures
assert.Equal(t, []Signature{}, meta.Signatures)
}
func TestDefaultValuesSnapshot(t *testing.T) {
// without setting expiration
meta := Snapshot()
assert.NotNil(t, meta)
assert.GreaterOrEqual(t, []time.Time{time.Now().UTC()}[0], meta.Signed.Expires)
// setting expiration
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Snapshot(expire)
assert.NotNil(t, meta)
assert.Equal(t, expire, meta.Signed.Expires)
// Type
assert.Equal(t, SNAPSHOT, meta.Signed.Type)
// SpecVersion
assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion)
// Version
assert.Equal(t, int64(1), meta.Signed.Version)
// Targets meta
assert.Equal(t, map[string]*MetaFiles{"targets.json": {Version: 1}}, meta.Signed.Meta)
// Signatures
assert.Equal(t, []Signature{}, meta.Signatures)
}
func TestDefaultValuesTimestamp(t *testing.T) {
// without setting expiration
meta := Timestamp()
assert.NotNil(t, meta)
assert.GreaterOrEqual(t, []time.Time{time.Now().UTC()}[0], meta.Signed.Expires)
// setting expiration
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Timestamp(expire)
assert.NotNil(t, meta)
assert.Equal(t, expire, meta.Signed.Expires)
// Type
assert.Equal(t, TIMESTAMP, meta.Signed.Type)
// SpecVersion
assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion)
// Version
assert.Equal(t, int64(1), meta.Signed.Version)
// Snapshot meta
assert.Equal(t, map[string]*MetaFiles{"snapshot.json": {Version: 1}}, meta.Signed.Meta)
// Signatures
assert.Equal(t, []Signature{}, meta.Signatures)
}
func TestDefaultValuesTargets(t *testing.T) {
// without setting expiration
meta := Targets()
assert.NotNil(t, meta)
assert.GreaterOrEqual(t, []time.Time{time.Now().UTC()}[0], meta.Signed.Expires)
// setting expiration
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Targets(expire)
assert.NotNil(t, meta)
assert.Equal(t, expire, meta.Signed.Expires)
// Type
assert.Equal(t, TARGETS, meta.Signed.Type)
// SpecVersion
assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion)
// Version
assert.Equal(t, int64(1), meta.Signed.Version)
// Target files
assert.Equal(t, map[string]*TargetFiles{}, meta.Signed.Targets)
// Signatures
assert.Equal(t, []Signature{}, meta.Signatures)
}
func TestDefaultValuesTargetFile(t *testing.T) {
targetFile := TargetFile()
assert.NotNil(t, targetFile)
assert.Equal(t, int64(0), targetFile.Length)
assert.Equal(t, Hashes{}, targetFile.Hashes)
}
func TestMetaFileDefaultValues(t *testing.T) {
version := int64(0)
metaFile := MetaFile(version)
assert.NotNil(t, metaFile)
assert.Equal(t, int64(0), metaFile.Length)
assert.Equal(t, Hashes{}, metaFile.Hashes)
assert.Equal(t, int64(1), metaFile.Version)
version = int64(-1)
metaFile = MetaFile(version)
assert.NotNil(t, metaFile)
assert.Equal(t, int64(0), metaFile.Length)
assert.Equal(t, Hashes{}, metaFile.Hashes)
assert.Equal(t, int64(1), metaFile.Version)
version = int64(1)
metaFile = MetaFile(version)
assert.NotNil(t, metaFile)
assert.Equal(t, int64(0), metaFile.Length)
assert.Equal(t, Hashes{}, metaFile.Hashes)
assert.Equal(t, int64(1), metaFile.Version)
version = int64(2)
metaFile = MetaFile(version)
assert.NotNil(t, metaFile)
assert.Equal(t, int64(0), metaFile.Length)
assert.Equal(t, Hashes{}, metaFile.Hashes)
assert.Equal(t, int64(2), metaFile.Version)
}
func TestIsDelegatedPath(t *testing.T) {
type pathMatch struct {
Pattern []string
TargetPath string
Expected bool
}
// As per - https://theupdateframework.github.io/specification/latest/#pathpattern
matches := []pathMatch{
// a PATHPATTERN of "targets/*.tgz" would match file paths "targets/foo.tgz" and "targets/bar.tgz", but not "targets/foo.txt".
{
Pattern: []string{"targets/*.tgz"},
TargetPath: "targets/foo.tgz",
Expected: true,
},
{
Pattern: []string{"targets/*.tgz"},
TargetPath: "targets/bar.tgz",
Expected: true,
},
{
Pattern: []string{"targets/*.tgz"},
TargetPath: "targets/foo.txt",
Expected: false,
},
// a PATHPATTERN of "foo-version-?.tgz" matches "foo-version-2.tgz" and "foo-version-a.tgz", but not "foo-version-alpha.tgz".
{
Pattern: []string{"foo-version-?.tgz"},
TargetPath: "foo-version-2.tgz",
Expected: true,
},
{
Pattern: []string{"foo-version-?.tgz"},
TargetPath: "foo-version-a.tgz",
Expected: true,
},
{
Pattern: []string{"foo-version-?.tgz"},
TargetPath: "foo-version-alpha.tgz",
Expected: false,
},
// a PATHPATTERN of "*.tgz" would match "foo.tgz" and "bar.tgz", but not "targets/foo.tgz"
{
Pattern: []string{"*.tgz"},
TargetPath: "foo.tgz",
Expected: true,
},
{
Pattern: []string{"*.tgz"},
TargetPath: "bar.tgz",
Expected: true,
},
{
Pattern: []string{"*.tgz"},
TargetPath: "targets/foo.tgz",
Expected: false,
},
// a PATHPATTERN of "foo.tgz" would match only "foo.tgz"
{
Pattern: []string{"foo.tgz"},
TargetPath: "foo.tgz",
Expected: true,
},
{
Pattern: []string{"foo.tgz"},
TargetPath: "foosy.tgz",
Expected: false,
},
}
for _, match := range matches {
role := &DelegatedRole{
Paths: match.Pattern,
}
ok, err := role.IsDelegatedPath(match.TargetPath)
assert.Equal(t, match.Expected, ok)
assert.Nil(t, err)
}
}
func TestClearSignatures(t *testing.T) {
meta := Root()
// verify signatures is empty
assert.Equal(t, []Signature{}, meta.Signatures)
// create a signature
sig := &Signature{
KeyID: "keyid",
Signature: HexBytes{},
}
// update the Signatures part
meta.Signatures = append(meta.Signatures, *sig)
// verify signatures is not empty
assert.NotEqual(t, []Signature{}, meta.Signatures)
// clear signatures
meta.ClearSignatures()
// verify signatures is empty
assert.Equal(t, []Signature{}, meta.Signatures)
}
func TestIsExpiredRoot(t *testing.T) {
// without setting expiration
meta := Root()
assert.NotNil(t, meta)
// ensure time passed
time.Sleep(1 * time.Microsecond)
assert.True(t, meta.Signed.IsExpired(time.Now().UTC()))
// setting expiration in 2 days from now
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Root(expire)
assert.NotNil(t, meta)
assert.False(t, meta.Signed.IsExpired(time.Now().UTC()))
}
func TestIsExpiredSnapshot(t *testing.T) {
// without setting expiration
meta := Snapshot()
assert.NotNil(t, meta)
// ensure time passed
time.Sleep(1 * time.Microsecond)
assert.True(t, meta.Signed.IsExpired(time.Now().UTC()))
// setting expiration in 2 days from now
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Snapshot(expire)
assert.NotNil(t, meta)
assert.False(t, meta.Signed.IsExpired(time.Now().UTC()))
}
func TestIsExpiredTimestamp(t *testing.T) {
// without setting expiration
meta := Timestamp()
assert.NotNil(t, meta)
// ensure time passed
time.Sleep(1 * time.Microsecond)
assert.True(t, meta.Signed.IsExpired(time.Now().UTC()))
// setting expiration in 2 days from now
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Timestamp(expire)
assert.NotNil(t, meta)
assert.False(t, meta.Signed.IsExpired(time.Now().UTC()))
}
func TestIsExpiredTargets(t *testing.T) {
// without setting expiration
meta := Targets()
assert.NotNil(t, meta)
// ensure time passed
time.Sleep(1 * time.Microsecond)
assert.True(t, meta.Signed.IsExpired(time.Now().UTC()))
// setting expiration in 2 days from now
expire := time.Now().AddDate(0, 0, 2).UTC()
meta = Targets(expire)
assert.NotNil(t, meta)
assert.False(t, meta.Signed.IsExpired(time.Now().UTC()))
}
func TestUnrecognizedFieldRolesSigned(t *testing.T) {
// unrecognized field to test
// added to the Signed portion of each role type
testUnrecognizedField := map[string]any{"test": "true"}
root := Root(fixedExpire)
root.Signed.UnrecognizedFields = testUnrecognizedField
rootJSON, err := root.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, []byte("{\"signatures\":[],\"signed\":{\"_type\":\"root\",\"consistent_snapshot\":true,\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"keys\":{},\"roles\":{\"root\":{\"keyids\":[],\"threshold\":1},\"snapshot\":{\"keyids\":[],\"threshold\":1},\"targets\":{\"keyids\":[],\"threshold\":1},\"timestamp\":{\"keyids\":[],\"threshold\":1}},\"spec_version\":\"1.0.31\",\"test\":\"true\",\"version\":1}}"), rootJSON)
targets := Targets(fixedExpire)
targets.Signed.UnrecognizedFields = testUnrecognizedField
targetsJSON, err := targets.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, []byte("{\"signatures\":[],\"signed\":{\"_type\":\"targets\",\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"spec_version\":\"1.0.31\",\"targets\":{},\"test\":\"true\",\"version\":1}}"), targetsJSON)
snapshot := Snapshot(fixedExpire)
snapshot.Signed.UnrecognizedFields = testUnrecognizedField
snapshotJSON, err := snapshot.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, []byte("{\"signatures\":[],\"signed\":{\"_type\":\"snapshot\",\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"meta\":{\"targets.json\":{\"version\":1}},\"spec_version\":\"1.0.31\",\"test\":\"true\",\"version\":1}}"), snapshotJSON)
timestamp := Timestamp(fixedExpire)
timestamp.Signed.UnrecognizedFields = testUnrecognizedField
timestampJSON, err := timestamp.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, []byte("{\"signatures\":[],\"signed\":{\"_type\":\"timestamp\",\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"meta\":{\"snapshot.json\":{\"version\":1}},\"spec_version\":\"1.0.31\",\"test\":\"true\",\"version\":1}}"), timestampJSON)
}
func TestUnrecognizedFieldGenericMetadata(t *testing.T) {
// fixed expire
expire := time.Date(2030, 8, 15, 14, 30, 45, 100, time.UTC)
// unrecognized field to test
// added to the generic metadata type
testUnrecognizedField := map[string]any{"test": "true"}
root := Root(expire)
root.UnrecognizedFields = testUnrecognizedField
rootJSON, err := root.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, []byte("{\"signatures\":[],\"signed\":{\"_type\":\"root\",\"consistent_snapshot\":true,\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"keys\":{},\"roles\":{\"root\":{\"keyids\":[],\"threshold\":1},\"snapshot\":{\"keyids\":[],\"threshold\":1},\"targets\":{\"keyids\":[],\"threshold\":1},\"timestamp\":{\"keyids\":[],\"threshold\":1}},\"spec_version\":\"1.0.31\",\"version\":1},\"test\":\"true\"}"), rootJSON)
}
func TestTargetFilesCustomField(t *testing.T) {
// custom JSON to test
testCustomJSON := json.RawMessage([]byte(`{"test":true}`))
// create a targets metadata
targets := Targets(fixedExpire)
assert.NotNil(t, targets)
// create a targetfile with the custom JSON
targetFile := TargetFile()
targetFile.Custom = &testCustomJSON
// add the targetfile to targets metadata
targets.Signed.Targets["testTarget"] = targetFile
targetsJSON, err := targets.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, []byte("{\"signatures\":[],\"signed\":{\"_type\":\"targets\",\"expires\":\"2030-08-15T14:30:45.0000001Z\",\"spec_version\":\"1.0.31\",\"targets\":{\"testTarget\":{\"custom\":{\"test\":true},\"hashes\":{},\"length\":0}},\"version\":1}}"), targetsJSON)
}
func TestFromBytes(t *testing.T) {
root := Root(fixedExpire)
assert.Equal(t, fixedExpire, root.Signed.Expires)
_, err := root.FromBytes(testRootBytes)
assert.NoError(t, err)
assert.Equal(t, fixedExpire, root.Signed.Expires)
assert.Equal(t, ROOT, root.Signed.Type)
assert.True(t, root.Signed.ConsistentSnapshot)
assert.Equal(t, 4, len(root.Signed.Keys))
assert.Contains(t, root.Signed.Roles, ROOT)
assert.Equal(t, 1, root.Signed.Roles[ROOT].Threshold)
assert.NotEmpty(t, root.Signed.Roles[ROOT].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[ROOT].KeyIDs[0])
assert.Equal(t, "roothash", root.Signed.Roles[ROOT].KeyIDs[0])
assert.Contains(t, root.Signed.Roles, SNAPSHOT)
assert.Equal(t, 1, root.Signed.Roles[SNAPSHOT].Threshold)
assert.NotEmpty(t, root.Signed.Roles[SNAPSHOT].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[SNAPSHOT].KeyIDs[0])
assert.Equal(t, "snapshothash", root.Signed.Roles[SNAPSHOT].KeyIDs[0])
assert.Contains(t, root.Signed.Roles, TARGETS)
assert.Equal(t, 1, root.Signed.Roles[TARGETS].Threshold)
assert.NotEmpty(t, root.Signed.Roles[TARGETS].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[TARGETS].KeyIDs[0])
assert.Equal(t, "targetshash", root.Signed.Roles[TARGETS].KeyIDs[0])
assert.Contains(t, root.Signed.Roles, TIMESTAMP)
assert.Equal(t, 1, root.Signed.Roles[TIMESTAMP].Threshold)
assert.NotEmpty(t, root.Signed.Roles[TIMESTAMP].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[TIMESTAMP].KeyIDs[0])
assert.Equal(t, "timestamphash", root.Signed.Roles[TIMESTAMP].KeyIDs[0])
assert.Equal(t, int64(1), root.Signed.Version)
assert.NotEmpty(t, root.Signatures)
assert.Equal(t, "roothash", root.Signatures[0].KeyID)
data := []byte("some data")
h32 := sha256.Sum256(data)
h := h32[:]
assert.Equal(t, HexBytes(h), root.Signatures[0].Signature)
}
func TestToByte(t *testing.T) {
rootBytesExpireStr := "2030-08-15T14:30:45.0000001Z"
rootBytesExpire, err := time.Parse(time.RFC3339, rootBytesExpireStr)
assert.NoError(t, err)
root := Root(rootBytesExpire)
root.Signed.Keys["roothash"] = &Key{Type: "ed25519", Value: KeyVal{PublicKey: "pubrootval"}, Scheme: "ed25519"}
root.Signed.Keys["snapshothash"] = &Key{Type: "ed25519", Value: KeyVal{PublicKey: "pubsval"}, Scheme: "ed25519"}
root.Signed.Keys["targetshash"] = &Key{Type: "ed25519", Value: KeyVal{PublicKey: "pubtrval"}, Scheme: "ed25519"}
root.Signed.Keys["timestamphash"] = &Key{Type: "ed25519", Value: KeyVal{PublicKey: "pubtmval"}, Scheme: "ed25519"}
root.Signed.Roles[ROOT] = &Role{
Threshold: 1,
KeyIDs: []string{"roothash"},
}
root.Signed.Roles[SNAPSHOT] = &Role{
Threshold: 1,
KeyIDs: []string{"snapshothash"},
}
root.Signed.Roles[TARGETS] = &Role{
Threshold: 1,
KeyIDs: []string{"targetshash"},
}
root.Signed.Roles[TIMESTAMP] = &Role{
Threshold: 1,
KeyIDs: []string{"timestamphash"},
}
data := []byte("some data")
h32 := sha256.Sum256(data)
h := h32[:]
hash := map[string]HexBytes{"ed25519": h}
root.Signatures = append(root.Signatures, Signature{KeyID: "roothash", Signature: hash["ed25519"]})
rootBytes, err := root.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, string(testRootBytes), string(rootBytes))
}
func TestFromFile(t *testing.T) {
root := Root(fixedExpire)
_, err := root.FromFile(filepath.Join(TEST_REPOSITORY_DATA, "1.root.json"))
assert.NoError(t, err)
assert.Equal(t, fixedExpire, root.Signed.Expires)
assert.Equal(t, ROOT, root.Signed.Type)
assert.True(t, root.Signed.ConsistentSnapshot)
assert.Equal(t, 4, len(root.Signed.Keys))
assert.Contains(t, root.Signed.Roles, ROOT)
assert.Equal(t, 1, root.Signed.Roles[ROOT].Threshold)
assert.NotEmpty(t, root.Signed.Roles[ROOT].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[ROOT].KeyIDs[0])
assert.Equal(t, "d5fa855fce82db75ec64283e828cc90517df5edf5cdc57e7958a890d6556f5b7", root.Signed.Roles[ROOT].KeyIDs[0])
assert.Contains(t, root.Signed.Roles, SNAPSHOT)
assert.Equal(t, 1, root.Signed.Roles[SNAPSHOT].Threshold)
assert.NotEmpty(t, root.Signed.Roles[SNAPSHOT].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[SNAPSHOT].KeyIDs[0])
assert.Equal(t, "700464ea12f4cb5f06a7512c75b73c0b6eeb2cd42854b085eed5b3c993607cba", root.Signed.Roles[SNAPSHOT].KeyIDs[0])
assert.Contains(t, root.Signed.Roles, TARGETS)
assert.Equal(t, 1, root.Signed.Roles[TARGETS].Threshold)
assert.NotEmpty(t, root.Signed.Roles[TARGETS].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[TARGETS].KeyIDs[0])
assert.Equal(t, "409fb816e403e0c00646665eac21cb8adfab8e318272ca7589b2d1fc0bccb255", root.Signed.Roles[TARGETS].KeyIDs[0])
assert.Contains(t, root.Signed.Roles, TIMESTAMP)
assert.Equal(t, 1, root.Signed.Roles[TIMESTAMP].Threshold)
assert.NotEmpty(t, root.Signed.Roles[TIMESTAMP].KeyIDs)
assert.Contains(t, root.Signed.Keys, root.Signed.Roles[TIMESTAMP].KeyIDs[0])
assert.Equal(t, "0a5842e65e9c8c428354f40708435de6793ac379a275effe40d6358be2de835c", root.Signed.Roles[TIMESTAMP].KeyIDs[0])
assert.Equal(t, SPECIFICATION_VERSION, root.Signed.SpecVersion)
assert.Contains(t, root.Signed.UnrecognizedFields, "test")
assert.Equal(t, "true", root.Signed.UnrecognizedFields["test"])
assert.Equal(t, int64(1), root.Signed.Version)
assert.NotEmpty(t, root.Signatures)
assert.Equal(t, "d5fa855fce82db75ec64283e828cc90517df5edf5cdc57e7958a890d6556f5b7", root.Signatures[0].KeyID)
}
func TestToFile(t *testing.T) {
tmp := os.TempDir()
tmpDir, err := os.MkdirTemp(tmp, "0750")
assert.NoError(t, err)
fileName := filepath.Join(tmpDir, "1.root.json")
assert.NoFileExists(t, fileName)
root, err := Root().FromBytes(testRootBytes)
assert.NoError(t, err)
err = root.ToFile(fileName, false)
assert.NoError(t, err)
assert.FileExists(t, fileName)
data, err := os.ReadFile(fileName)
assert.NoError(t, err)
assert.Equal(t, string(testRootBytes), string(data))
err = os.RemoveAll(tmpDir)
assert.NoError(t, err)
assert.NoFileExists(t, fileName)
}
func TestVerifyDelegate(t *testing.T) {
root := Root(fixedExpire)
err := root.VerifyDelegate("test", root)
assert.EqualError(t, err, "value error: no delegation found for test")
targets := Targets(fixedExpire)
err = targets.VerifyDelegate("test", targets)
assert.EqualError(t, err, "value error: no delegations found")
key, _, err := ed25519.GenerateKey(nil)
assert.NoError(t, err)
delegateeKey, _ := KeyFromPublicKey(key)
delegations := &Delegations{
Keys: map[string]*Key{
delegateeKey.ID(): delegateeKey,
},
Roles: []DelegatedRole{
{
Name: "test",
KeyIDs: []string{delegateeKey.ID()},
},
},
}
targets.Signed.Delegations = delegations
err = targets.VerifyDelegate("test", root)
assert.NoError(t, err)
err = targets.VerifyDelegate("test", targets)
assert.NoError(t, err)
err = targets.VerifyDelegate("non-existing", root)
assert.EqualError(t, err, "value error: no delegation found for non-existing")
err = targets.VerifyDelegate("non-existing", targets)
assert.EqualError(t, err, "value error: no delegation found for non-existing")
targets.Signed.Delegations.Roles[0].Threshold = 1
err = targets.VerifyDelegate("test", targets)
assert.Errorf(t, err, "Verifying test failed, not enough signatures, got %d, want %d", 0, 1)
delegations.Keys["incorrectkey"] = delegations.Keys[delegateeKey.ID()]
delete(delegations.Keys, delegateeKey.ID())
err = targets.VerifyDelegate("test", root)
assert.Errorf(t, err, "key with ID %s not found in test keyids", delegateeKey.ID())
timestamp := Timestamp(fixedExpire)
err = timestamp.VerifyDelegate("test", timestamp)
assert.EqualError(t, err, "type error: call is valid only on delegator metadata (should be either root or targets)")
snapshot := Snapshot(fixedExpire)
err = snapshot.VerifyDelegate("test", snapshot)
assert.EqualError(t, err, "type error: call is valid only on delegator metadata (should be either root or targets)")
}
func TestVerifyLengthHashesTargetFiles(t *testing.T) {
targetFiles := TargetFile()
targetFiles.Hashes = map[string]HexBytes{}
data := []byte{}
err := targetFiles.VerifyLengthHashes(data)
assert.NoError(t, err)
data = []byte("some data")
err = targetFiles.VerifyLengthHashes(data)
assert.Error(t, err, "length/hash verification error: length verification failed - expected 0, got 9")
h32 := sha256.Sum256(data)
h := h32[:]
targetFiles.Hashes["sha256"] = h
targetFiles.Length = int64(len(data))
err = targetFiles.VerifyLengthHashes(data)
assert.NoError(t, err)
targetFiles.Hashes = map[string]HexBytes{"unknownAlg": data}
err = targetFiles.VerifyLengthHashes(data)
assert.Error(t, err, "length/hash verification error: hash verification failed - unknown hashing algorithm - unknownArg")
targetFiles.Hashes = map[string]HexBytes{"sha256": data}
err = targetFiles.VerifyLengthHashes(data)
assert.Error(t, err, "length/hash verification error: hash verification failed - mismatch for algorithm sha256")
}
func TestVerifyLengthHashesMetaFiles(t *testing.T) {
version := int64(0)
metaFile := MetaFile(version)
data := []byte("some data")
metaFile.Hashes = map[string]HexBytes{"unknownAlg": data}
err := metaFile.VerifyLengthHashes(data)
assert.Error(t, err, "length/hash verification error: hash verification failed - unknown hashing algorithm - unknownArg")
metaFile.Hashes = map[string]HexBytes{"sha256": data}
err = metaFile.VerifyLengthHashes(data)
assert.Error(t, err, "length/hash verification error: hash verification failed - mismatch for algorithm sha256")
h32 := sha256.Sum256(data)
h := h32[:]
metaFile.Hashes = map[string]HexBytes{"sha256": h}
err = metaFile.VerifyLengthHashes(data)
assert.NoError(t, err)
incorrectData := []byte("another data")
err = metaFile.VerifyLengthHashes(incorrectData)
assert.Error(t, err, "length/hash verification error: length verification failed - expected 0, got 9")
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/multirepo/ 0000775 0000000 0000000 00000000000 14706111210 0025131 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/multirepo/multirepo.go 0000664 0000000 0000000 00000032205 14706111210 0027502 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package multirepo
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"slices"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
"github.com/theupdateframework/go-tuf/v2/metadata/updater"
)
// The following types represent the map file described in TAP 4
type Mapping struct {
Paths []string `json:"paths"`
Repositories []string `json:"repositories"`
Threshold int `json:"threshold"`
Terminating bool `json:"terminating"`
}
type MultiRepoMapType struct {
Repositories map[string][]string `json:"repositories"`
Mapping []*Mapping `json:"mapping"`
}
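// An illustrative (hypothetical) map.json that would unmarshal into MultiRepoMapType;
// the repository names and URLs below are placeholders, not values defined by this module:
//
//	{
//	  "repositories": {
//	    "repo-a": ["https://example.com/repo-a/metadata"],
//	    "repo-b": ["https://example.com/repo-b/metadata"]
//	  },
//	  "mapping": [
//	    {
//	      "paths": ["*.pkg"],
//	      "repositories": ["repo-a", "repo-b"],
//	      "threshold": 2,
//	      "terminating": true
//	    }
//	  ]
//	}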
// MultiRepoConfig represents the configuration for a set of trusted TUF clients
type MultiRepoConfig struct {
RepoMap *MultiRepoMapType
TrustedRoots map[string][]byte
LocalMetadataDir string
LocalTargetsDir string
DisableLocalCache bool
}
// MultiRepoClient represents a multi-repository TUF client
type MultiRepoClient struct {
TUFClients map[string]*updater.Updater
Config *MultiRepoConfig
}
type targetMatch struct {
targetInfo *metadata.TargetFiles
repositories []string
}
// NewConfig returns configuration for a multi-repo TUF client
func NewConfig(repoMap []byte, roots map[string][]byte) (*MultiRepoConfig, error) {
// error if we don't have the necessary arguments
if len(repoMap) == 0 || len(roots) == 0 {
return nil, fmt.Errorf("failed to create multi-repository config: no map file and/or trusted root metadata is provided")
}
// unmarshal the map file (note: should we expect/support unrecognized values here?)
var mapFile *MultiRepoMapType
if err := json.Unmarshal(repoMap, &mapFile); err != nil {
return nil, err
}
// make sure we have enough trusted root metadata files provided based on the repository list
for repo := range mapFile.Repositories {
// check if we have a trusted root metadata for this repository
_, ok := roots[repo]
if !ok {
return nil, fmt.Errorf("no trusted root metadata provided for repository - %s", repo)
}
}
return &MultiRepoConfig{
RepoMap: mapFile,
TrustedRoots: roots,
}, nil
}
// New returns a multi-repository TUF client. All repositories described in the provided map file are initialized too
func New(config *MultiRepoConfig) (*MultiRepoClient, error) {
// create a multi repo client instance
client := &MultiRepoClient{
Config: config,
TUFClients: map[string]*updater.Updater{},
}
// create TUF clients for each repository listed in the map file
if err := client.initTUFClients(); err != nil {
return nil, err
}
return client, nil
}
// initTUFClients loops through all repositories listed in the map file and creates a TUF client for each
func (client *MultiRepoClient) initTUFClients() error {
log := metadata.GetLogger()
// loop through each repository listed in the map file and initialize it
for repoName, repoURL := range client.Config.RepoMap.Repositories {
log.Info("Initializing", "name", repoName, "url", repoURL[0])
// get the trusted root file from the location specified in the map file relevant to its path
// NOTE: the root.json file for each repository is expected to be in a folder named after that repository, placed in the same folder as the map file
// i.e. <map file folder>/<repository name>/root.json
rootBytes, ok := client.Config.TrustedRoots[repoName]
if !ok {
return fmt.Errorf("failed to get trusted root metadata from config for repository - %s", repoName)
}
// path of where each of the repository's metadata files will be persisted
metadataDir := filepath.Join(client.Config.LocalMetadataDir, repoName)
// location of where the target files will be downloaded (propagated to each client from the multi-repo config)
// WARNING: note that using a single folder for storing targets from various repositories might lead to a conflict
targetsDir := client.Config.LocalTargetsDir
if len(client.Config.LocalTargetsDir) == 0 {
// if it was not set, create a targets folder under each repository so there's no chance of conflict
targetsDir = filepath.Join(metadataDir, "targets")
}
// ensure paths exist, doesn't do anything if caching is disabled
err := client.Config.EnsurePathsExist()
if err != nil {
return err
}
// default config for a TUF Client
cfg, err := config.New(repoURL[0], rootBytes) // support only one mirror for the time being
if err != nil {
return err
}
cfg.LocalMetadataDir = metadataDir
cfg.LocalTargetsDir = targetsDir
cfg.DisableLocalCache = client.Config.DisableLocalCache // propagate global cache policy
// create a new Updater instance for each repository
repoTUFClient, err := updater.New(cfg)
if err != nil {
return fmt.Errorf("failed to create Updater instance: %w", err)
}
// save the client
client.TUFClients[repoName] = repoTUFClient
log.Info("Successfully initialized", "name", repoName, "url", repoURL)
}
return nil
}
// Refresh refreshes all repository clients
func (client *MultiRepoClient) Refresh() error {
log := metadata.GetLogger()
// loop through each initialized TUF client and refresh it
for name, repoTUFClient := range client.TUFClients {
log.Info("Refreshing", "name", name)
err := repoTUFClient.Refresh()
if err != nil {
return err
}
}
return nil
}
// GetTopLevelTargets returns the top-level target files for all repositories
func (client *MultiRepoClient) GetTopLevelTargets() (map[string]*metadata.TargetFiles, error) {
// collection of all target files for all clients
result := map[string]*metadata.TargetFiles{}
// loop through each repository
for _, tufClient := range client.TUFClients {
// loop through the top level targets for each repository
for targetName := range tufClient.GetTopLevelTargets() {
// see if this target should be kept, this goes through the TAP4 search algorithm
targetInfo, _, err := client.GetTargetInfo(targetName)
if err != nil {
// we skip saving this target since there's no way/policy to download it with this map.json file
// possible causes: not enough repositories for that threshold, target info mismatch, etc.
return nil, err
}
// check if this target file is already present in the collection
if val, ok := result[targetName]; ok {
// target file is already present
if !val.Equal(*targetInfo) {
// target files have the same target name but have different target infos
// this means the map.json file allows downloading two different target infos mapped to the same target name
// TODO: confirm if this should raise an error
return nil, fmt.Errorf("target name conflict")
}
// same target info, no need to do anything
} else {
// save the target
result[targetName] = targetInfo
}
}
}
return result, nil
}
// GetTargetInfo returns metadata.TargetFiles instance with information
// for targetPath and a list of repositories that serve the matching target.
// It implements the TAP 4 search algorithm.
func (client *MultiRepoClient) GetTargetInfo(targetPath string) (*metadata.TargetFiles, []string, error) {
terminated := false
// loop through each mapping
for _, eachMap := range client.Config.RepoMap.Mapping {
// loop through each path for this mapping
for _, pathPattern := range eachMap.Paths {
// check if the targetPath matches each path mapping
patternMatched, err := filepath.Match(pathPattern, targetPath)
if err != nil {
// error looking for a match
return nil, nil, err
} else {
if patternMatched {
// if there's a pattern match, loop through all of the repositories listed for that mapping
// and see if we can find a consensus among them to cover the threshold for that mapping
var matchedTargetGroups []targetMatch
for _, repoName := range eachMap.Repositories {
// get target info from that repository
newTargetInfo, err := client.TUFClients[repoName].GetTargetInfo(targetPath)
if err != nil {
// failed to get target info for the given target
// there's probably no such target
// skip the rest and proceed trying to get target info from the next repository
continue
}
found := false
// loop through all target infos we found so far
for i, target := range matchedTargetGroups {
// see if we already have found one like that
if target.targetInfo.Equal(*newTargetInfo) {
found = true
// if so, update its repository list
if slices.Contains(target.repositories, repoName) {
// we have a duplicate repository listed in the mapping
// decide if we should error out here
// nevertheless we won't take it into account when we calculate the threshold
} else {
// a new repository vouched for this target
matchedTargetGroups[i].repositories = append(target.repositories, repoName)
}
}
}
// this target was not part of the list so far, so we should add it
if !found {
matchedTargetGroups = append(matchedTargetGroups, targetMatch{
targetInfo: newTargetInfo,
repositories: []string{repoName},
})
}
// proceed with searching for this target in the next repository
}
// we went through all repositories listed in that mapping
// let's see if we have matched the threshold consensus for the given target file
var result *targetMatch
for _, target := range matchedTargetGroups {
// compare thresholds for each target info we found with the value stated for its mapping
if len(target.repositories) >= eachMap.Threshold {
// this target has enough repositories signed for it
if result != nil {
// it seems there's more than one target info matching the threshold for this mapping
// it is a conflict since it's impossible to establish a consensus which of the found targets
// we should actually trust, so we error out
return nil, nil, fmt.Errorf("more than one target info matching the necessary threshold value")
} else {
// this is the first target we found matching the necessary threshold so save it
result = &target
}
}
}
// search finished, see if we have found a matching target
if result != nil {
return result.targetInfo, result.repositories, nil
}
// if we are here, we haven't found enough target infos to match the threshold number
// for this mapping
if eachMap.Terminating {
// stop the search if this was a terminating map
terminated = eachMap.Terminating
break
}
}
}
// no match, continue looking at the next path pattern from this mapping
}
// stop the search if this was a terminating map, otherwise continue with the next mapping
if terminated {
break
}
}
// looped through all mappings and there was nothing, not even a terminating one
return nil, nil, fmt.Errorf("target info not found")
}
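// A small worked example of the search above (names are hypothetical): given a mapping
// {"paths": ["*.pkg"], "repositories": ["repo-a", "repo-b"], "threshold": 2},
// a lookup for "app.pkg" queries repo-a and repo-b. Only if both return an equal
// TargetFiles entry does that group reach the threshold of 2, and GetTargetInfo
// returns that target info together with ["repo-a", "repo-b"] as the vouching repositories.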
// DownloadTarget downloads the target file specified by targetFile
func (client *MultiRepoClient) DownloadTarget(repos []string, targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) {
log := metadata.GetLogger()
for _, repoName := range repos {
// see if the target is already present locally
targetPath, targetBytes, err := client.TUFClients[repoName].FindCachedTarget(targetFile, filePath)
if err != nil {
return "", nil, err
}
if len(targetPath) != 0 && len(targetBytes) != 0 {
// we already got the target for this target info cached locally, so return it
log.Info("Target already present locally from repo", "target", targetFile.Path, "repo", repoName)
return targetPath, targetBytes, nil
}
// not present locally, so let's try to download it
targetPath, targetBytes, err = client.TUFClients[repoName].DownloadTarget(targetFile, filePath, targetBaseURL)
if err != nil {
// TODO: decide if we should error if one repository serves the expected target info, but we fail to download the actual target
// try downloading the target from the next available repository
continue
}
// we got the target for this target info, so return it
log.Info("Downloaded target from repo", "target", targetFile.Path, "repo", repoName)
return targetPath, targetBytes, nil
}
// error out as we haven't succeeded downloading the target file
return "", nil, fmt.Errorf("failed to download target file %s", targetFile.Path)
}
func (cfg *MultiRepoConfig) EnsurePathsExist() error {
if cfg.DisableLocalCache {
return nil
}
for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} {
err := os.MkdirAll(path, os.ModePerm)
if err != nil {
return err
}
}
return nil
}
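// A minimal usage sketch (illustrative only, not part of the package API) tying the
// functions above together; the file names, trusted root bytes and target path are
// hypothetical and error handling is elided:
//
//	mapBytes, _ := os.ReadFile("map.json")
//	roots := map[string][]byte{"repo-a": rootABytes, "repo-b": rootBBytes} // trusted root.json per repository
//	cfg, _ := multirepo.NewConfig(mapBytes, roots)
//	cfg.LocalMetadataDir = "metadata"
//	cfg.LocalTargetsDir = "download"
//	client, _ := multirepo.New(cfg)
//	_ = client.Refresh()
//	targetInfo, repos, _ := client.GetTargetInfo("app.pkg")
//	path, data, _ := client.DownloadTarget(repos, targetInfo, "", "")
//	_, _ = path, data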
golang-github-theupdateframework-go-tuf-2.0.2/metadata/repository/ 0000775 0000000 0000000 00000000000 14706111210 0025330 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/repository/repository.go 0000664 0000000 0000000 00000004544 14706111210 0030105 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package repository
import (
"github.com/theupdateframework/go-tuf/v2/metadata"
)
// repositoryType struct for storing metadata
type repositoryType struct {
root *metadata.Metadata[metadata.RootType]
snapshot *metadata.Metadata[metadata.SnapshotType]
timestamp *metadata.Metadata[metadata.TimestampType]
targets map[string]*metadata.Metadata[metadata.TargetsType]
}
// New creates an empty repository instance
func New() *repositoryType {
return &repositoryType{
targets: map[string]*metadata.Metadata[metadata.TargetsType]{},
}
}
// Root returns metadata of type Root
func (r *repositoryType) Root() *metadata.Metadata[metadata.RootType] {
return r.root
}
// SetRoot sets metadata of type Root
func (r *repositoryType) SetRoot(meta *metadata.Metadata[metadata.RootType]) {
r.root = meta
}
// Snapshot returns metadata of type Snapshot
func (r *repositoryType) Snapshot() *metadata.Metadata[metadata.SnapshotType] {
return r.snapshot
}
// SetSnapshot sets metadata of type Snapshot
func (r *repositoryType) SetSnapshot(meta *metadata.Metadata[metadata.SnapshotType]) {
r.snapshot = meta
}
// Timestamp returns metadata of type Timestamp
func (r *repositoryType) Timestamp() *metadata.Metadata[metadata.TimestampType] {
return r.timestamp
}
// SetTimestamp sets metadata of type Timestamp
func (r *repositoryType) SetTimestamp(meta *metadata.Metadata[metadata.TimestampType]) {
r.timestamp = meta
}
// Targets returns metadata of type Targets
func (r *repositoryType) Targets(name string) *metadata.Metadata[metadata.TargetsType] {
return r.targets[name]
}
// SetTargets sets metadata of type Targets
func (r *repositoryType) SetTargets(name string, meta *metadata.Metadata[metadata.TargetsType]) {
r.targets[name] = meta
}
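// A brief sketch (illustrative only) of assembling an in-memory repository from freshly
// generated metadata; the expiry value is an arbitrary placeholder chosen by the caller:
//
//	expire := time.Now().AddDate(0, 0, 30)
//	repo := repository.New()
//	repo.SetRoot(metadata.Root(expire))
//	repo.SetTargets(metadata.TARGETS, metadata.Targets(expire))
//	repo.SetSnapshot(metadata.Snapshot(expire))
//	repo.SetTimestamp(metadata.Timestamp(expire))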
golang-github-theupdateframework-go-tuf-2.0.2/metadata/repository/repository_test.go 0000664 0000000 0000000 00000004006 14706111210 0031135 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package repository
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
func TestNewRepository(t *testing.T) {
repo := New()
now := time.Now().UTC()
safeExpiry := now.Truncate(time.Second).AddDate(0, 0, 30)
root := metadata.Root(safeExpiry)
repo.SetRoot(root)
assert.Equal(t, "root", repo.Root().Signed.Type)
assert.Equal(t, int64(1), repo.Root().Signed.Version)
assert.Equal(t, metadata.SPECIFICATION_VERSION, repo.Root().Signed.SpecVersion)
targets := metadata.Targets(safeExpiry)
repo.SetTargets("targets", targets)
assert.Equal(t, "targets", repo.Targets("targets").Signed.Type)
assert.Equal(t, int64(1), repo.Targets("targets").Signed.Version)
assert.Equal(t, metadata.SPECIFICATION_VERSION, repo.Targets("targets").Signed.SpecVersion)
timestamp := metadata.Timestamp(safeExpiry)
repo.SetTimestamp(timestamp)
assert.Equal(t, "timestamp", repo.Timestamp().Signed.Type)
assert.Equal(t, int64(1), repo.Timestamp().Signed.Version)
assert.Equal(t, metadata.SPECIFICATION_VERSION, repo.Timestamp().Signed.SpecVersion)
snapshot := metadata.Snapshot(safeExpiry)
repo.SetSnapshot(snapshot)
assert.Equal(t, "snapshot", repo.Snapshot().Signed.Type)
assert.Equal(t, int64(1), repo.Snapshot().Signed.Version)
assert.Equal(t, metadata.SPECIFICATION_VERSION, repo.Snapshot().Signed.SpecVersion)
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/trustedmetadata/ 0000775 0000000 0000000 00000000000 14706111210 0026304 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/trustedmetadata/trustedmetadata.go 0000664 0000000 0000000 00000033576 14706111210 0032044 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package trustedmetadata
import (
"fmt"
"time"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
// TrustedMetadata struct for storing trusted metadata
type TrustedMetadata struct {
Root *metadata.Metadata[metadata.RootType]
Snapshot *metadata.Metadata[metadata.SnapshotType]
Timestamp *metadata.Metadata[metadata.TimestampType]
Targets map[string]*metadata.Metadata[metadata.TargetsType]
RefTime time.Time
}
// New creates a new TrustedMetadata instance which ensures that the
// collection of metadata in it is valid and trusted through the whole
// client update workflow. It provides easy ways to update the metadata
// with the caller making decisions on what is updated
func New(rootData []byte) (*TrustedMetadata, error) {
res := &TrustedMetadata{
Targets: map[string]*metadata.Metadata[metadata.TargetsType]{},
RefTime: time.Now().UTC(),
}
// load and validate the local root metadata
// valid initial trusted root metadata is required
err := res.loadTrustedRoot(rootData)
if err != nil {
return nil, err
}
return res, nil
}
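// A sketch of the expected client workflow ordering (the byte slices are placeholders
// for metadata fetched by the caller, and error handling is elided); each step returns
// an error if called out of order:
//
//	trusted, _ := trustedmetadata.New(localRootBytes)
//	_, _ = trusted.UpdateRoot(newRootBytes) // optional: apply newer root versions one by one
//	_, _ = trusted.UpdateTimestamp(timestampBytes)
//	_, _ = trusted.UpdateSnapshot(snapshotBytes, false)
//	_, _ = trusted.UpdateTargets(targetsBytes)
//	_, _ = trusted.UpdateDelegatedTargets(role1Bytes, "role1", metadata.TARGETS)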
// UpdateRoot verifies and loads "rootData" as new root metadata.
// Note that an expired intermediate root is considered valid: expiry is
// only checked for the final root in UpdateTimestamp()
func (trusted *TrustedMetadata) UpdateRoot(rootData []byte) (*metadata.Metadata[metadata.RootType], error) {
log := metadata.GetLogger()
if trusted.Timestamp != nil {
return nil, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"}
}
log.Info("Updating root")
// generate root metadata
newRoot, err := metadata.Root().FromBytes(rootData)
if err != nil {
return nil, err
}
// check metadata type matches root
if newRoot.Signed.Type != metadata.ROOT {
return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
}
// verify that new root is signed by trusted root
err = trusted.Root.VerifyDelegate(metadata.ROOT, newRoot)
if err != nil {
return nil, err
}
// verify version
if newRoot.Signed.Version != trusted.Root.Signed.Version+1 {
return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("bad version number, expected %d, got %d", trusted.Root.Signed.Version+1, newRoot.Signed.Version)}
}
// verify that new root is signed by itself
err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
if err != nil {
return nil, err
}
// save root if verified
trusted.Root = newRoot
log.Info("Updated root", "version", trusted.Root.Signed.Version)
return trusted.Root, nil
}
// UpdateTimestamp verifies and loads "timestampData" as new timestamp metadata.
// Note that an intermediate timestamp is allowed to be expired. TrustedMetadata
// will error in this case, but the intermediate timestamp will still be loaded.
// This way a newer timestamp can still be loaded (and the intermediate
// timestamp will be used for rollback protection). An expired timestamp will
// prevent loading snapshot metadata.
func (trusted *TrustedMetadata) UpdateTimestamp(timestampData []byte) (*metadata.Metadata[metadata.TimestampType], error) {
log := metadata.GetLogger()
if trusted.Snapshot != nil {
return nil, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"}
}
// client workflow 5.3.10: Make sure final root is not expired.
if trusted.Root.Signed.IsExpired(trusted.RefTime) {
// no need to check for 5.3.11 (fast forward attack recovery):
// timestamp/snapshot can not yet be loaded at this point
return nil, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"}
}
log.Info("Updating timestamp")
newTimestamp, err := metadata.Timestamp().FromBytes(timestampData)
if err != nil {
return nil, err
}
// check metadata type matches timestamp
if newTimestamp.Signed.Type != metadata.TIMESTAMP {
return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TIMESTAMP, newTimestamp.Signed.Type)}
}
// verify that new timestamp is signed by trusted root
err = trusted.Root.VerifyDelegate(metadata.TIMESTAMP, newTimestamp)
if err != nil {
return nil, err
}
// if an existing trusted timestamp is updated,
// check for a rollback attack
if trusted.Timestamp != nil {
// prevent rolling back timestamp version
if newTimestamp.Signed.Version < trusted.Timestamp.Signed.Version {
return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new timestamp version %d must be >= %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
}
// keep using old timestamp if versions are equal
if newTimestamp.Signed.Version == trusted.Timestamp.Signed.Version {
log.Info("New timestamp version equals the old one", "new", newTimestamp.Signed.Version, "old", trusted.Timestamp.Signed.Version)
return nil, &metadata.ErrEqualVersionNumber{Msg: fmt.Sprintf("new timestamp version %d equals the old one %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
}
// prevent rolling back snapshot version
snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
newSnapshotMeta := newTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
if newSnapshotMeta.Version < snapshotMeta.Version {
return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new snapshot version %d must be >= %d", newSnapshotMeta.Version, snapshotMeta.Version)}
}
}
// expiry not checked to allow old timestamp to be used for rollback
// protection of new timestamp: expiry is checked in UpdateSnapshot()
// save timestamp if verified
trusted.Timestamp = newTimestamp
log.Info("Updated timestamp", "version", trusted.Timestamp.Signed.Version)
// timestamp is loaded: error if it is not valid _final_ timestamp
err = trusted.checkFinalTimestamp()
if err != nil {
return nil, err
}
// all okay
return trusted.Timestamp, nil
}
// checkFinalTimestamp verifies that the trusted timestamp is not expired
func (trusted *TrustedMetadata) checkFinalTimestamp() error {
if trusted.Timestamp.Signed.IsExpired(trusted.RefTime) {
return &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"}
}
return nil
}
// UpdateSnapshot verifies and loads "snapshotData" as new snapshot metadata.
// Note that an intermediate snapshot is allowed to be expired and its version
// is allowed to not match the timestamp meta version: TrustedMetadata
// will error in case of expired metadata or bad versions, but the
// intermediate snapshot will still be loaded. This way a newer snapshot can
// still be loaded (and the intermediate snapshot will be used for rollback
// protection). An expired snapshot, or a snapshot that does not match the
// timestamp meta version, will prevent loading targets.
func (trusted *TrustedMetadata) UpdateSnapshot(snapshotData []byte, isTrusted bool) (*metadata.Metadata[metadata.SnapshotType], error) {
log := metadata.GetLogger()
if trusted.Timestamp == nil {
return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"}
}
if trusted.Targets[metadata.TARGETS] != nil {
return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"}
}
log.Info("Updating snapshot")
// snapshot cannot be loaded if final timestamp is expired
err := trusted.checkFinalTimestamp()
if err != nil {
return nil, err
}
snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
// verify non-trusted data against the hashes in timestamp, if any.
// trusted snapshot data has already been verified once.
if !isTrusted {
err = snapshotMeta.VerifyLengthHashes(snapshotData)
if err != nil {
return nil, err
}
}
newSnapshot, err := metadata.Snapshot().FromBytes(snapshotData)
if err != nil {
return nil, err
}
// check metadata type matches snapshot
if newSnapshot.Signed.Type != metadata.SNAPSHOT {
return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.SNAPSHOT, newSnapshot.Signed.Type)}
}
// verify that new snapshot is signed by trusted root
err = trusted.Root.VerifyDelegate(metadata.SNAPSHOT, newSnapshot)
if err != nil {
return nil, err
}
// version not checked against meta version to allow old snapshot to be
// used in rollback protection: it is checked when targets is updated
// if an existing trusted snapshot is updated, check for rollback attack
if trusted.Snapshot != nil {
for name, info := range trusted.Snapshot.Signed.Meta {
newFileInfo, ok := newSnapshot.Signed.Meta[name]
// prevent removal of any metadata in meta
if !ok {
return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("new snapshot is missing info for %s", name)}
}
// prevent rollback of any metadata versions
if newFileInfo.Version < info.Version {
return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", name, newFileInfo.Version, info.Version)}
}
}
}
// expiry not checked to allow old snapshot to be used for rollback
// protection of new snapshot: it is checked when targets is updated
trusted.Snapshot = newSnapshot
log.Info("Updated snapshot", "version", trusted.Snapshot.Signed.Version)
// snapshot is loaded, but we error if it's not valid _final_ snapshot
err = trusted.checkFinalSnapshot()
if err != nil {
return nil, err
}
// all okay
return trusted.Snapshot, nil
}
// checkFinalSnapshot verifies that the trusted snapshot is not expired and that its version matches the timestamp meta version
func (trusted *TrustedMetadata) checkFinalSnapshot() error {
if trusted.Snapshot.Signed.IsExpired(trusted.RefTime) {
return &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"}
}
snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
if trusted.Snapshot.Signed.Version != snapshotMeta.Version {
return &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %d, got %d", snapshotMeta.Version, trusted.Snapshot.Signed.Version)}
}
return nil
}
// UpdateTargets verifies and loads "targetsData" as new top-level targets metadata.
func (trusted *TrustedMetadata) UpdateTargets(targetsData []byte) (*metadata.Metadata[metadata.TargetsType], error) {
return trusted.UpdateDelegatedTargets(targetsData, metadata.TARGETS, metadata.ROOT)
}
// UpdateDelegatedTargets verifies and loads "targetsData" as new metadata for target "roleName"
func (trusted *TrustedMetadata) UpdateDelegatedTargets(targetsData []byte, roleName, delegatorName string) (*metadata.Metadata[metadata.TargetsType], error) {
log := metadata.GetLogger()
var ok bool
if trusted.Snapshot == nil {
return nil, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"}
}
// targets cannot be loaded if final snapshot is expired or its version
// does not match meta version in timestamp
err := trusted.checkFinalSnapshot()
if err != nil {
return nil, err
}
// check if delegator metadata is present
if delegatorName == metadata.ROOT {
if trusted.Root != nil {
ok = true
} else {
ok = false
}
} else {
_, ok = trusted.Targets[delegatorName]
}
if !ok {
return nil, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"}
}
log.Info("Updating delegated role", "role", roleName, "delegator", delegatorName)
// Verify against the hashes in snapshot, if any
meta, ok := trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
if !ok {
return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("snapshot does not contain information for %s", roleName)}
}
err = meta.VerifyLengthHashes(targetsData)
if err != nil {
return nil, err
}
newDelegate, err := metadata.Targets().FromBytes(targetsData)
if err != nil {
return nil, err
}
// check metadata type matches targets
if newDelegate.Signed.Type != metadata.TARGETS {
return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TARGETS, newDelegate.Signed.Type)}
}
// get delegator metadata and verify the new delegatee
if delegatorName == metadata.ROOT {
err = trusted.Root.VerifyDelegate(roleName, newDelegate)
if err != nil {
return nil, err
}
} else {
err = trusted.Targets[delegatorName].VerifyDelegate(roleName, newDelegate)
if err != nil {
return nil, err
}
}
// check versions
if newDelegate.Signed.Version != meta.Version {
return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", roleName, meta.Version, newDelegate.Signed.Version)}
}
// check expiration
if newDelegate.Signed.IsExpired(trusted.RefTime) {
return nil, &metadata.ErrExpiredMetadata{Msg: fmt.Sprintf("new %s is expired", roleName)}
}
trusted.Targets[roleName] = newDelegate
log.Info("Updated role", "role", roleName, "version", trusted.Targets[roleName].Signed.Version)
return trusted.Targets[roleName], nil
}
// loadTrustedRoot verifies and loads "data" as trusted root metadata.
// Note that an expired initial root is considered valid: expiry is
// only checked for the final root in "UpdateTimestamp()".
func (trusted *TrustedMetadata) loadTrustedRoot(rootData []byte) error {
log := metadata.GetLogger()
// generate root metadata
newRoot, err := metadata.Root().FromBytes(rootData)
if err != nil {
return err
}
// check metadata type matches root
if newRoot.Signed.Type != metadata.ROOT {
return &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
}
// verify root by itself
err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
if err != nil {
return err
}
// save root if verified
trusted.Root = newRoot
log.Info("Loaded trusted root", "version", trusted.Root.Signed.Version)
return nil
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/trustedmetadata/trustedmetadata_test.go 0000664 0000000 0000000 00000062775 14706111210 0033106 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package trustedmetadata
import (
"crypto"
"os"
"path/filepath"
"testing"
"time"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/internal/testutils"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
var allRoles map[string][]byte
func setAllRolesBytes(path string) {
log := metadata.GetLogger()
allRoles = make(map[string][]byte)
rootPath := filepath.Join(path, "root.json")
root, err := os.ReadFile(rootPath)
if err != nil {
log.Error(err, "failed to read root bytes")
os.Exit(1)
}
allRoles[metadata.ROOT] = root
targetsPath := filepath.Join(path, "targets.json")
targets, err := os.ReadFile(targetsPath)
if err != nil {
log.Error(err, "failed to read targets bytes")
os.Exit(1)
}
allRoles[metadata.TARGETS] = targets
snapshotPath := filepath.Join(path, "snapshot.json")
snapshot, err := os.ReadFile(snapshotPath)
if err != nil {
log.Error(err, "failed to read snapshot bytes")
os.Exit(1)
}
allRoles[metadata.SNAPSHOT] = snapshot
timestampPath := filepath.Join(path, "timestamp.json")
timestamp, err := os.ReadFile(timestampPath)
if err != nil {
log.Error(err, "failed to read timestamp bytes")
os.Exit(1)
}
allRoles[metadata.TIMESTAMP] = timestamp
role1Path := filepath.Join(path, "role1.json")
role1, err := os.ReadFile(role1Path)
if err != nil {
log.Error(err, "failed to read role1 bytes")
os.Exit(1)
}
allRoles["role1"] = role1
role2Path := filepath.Join(path, "role2.json")
role2, err := os.ReadFile(role2Path)
if err != nil {
log.Error(err, "failed to read role2 bytes")
os.Exit(1)
}
allRoles["role2"] = role2
}
func TestMain(m *testing.M) {
log := metadata.GetLogger()
repoPath := "../../internal/testutils/repository_data/repository/metadata"
keystorePath := "../../internal/testutils/repository_data/keystore"
targetsPath := "../../internal/testutils/repository_data/repository/targets"
err := testutils.SetupTestDirs(repoPath, targetsPath, keystorePath)
defer testutils.Cleanup()
if err != nil {
log.Error(err, "failed to setup test dirs")
os.Exit(1)
}
setAllRolesBytes(testutils.RepoDir)
m.Run()
}
type modifyRoot func(*metadata.Metadata[metadata.RootType])
func modifyRootMetadata(fn modifyRoot) ([]byte, error) {
log := metadata.GetLogger()
root, err := metadata.Root().FromBytes(allRoles[metadata.ROOT])
if err != nil {
log.Error(err, "failed to create root metadata from bytes")
}
fn(root)
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "root_key"), crypto.SHA256, cryptoutils.SkipPassword)
if err != nil {
log.Error(err, "failed to load signer from pem file")
}
root.ClearSignatures()
_, err = root.Sign(signer)
if err != nil {
log.Error(err, "failed to sign root")
}
return root.ToBytes(true)
}
type modifyTimestamp func(*metadata.Metadata[metadata.TimestampType])
func modifyTimestamptMetadata(fn modifyTimestamp) ([]byte, error) {
log := metadata.GetLogger()
timestamp, err := metadata.Timestamp().FromBytes(allRoles[metadata.TIMESTAMP])
if err != nil {
log.Error(err, "failed to create timestamp metadata from bytes")
}
fn(timestamp)
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "timestamp_key"), crypto.SHA256, cryptoutils.SkipPassword)
if err != nil {
log.Error(err, "failed to load signer from pem file")
}
timestamp.ClearSignatures()
_, err = timestamp.Sign(signer)
if err != nil {
log.Error(err, "failed to sign timestamp")
}
return timestamp.ToBytes(true)
}
type modifySnapshot func(*metadata.Metadata[metadata.SnapshotType])
func modifySnapshotMetadata(fn modifySnapshot) ([]byte, error) {
log := metadata.GetLogger()
snapshot, err := metadata.Snapshot().FromBytes(allRoles[metadata.SNAPSHOT])
if err != nil {
log.Error(err, "failed to create snapshot metadata from bytes")
}
fn(snapshot)
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "snapshot_key"), crypto.SHA256, cryptoutils.SkipPassword)
if err != nil {
log.Error(err, "failed to load signer from pem file")
}
snapshot.ClearSignatures()
_, err = snapshot.Sign(signer)
if err != nil {
log.Error(err, "failed to sign snapshot")
}
return snapshot.ToBytes(true)
}
type modifyTargets func(*metadata.Metadata[metadata.TargetsType])
func modifyTargetsMetadata(fn modifyTargets) ([]byte, error) {
log := metadata.GetLogger()
targets, err := metadata.Targets().FromBytes(allRoles[metadata.TARGETS])
if err != nil {
log.Error(err, "failed to create targets metadata from bytes")
}
fn(targets)
signer, err := signature.LoadSignerFromPEMFile(filepath.Join(testutils.KeystoreDir, "targets_key"), crypto.SHA256, cryptoutils.SkipPassword)
if err != nil {
log.Error(err, "failed to load signer from pem file")
}
targets.ClearSignatures()
_, err = targets.Sign(signer)
if err != nil {
log.Error(err, "failed to sign targets")
}
return targets.ToBytes(true)
}
func TestUpdate(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.NoError(t, err)
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.NoError(t, err)
_, err = trustedSet.UpdateDelegatedTargets(allRoles["role1"], "role1", metadata.TARGETS)
assert.NoError(t, err)
_, err = trustedSet.UpdateDelegatedTargets(allRoles["role2"], "role2", "role1")
assert.NoError(t, err)
// The 4 top level metadata objects + 2 additional delegated targets
// self.assertTrue(len(self.trusted_set), 6)
assert.NotNil(t, trustedSet.Root)
assert.NotNil(t, trustedSet.Timestamp)
assert.NotNil(t, trustedSet.Snapshot)
assert.NotNil(t, trustedSet.Targets)
}
func TestOutOfOrderOps(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
// Update snapshot before timestamp
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"})
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
// Update root after timestamp
_, err = trustedSet.UpdateRoot(allRoles[metadata.ROOT])
assert.ErrorIs(t, err, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"})
// Update targets before snapshot
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.ErrorIs(t, err, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"})
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.NoError(t, err)
// Update timestamp after snapshot
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.ErrorIs(t, err, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"})
// Update delegated targets before targets
_, err = trustedSet.UpdateDelegatedTargets(allRoles["role1"], "role1", metadata.TARGETS)
assert.ErrorIs(t, err, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"})
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.NoError(t, err)
// Update snapshot after successful targets update
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"})
_, err = trustedSet.UpdateDelegatedTargets(allRoles["role1"], "role1", metadata.TARGETS)
assert.NoError(t, err)
}
func TestRootWithInvalidJson(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
// Test loading initial root and root update
// root is not json
_, err = trustedSet.UpdateRoot([]byte(""))
assert.ErrorContains(t, err, "unexpected end of JSON input")
// root is not valid
root, err := metadata.Root().FromBytes(allRoles[metadata.ROOT])
assert.NoError(t, err)
root.Signed.Version += 1
rootBytes, err := root.ToBytes(true)
assert.NoError(t, err)
_, err = trustedSet.UpdateRoot(rootBytes)
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying root failed, not enough signatures, got 0, want 1"})
// metadata is of wrong type
_, err = trustedSet.UpdateRoot(allRoles[metadata.SNAPSHOT])
assert.ErrorIs(t, err, &metadata.ErrValue{Msg: "expected metadata type root, got - snapshot"})
}
func TestTopLevelMetadataWithInvalidJSON(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
//TIMESTAMP
// timestamp is not json
_, err = trustedSet.UpdateTimestamp([]byte(""))
assert.ErrorContains(t, err, "unexpected end of JSON input")
// timestamp is not valid
timestamp, err := metadata.Timestamp().FromBytes(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
properTimestampBytes, err := timestamp.ToBytes(true)
assert.NoError(t, err)
timestamp.Signed.Version += 1
timestampBytes, err := timestamp.ToBytes(true)
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(timestampBytes)
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying timestamp failed, not enough signatures, got 0, want 1"})
// timestamp is of wrong type
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.ROOT])
assert.ErrorIs(t, err, &metadata.ErrValue{Msg: "expected metadata type timestamp, got - root"})
// SNAPSHOT
_, err = trustedSet.UpdateTimestamp(properTimestampBytes)
assert.NoError(t, err)
// snapshot is not json
_, err = trustedSet.UpdateSnapshot([]byte(""), false)
assert.ErrorContains(t, err, "unexpected end of JSON input")
// snapshot is not valid
snapshot, err := metadata.Snapshot().FromBytes(allRoles[metadata.SNAPSHOT])
assert.NoError(t, err)
properSnapshotBytes, err := snapshot.ToBytes(true)
assert.NoError(t, err)
snapshot.Signed.Version += 1
snapshotBytes, err := snapshot.ToBytes(true)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(snapshotBytes, false)
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying snapshot failed, not enough signatures, got 0, want 1"})
// snapshot is of wrong type
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.ROOT], false)
assert.ErrorIs(t, err, &metadata.ErrValue{Msg: "expected metadata type snapshot, got - root"})
// TARGETS
_, err = trustedSet.UpdateSnapshot(properSnapshotBytes, false)
assert.NoError(t, err)
// targets is not json
_, err = trustedSet.UpdateTargets([]byte(""))
assert.ErrorContains(t, err, "unexpected end of JSON input")
// targets is not valid
targets, err := metadata.Targets().FromBytes(allRoles[metadata.TARGETS])
assert.NoError(t, err)
targets.Signed.Version += 1
targetsBytes, err := targets.ToBytes(true)
assert.NoError(t, err)
_, err = trustedSet.UpdateTargets(targetsBytes)
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying targets failed, not enough signatures, got 0, want 1"})
// targets is of wrong type
_, err = trustedSet.UpdateTargets(allRoles[metadata.ROOT])
assert.ErrorIs(t, err, &metadata.ErrValue{Msg: "expected metadata type targets, got - root"})
}
func TestUpdateRootNewRoot(t *testing.T) {
// Test that root can be updated with a new valid version
modifyRootVersion := func(root *metadata.Metadata[metadata.RootType]) {
root.Signed.Version += 1
}
root, err := modifyRootMetadata(modifyRootVersion)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateRoot(root)
assert.NoError(t, err)
}
func TestUpdateRootNewRootFailThresholdVerification(t *testing.T) {
// Increase threshold in new root, do not add enough keys
bumpRootThreshold := func(root *metadata.Metadata[metadata.RootType]) {
root.Signed.Version += 1
root.Signed.Roles[metadata.ROOT].Threshold += 1
}
root, err := modifyRootMetadata(bumpRootThreshold)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateRoot(root)
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying root failed, not enough signatures, got 1, want 2"})
}
func TestUpdateRootNewRootVerSameAsTrustedRootVer(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateRoot(allRoles[metadata.ROOT])
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "bad version number, expected 2, got 1"})
}
func TestRootExpiredFinalRoot(t *testing.T) {
// Test that an expired root is rejected once it becomes the final root
modifyRootExpiry := func(root *metadata.Metadata[metadata.RootType]) {
root.Signed.Expires = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
}
// Intermediate root can be expired
root, err := modifyRootMetadata(modifyRootExpiry)
assert.NoError(t, err)
trustedSet, err := New(root)
assert.NoError(t, err)
// Update timestamp to trigger final root expiry check
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"})
}
func TestUpdateTimestampNewTimestampVerBelowTrustedVer(t *testing.T) {
// newTimestamp.Version < trustedTimestamp.Version
modifyTimestampVersion := func(timestamp *metadata.Metadata[metadata.TimestampType]) {
timestamp.Signed.Version = 3
}
timestamp, err := modifyTimestamptMetadata(modifyTimestampVersion)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(timestamp)
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "new timestamp version 1 must be >= 3"})
}
func TestUpdateTimestampWithSameTimestamp(t *testing.T) {
// Test that timestamp is NOT updated if:
// newTimestamp.Version = trustedTimestamp.Version
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
initialTimestamp := trustedSet.Timestamp
// Update timestamp with the same version.
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
// EqualVersionNumberError
assert.ErrorIs(t, err, &metadata.ErrEqualVersionNumber{Msg: "new timestamp version 1 equals the old one 1"})
// Verify that the timestamp object was not updated.
assert.Equal(t, initialTimestamp, trustedSet.Timestamp)
}
func TestUpdateTimestampSnapshotVerBelowCurrent(t *testing.T) {
bumpSnapshotVersion := func(timestamp *metadata.Metadata[metadata.TimestampType]) {
timestamp.Signed.Meta["snapshot.json"].Version = 2
// The timestamp version must be increased to initiate an update.
timestamp.Signed.Version += 1
}
// Set current known snapshot.json version to 2
timestamp, err := modifyTimestamptMetadata(bumpSnapshotVersion)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(timestamp)
assert.NoError(t, err)
// new timestamp meta version < trusted timestamp meta version
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "new timestamp version 1 must be >= 2"})
}
func TestUpdateTimestampExpired(t *testing.T) {
// New timestamp has expired
modifyTimestampExpiry := func(timestamp *metadata.Metadata[metadata.TimestampType]) {
timestamp.Signed.Expires = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
}
// An expired intermediate timestamp is loaded but an ErrExpiredMetadata error is returned
timestamp, err := modifyTimestamptMetadata(modifyTimestampExpiry)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(timestamp)
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"})
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"})
}
func TestUpdateSnapshotLengthOrHashMismatch(t *testing.T) {
modifySnapshotLength := func(timestamp *metadata.Metadata[metadata.TimestampType]) {
timestamp.Signed.Meta["snapshot.json"].Length = 1
}
// Set known snapshot.json length to 1
timestamp, err := modifyTimestamptMetadata(modifySnapshotLength)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(timestamp)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrLengthOrHashMismatch{Msg: "length verification failed - expected 1, got 652"})
}
func TestUpdateSnapshotFailThresholdVerification(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
snapshot, err := metadata.Snapshot().FromBytes(allRoles[metadata.SNAPSHOT])
assert.NoError(t, err)
snapshot.ClearSignatures()
snapshotBytes, err := snapshot.ToBytes(true)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(snapshotBytes, false)
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying snapshot failed, not enough signatures, got 0, want 1"})
}
func TestUpdateSnapshotVersionDivergeTimestampSnapshotVersion(t *testing.T) {
modifyTimestampVersion := func(timestamp *metadata.Metadata[metadata.TimestampType]) {
timestamp.Signed.Meta["snapshot.json"].Version = 2
}
timestamp, err := modifyTimestamptMetadata(modifyTimestampVersion)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(timestamp)
assert.NoError(t, err)
// If the intermediate snapshot version is incorrect, it is loaded but an error is also returned
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected 2, got 1"})
// Targets update starts but fails if snapshot version does not match
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected 2, got 1"})
}
// Update all metadata roles besides targets.
func updateAllBesidesTargets(trustedSet *TrustedMetadata, timestampBytes []byte, snapshotBytes []byte) error {
if len(timestampBytes) <= 0 {
timestampBytes = allRoles[metadata.TIMESTAMP]
}
_, err := trustedSet.UpdateTimestamp(timestampBytes)
if err != nil {
return err
}
if len(snapshotBytes) <= 0 {
snapshotBytes = allRoles[metadata.SNAPSHOT]
}
_, err = trustedSet.UpdateSnapshot(snapshotBytes, false)
if err != nil {
return err
}
return nil
}
func TestUpdateSnapshotFileRemovedFromMeta(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
err = updateAllBesidesTargets(trustedSet, allRoles[metadata.TIMESTAMP], []byte{})
assert.NoError(t, err)
removeFileFromMeta := func(snaphot *metadata.Metadata[metadata.SnapshotType]) {
delete(snaphot.Signed.Meta, "targets.json")
}
// Test removing a meta_file in new_snapshot compared to the old snapshot
snapshot, err := modifySnapshotMetadata(removeFileFromMeta)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(snapshot, false)
assert.ErrorIs(t, err, &metadata.ErrRepository{Msg: "new snapshot is missing info for targets.json"})
}
func TestUpdateSnapshotMetaVersionDecreases(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
modifyMetaVersion := func(snaphot *metadata.Metadata[metadata.SnapshotType]) {
snaphot.Signed.Meta["targets.json"].Version += 1
}
snapshot, err := modifySnapshotMetadata(modifyMetaVersion)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(snapshot, false)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected targets.json version 1, got 2"})
}
func TestUpdateSnapshotExpiredNewSnapshot(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
modifySnapshotExpired := func(snaphot *metadata.Metadata[metadata.SnapshotType]) {
snaphot.Signed.Expires = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
}
// An expired intermediate snapshot is loaded but an error is returned
snapshot, err := modifySnapshotMetadata(modifySnapshotExpired)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(snapshot, false)
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"})
// Targets update does start but fails because snapshot is expired
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"})
}
func TestUpdateSnapshotSuccessfulRollbackChecks(t *testing.T) {
// Load a "local" timestamp, then update to newer one:
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(allRoles[metadata.TIMESTAMP])
assert.NoError(t, err)
bumpMetaVersion := func(timestamp *metadata.Metadata[metadata.TimestampType]) {
timestamp.Signed.Meta["snapshot.json"].Version += 1
// The timestamp version must be increased to initiate an update.
timestamp.Signed.Version += 1
}
newTimestamp, err := modifyTimestamptMetadata(bumpMetaVersion)
assert.NoError(t, err)
_, err = trustedSet.UpdateTimestamp(newTimestamp)
assert.NoError(t, err)
// Load a "local" snapshot with mismatching version (loading happens but
// ErrBadVersionNumber is raised), then update to newer one:
_, err = trustedSet.UpdateSnapshot(allRoles[metadata.SNAPSHOT], false)
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected 2, got 1"})
bumpVersion := func(snapahot *metadata.Metadata[metadata.SnapshotType]) {
snapahot.Signed.Version += 1
}
newSnapshot, err := modifySnapshotMetadata(bumpVersion)
assert.NoError(t, err)
_, err = trustedSet.UpdateSnapshot(newSnapshot, false)
assert.NoError(t, err)
// Update targets to trigger final snapshot meta version check
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.NoError(t, err)
}
func TestUpdateTargetsNoMetaInSnapshot(t *testing.T) {
clearMeta := func(snapshot *metadata.Metadata[metadata.SnapshotType]) {
for key := range snapshot.Signed.Meta {
delete(snapshot.Signed.Meta, key)
}
}
snapshot, err := modifySnapshotMetadata(clearMeta)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
err = updateAllBesidesTargets(trustedSet, allRoles[metadata.TIMESTAMP], snapshot)
assert.NoError(t, err)
// Remove meta information with information about targets from snapshot
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.ErrorIs(t, err, &metadata.ErrRepository{Msg: "snapshot does not contain information for targets"})
}
func TestUpdateTargetsHashDivergeFromSnapshotMetaHash(t *testing.T) {
modifyMetaLength := func(snapshot *metadata.Metadata[metadata.SnapshotType]) {
for metafilePath := range snapshot.Signed.Meta {
snapshot.Signed.Meta[metafilePath] = &metadata.MetaFiles{
Version: 1,
Length: 1,
}
}
}
snapshot, err := modifySnapshotMetadata(modifyMetaLength)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
err = updateAllBesidesTargets(trustedSet, allRoles[metadata.TIMESTAMP], snapshot)
assert.NoError(t, err)
// Observed hash != stored hash in snapshot meta for targets
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.ErrorIs(t, err, &metadata.ErrLengthOrHashMismatch{Msg: "length verification failed - expected 1, got 1266"})
}
func TestUpdateTargetsVersionDivergeSnapshotMetaVersion(t *testing.T) {
modifyMeta := func(snapshot *metadata.Metadata[metadata.SnapshotType]) {
for metafilePath := range snapshot.Signed.Meta {
snapshot.Signed.Meta[metafilePath] = &metadata.MetaFiles{Version: 2}
}
}
snapshot, err := modifySnapshotMetadata(modifyMeta)
assert.NoError(t, err)
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
err = updateAllBesidesTargets(trustedSet, allRoles[metadata.TIMESTAMP], snapshot)
assert.NoError(t, err)
// New delegated targets signed version != meta version stored in snapshot
_, err = trustedSet.UpdateTargets(allRoles[metadata.TARGETS])
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected targets version 2, got 1"})
}
func TestUpdateTargetsExpiredNewTarget(t *testing.T) {
trustedSet, err := New(allRoles[metadata.ROOT])
assert.NoError(t, err)
err = updateAllBesidesTargets(trustedSet, allRoles[metadata.TIMESTAMP], allRoles[metadata.SNAPSHOT])
assert.NoError(t, err)
// New delegated target has expired
modifyTargetExpiry := func(targets *metadata.Metadata[metadata.TargetsType]) {
targets.Signed.Expires = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
}
targets, err := modifyTargetsMetadata(modifyTargetExpiry)
assert.NoError(t, err)
_, err = trustedSet.UpdateTargets(targets)
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "new targets is expired"})
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/types.go 0000664 0000000 0000000 00000014777 14706111210 0024624 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package metadata
import (
"encoding/json"
"time"
)
// Generic type constraint
type Roles interface {
RootType | SnapshotType | TimestampType | TargetsType
}
// Define version of the TUF specification
const (
SPECIFICATION_VERSION = "1.0.31"
)
// Define top level role names
const (
ROOT = "root"
SNAPSHOT = "snapshot"
TARGETS = "targets"
TIMESTAMP = "timestamp"
)
var TOP_LEVEL_ROLE_NAMES = [...]string{ROOT, TIMESTAMP, SNAPSHOT, TARGETS}
// Metadata[T Roles] represents a TUF metadata object.
// It provides methods to read from and write to files and byte slices,
// and to create, verify and clear metadata signatures.
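// A hedged sketch of the read/write round trip (the file names below are
// placeholders and error handling is elided):
//
//	var root Metadata[RootType]
//	md, _ := root.FromFile("root.json")   // load metadata from a JSON file
//	raw, _ := md.ToBytes(false)           // serialize back to bytes
//	_ = md.ToFile("root-copy.json", true) // write out, pretty-printed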
type Metadata[T Roles] struct {
Signed T `json:"signed"`
Signatures []Signature `json:"signatures"`
UnrecognizedFields map[string]any `json:"-"`
}
// Signature represents the Signature part of a TUF metadata
type Signature struct {
KeyID string `json:"keyid"`
Signature HexBytes `json:"sig"`
UnrecognizedFields map[string]any `json:"-"`
}
// RootType represents the Signed portion of a root metadata
type RootType struct {
Type string `json:"_type"`
SpecVersion string `json:"spec_version"`
ConsistentSnapshot bool `json:"consistent_snapshot"`
Version int64 `json:"version"`
Expires time.Time `json:"expires"`
Keys map[string]*Key `json:"keys"`
Roles map[string]*Role `json:"roles"`
UnrecognizedFields map[string]any `json:"-"`
}
// SnapshotType represents the Signed portion of a snapshot metadata
type SnapshotType struct {
Type string `json:"_type"`
SpecVersion string `json:"spec_version"`
Version int64 `json:"version"`
Expires time.Time `json:"expires"`
Meta map[string]*MetaFiles `json:"meta"`
UnrecognizedFields map[string]any `json:"-"`
}
// TargetsType represents the Signed portion of a targets metadata
type TargetsType struct {
Type string `json:"_type"`
SpecVersion string `json:"spec_version"`
Version int64 `json:"version"`
Expires time.Time `json:"expires"`
Targets map[string]*TargetFiles `json:"targets"`
Delegations *Delegations `json:"delegations,omitempty"`
UnrecognizedFields map[string]any `json:"-"`
}
// TimestampType represents the Signed portion of a timestamp metadata
type TimestampType struct {
Type string `json:"_type"`
SpecVersion string `json:"spec_version"`
Version int64 `json:"version"`
Expires time.Time `json:"expires"`
Meta map[string]*MetaFiles `json:"meta"`
UnrecognizedFields map[string]any `json:"-"`
}
// Key represents a key in TUF
type Key struct {
Type string `json:"keytype"`
Scheme string `json:"scheme"`
Value KeyVal `json:"keyval"`
id string `json:"-"`
UnrecognizedFields map[string]any `json:"-"`
}
type KeyVal struct {
PublicKey string `json:"public"`
UnrecognizedFields map[string]any `json:"-"`
}
// Role represents one of the top-level roles in TUF
type Role struct {
KeyIDs []string `json:"keyids"`
Threshold int `json:"threshold"`
UnrecognizedFields map[string]any `json:"-"`
}
type HexBytes []byte
type Hashes map[string]HexBytes
// MetaFiles represents the value portion of METAFILES in TUF (used in Snapshot and Timestamp metadata). Used to store information about a particular meta file.
type MetaFiles struct {
Length int64 `json:"length,omitempty"`
Hashes Hashes `json:"hashes,omitempty"`
Version int64 `json:"version"`
UnrecognizedFields map[string]any `json:"-"`
}
// TargetFiles represents the value portion of TARGETS in TUF (used in Targets metadata). Used to store information about a particular target file.
type TargetFiles struct {
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom *json.RawMessage `json:"custom,omitempty"`
Path string `json:"-"`
UnrecognizedFields map[string]any `json:"-"`
}
// Delegations is an optional object which represents delegation roles and their corresponding keys
type Delegations struct {
Keys map[string]*Key `json:"keys"`
Roles []DelegatedRole `json:"roles,omitempty"`
SuccinctRoles *SuccinctRoles `json:"succinct_roles,omitempty"`
UnrecognizedFields map[string]any `json:"-"`
}
// DelegatedRole represents a delegated role in TUF
type DelegatedRole struct {
Name string `json:"name"`
KeyIDs []string `json:"keyids"`
Threshold int `json:"threshold"`
Terminating bool `json:"terminating"`
PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"`
Paths []string `json:"paths,omitempty"`
UnrecognizedFields map[string]any `json:"-"`
}
// SuccinctRoles represents a delegation graph that covers all targets,
// distributing them uniformly over the delegated roles (i.e. bins) in the graph.
type SuccinctRoles struct {
KeyIDs []string `json:"keyids"`
Threshold int `json:"threshold"`
BitLength int `json:"bit_length"`
NamePrefix string `json:"name_prefix"`
UnrecognizedFields map[string]any `json:"-"`
}
// RoleResult represents the name and terminating status of a delegated role that is responsible for targetFilepath
type RoleResult struct {
Name string
Terminating bool
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/updater/ 0000775 0000000 0000000 00000000000 14706111210 0024555 5 ustar 00root root 0000000 0000000 golang-github-theupdateframework-go-tuf-2.0.2/metadata/updater/updater.go 0000664 0000000 0000000 00000061346 14706111210 0026562 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package updater
import (
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
)
// Client update workflow implementation
//
// The "Updater" provides an implementation of the TUF client workflow (ref. https://theupdateframework.github.io/specification/latest/#detailed-client-workflow).
// "Updater" provides an API to query available targets and to download them in a
// secure manner: All downloaded files are verified by signed metadata.
// High-level description of "Updater" functionality:
// - Initializing an "Updater" loads and validates the trusted local root
// metadata: This root metadata is used as the source of trust for all other
// metadata.
// - Refresh() can optionally be called to update and load all top-level
// metadata as described in the specification, using both locally cached
// metadata and metadata downloaded from the remote repository. If refresh is
// not done explicitly, it will happen automatically during the first target
// info lookup.
// - Updater can be used to download targets. For each target:
// - GetTargetInfo() is first used to find information about a
// specific target. This will load new targets metadata as needed (from
// local cache or remote repository).
// - FindCachedTarget() can optionally be used to check if a
// target file is already locally cached.
// - DownloadTarget() downloads a target file and ensures it is
// verified correct by the metadata.
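//
// A minimal usage sketch from a client's point of view (illustrative only:
// the URLs, directories, root bytes and target name are placeholders, and
// error handling is elided):
//
//	cfg, _ := config.New("https://example.com/metadata", rootBytes)
//	cfg.RemoteTargetsURL = "https://example.com/targets"
//	cfg.LocalMetadataDir = "/tmp/tuf/metadata"
//	cfg.LocalTargetsDir = "/tmp/tuf/targets"
//	up, _ := updater.New(cfg)
//	_ = up.Refresh()
//	info, _ := up.GetTargetInfo("file.txt")
//	path, data, _ := up.DownloadTarget(info, "", "")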
type Updater struct {
trusted *trustedmetadata.TrustedMetadata
cfg *config.UpdaterConfig
}
type roleParentTuple struct {
Role string
Parent string
}
// New creates a new Updater instance and loads trusted root metadata
func New(config *config.UpdaterConfig) (*Updater, error) {
// make sure the trusted root metadata and remote URL were provided
if len(config.LocalTrustedRoot) == 0 || len(config.RemoteMetadataURL) == 0 {
return nil, fmt.Errorf("no initial trusted root metadata or remote URL provided")
}
// create a new trusted metadata instance using the trusted root.json
trustedMetadataSet, err := trustedmetadata.New(config.LocalTrustedRoot)
if err != nil {
return nil, err
}
// create an updater instance
updater := &Updater{
cfg: config,
trusted: trustedMetadataSet, // save trusted metadata set
}
// ensure paths exist, doesn't do anything if caching is disabled
err = updater.cfg.EnsurePathsExist()
if err != nil {
return nil, err
}
// persist the initial root metadata to the local metadata folder
err = updater.persistMetadata(metadata.ROOT, updater.cfg.LocalTrustedRoot)
if err != nil {
return nil, err
}
// all okay, return the updater instance
return updater, nil
}
// Refresh loads and possibly refreshes top-level metadata.
// Downloads, verifies, and loads metadata for the top-level roles in the
// specified order (root -> timestamp -> snapshot -> targets) implementing
// all the checks required in the TUF client workflow.
// A Refresh() can be done only once during the lifetime of an Updater.
// If Refresh() has not been explicitly called before the first
// GetTargetInfo() call, it will be done implicitly at that time.
// The metadata for delegated roles is not updated by Refresh():
// that happens on demand during GetTargetInfo(). However, if the
// repository uses consistent snapshots (ref. https://theupdateframework.github.io/specification/latest/#consistent-snapshots),
// then all metadata downloaded by the Updater will use the same consistent repository state.
//
// If UnsafeLocalMode is set, no network interaction is performed, only
// the cached files on disk are used. If the cached data is not complete,
// this call will fail.
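//
// A hedged sketch of a local-only refresh, assuming metadata was already
// cached in cfg.LocalMetadataDir by an earlier online Refresh():
//
//	cfg.UnsafeLocalMode = true
//	up, _ := updater.New(cfg)
//	err := up.Refresh() // uses only the cached files on disk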
func (update *Updater) Refresh() error {
if update.cfg.UnsafeLocalMode {
return update.unsafeLocalRefresh()
}
return update.onlineRefresh()
}
// onlineRefresh implements the TUF client workflow as described for
// the Refresh function.
func (update *Updater) onlineRefresh() error {
err := update.loadRoot()
if err != nil {
return err
}
err = update.loadTimestamp()
if err != nil {
return err
}
err = update.loadSnapshot()
if err != nil {
return err
}
_, err = update.loadTargets(metadata.TARGETS, metadata.ROOT)
if err != nil {
return err
}
return nil
}
// unsafeLocalRefresh tries to load the persisted metadata already cached
// on disk. Note that this is an unsafe function that deviates from the
// TUF specification sections 5.3 to 5.7 (update phases).
// The metadata on disk is still verified against the provided root,
// and expiration dates are checked.
func (update *Updater) unsafeLocalRefresh() error {
// Root is already loaded
// load timestamp
var p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)
data, err := update.loadLocalMetadata(p)
if err != nil {
return err
}
_, err = update.trusted.UpdateTimestamp(data)
if err != nil {
return err
}
// load snapshot
p = filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT)
data, err = update.loadLocalMetadata(p)
if err != nil {
return err
}
_, err = update.trusted.UpdateSnapshot(data, false)
if err != nil {
return err
}
// targets
p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TARGETS)
data, err = update.loadLocalMetadata(p)
if err != nil {
return err
}
// verify and load the new target metadata
_, err = update.trusted.UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT)
if err != nil {
return err
}
return nil
}
// GetTargetInfo returns metadata.TargetFiles instance with information
// for targetPath. The return value can be used as an argument to
// DownloadTarget() and FindCachedTarget().
// If Refresh() has not been called before calling
// GetTargetInfo(), the refresh will be done implicitly.
// As a side-effect this method downloads all the additional (delegated
// targets) metadata it needs to return the target information.
func (update *Updater) GetTargetInfo(targetPath string) (*metadata.TargetFiles, error) {
// do a Refresh() in case there's no trusted targets.json yet
if update.trusted.Targets[metadata.TARGETS] == nil {
err := update.Refresh()
if err != nil {
return nil, err
}
}
return update.preOrderDepthFirstWalk(targetPath)
}
// DownloadTarget downloads the target file specified by targetFile
func (update *Updater) DownloadTarget(targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) {
log := metadata.GetLogger()
var err error
if filePath == "" {
filePath, err = update.generateTargetFilePath(targetFile)
if err != nil {
return "", nil, err
}
}
if targetBaseURL == "" {
if update.cfg.RemoteTargetsURL == "" {
return "", nil, &metadata.ErrValue{Msg: "targetBaseURL must be set in either DownloadTarget() or the Updater struct"}
}
targetBaseURL = ensureTrailingSlash(update.cfg.RemoteTargetsURL)
} else {
targetBaseURL = ensureTrailingSlash(targetBaseURL)
}
targetFilePath := targetFile.Path
targetRemotePath := targetFilePath
consistentSnapshot := update.trusted.Root.Signed.ConsistentSnapshot
if consistentSnapshot && update.cfg.PrefixTargetsWithHash {
hashes := ""
// get first hex value of hashes
for _, v := range targetFile.Hashes {
hashes = hex.EncodeToString(v)
break
}
baseName := filepath.Base(targetFilePath)
dirName, ok := strings.CutSuffix(targetFilePath, "/"+baseName)
if !ok {
// <hash>.<target-name>
targetRemotePath = fmt.Sprintf("%s.%s", hashes, baseName)
} else {
// <dir-prefix>/<hash>.<target-name>
targetRemotePath = fmt.Sprintf("%s/%s.%s", dirName, hashes, baseName)
}
}
fullURL := fmt.Sprintf("%s%s", targetBaseURL, targetRemotePath)
data, err := update.cfg.Fetcher.DownloadFile(fullURL, targetFile.Length, time.Second*15)
if err != nil {
return "", nil, err
}
err = targetFile.VerifyLengthHashes(data)
if err != nil {
return "", nil, err
}
// do not persist the target file if cache is disabled
if !update.cfg.DisableLocalCache {
err = os.WriteFile(filePath, data, 0644)
if err != nil {
return "", nil, err
}
}
log.Info("Downloaded target", "path", targetFile.Path)
return filePath, data, nil
}
// FindCachedTarget checks whether a local file is an up to date target
func (update *Updater) FindCachedTarget(targetFile *metadata.TargetFiles, filePath string) (string, []byte, error) {
var err error
targetFilePath := ""
// do not look for cached target file if cache is disabled
if update.cfg.DisableLocalCache {
return "", nil, nil
}
// get its path if not provided
if filePath == "" {
targetFilePath, err = update.generateTargetFilePath(targetFile)
if err != nil {
return "", nil, err
}
} else {
targetFilePath = filePath
}
// get file content
data, err := readFile(targetFilePath)
if err != nil {
// do not want to return err, instead we say that there's no cached target available
return "", nil, nil
}
// verify if the length and hashes of this target file match the expected values
err = targetFile.VerifyLengthHashes(data)
if err != nil {
// do not want to return err, instead we say that there's no cached target available
return "", nil, nil
}
// if all okay, return its path
return targetFilePath, data, nil
}
// loadTimestamp loads local and remote timestamp metadata
func (update *Updater) loadTimestamp() error {
log := metadata.GetLogger()
// try to read local timestamp
data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP))
if err != nil {
// this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp
log.Info("Local timestamp does not exist")
} else {
// local timestamp exists, let's try to verify it and load it to the trusted metadata set
_, err := update.trusted.UpdateTimestamp(data)
if err != nil {
if errors.Is(err, &metadata.ErrRepository{}) {
// local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors
log.Info("Local timestamp is not valid")
} else {
// another error
return err
}
}
log.Info("Local timestamp is valid")
// all okay, local timestamp exists and it is valid, nevertheless proceed with downloading from remote
}
// load from remote (whether local load succeeded or not)
data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "")
if err != nil {
return err
}
// try to verify and load the newly downloaded timestamp
_, err = update.trusted.UpdateTimestamp(data)
if err != nil {
if errors.Is(err, &metadata.ErrEqualVersionNumber{}) {
// if the new timestamp version is the same as current, discard the
// new timestamp; this is normal and it shouldn't raise any error
return nil
} else {
// another error
return err
}
}
// proceed with persisting the new timestamp
err = update.persistMetadata(metadata.TIMESTAMP, data)
if err != nil {
return err
}
return nil
}
// loadSnapshot loads local (and if needed remote) snapshot metadata
func (update *Updater) loadSnapshot() error {
log := metadata.GetLogger()
// try to read local snapshot
data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT))
if err != nil {
// this means there's no existing local snapshot so we should proceed downloading it without the need to UpdateSnapshot
log.Info("Local snapshot does not exist")
} else {
// successfully read a local snapshot metadata, so let's try to verify and load it to the trusted metadata set
_, err = update.trusted.UpdateSnapshot(data, true)
if err != nil {
// this means snapshot verification/loading failed
if errors.Is(err, &metadata.ErrRepository{}) {
// local snapshot is not valid, proceed downloading from remote; note that this error type includes several other subset errors
log.Info("Local snapshot is not valid")
} else {
// another error
return err
}
} else {
// this means snapshot verification/loading succeeded
log.Info("Local snapshot is valid: not downloading new one")
return nil
}
}
// local snapshot does not exist or is invalid, update from remote
log.Info("Failed to load local snapshot")
if update.trusted.Timestamp == nil {
return fmt.Errorf("trusted timestamp not set")
}
// extract the snapshot meta from the trusted timestamp metadata
snapshotMeta := update.trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
// extract the length of the snapshot metadata to be downloaded
length := snapshotMeta.Length
if length == 0 {
length = update.cfg.SnapshotMaxLength
}
// extract which snapshot version should be downloaded in case of consistent snapshots
version := ""
if update.trusted.Root.Signed.ConsistentSnapshot {
version = strconv.FormatInt(snapshotMeta.Version, 10)
}
// download snapshot metadata
data, err = update.downloadMetadata(metadata.SNAPSHOT, length, version)
if err != nil {
return err
}
// verify and load the new snapshot
_, err = update.trusted.UpdateSnapshot(data, false)
if err != nil {
return err
}
// persist the new snapshot
err = update.persistMetadata(metadata.SNAPSHOT, data)
if err != nil {
return err
}
return nil
}
// loadTargets loads local (and if needed remote) metadata for roleName
func (update *Updater) loadTargets(roleName, parentName string) (*metadata.Metadata[metadata.TargetsType], error) {
log := metadata.GetLogger()
// avoid loading "roleName" more than once during "GetTargetInfo"
role, ok := update.trusted.Targets[roleName]
if ok {
return role, nil
}
// try to read local targets
data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, roleName))
if err != nil {
// this means there's no existing local target file so we should proceed downloading it without the need to UpdateDelegatedTargets
log.Info("Local role does not exist", "role", roleName)
} else {
// successfully read a local targets metadata, so let's try to verify and load it to the trusted metadata set
delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
if err != nil {
// this means targets verification/loading failed
if errors.Is(err, &metadata.ErrRepository{}) {
// local target file is not valid, proceed downloading from remote; note that this error type includes several other subset errors
log.Info("Local role is not valid", "role", roleName)
} else {
// another error
return nil, err
}
} else {
// this means targets verification/loading succeeded
log.Info("Local role is valid: not downloading new one", "role", roleName)
return delegatedTargets, nil
}
}
// local "roleName" does not exist or is invalid, update from remote
log.Info("Failed to load local role", "role", roleName)
if update.trusted.Snapshot == nil {
return nil, fmt.Errorf("trusted snapshot not set")
}
// extract the targets' meta from the trusted snapshot metadata
metaInfo, ok := update.trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
if !ok {
return nil, fmt.Errorf("role %s not found in snapshot", roleName)
}
// extract the length of the target metadata to be downloaded
length := metaInfo.Length
if length == 0 {
length = update.cfg.TargetsMaxLength
}
// extract which target metadata version should be downloaded in case of consistent snapshots
version := ""
if update.trusted.Root.Signed.ConsistentSnapshot {
version = strconv.FormatInt(metaInfo.Version, 10)
}
// download targets metadata
data, err = update.downloadMetadata(roleName, length, version)
if err != nil {
return nil, err
}
// verify and load the new target metadata
delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
if err != nil {
return nil, err
}
// persist the new target metadata
err = update.persistMetadata(roleName, data)
if err != nil {
return nil, err
}
return delegatedTargets, nil
}
// loadRoot loads remote root metadata. It sequentially loads and
// persists on local disk every newer root metadata version
// available on the remote
func (update *Updater) loadRoot() error {
// calculate boundaries
lowerBound := update.trusted.Root.Signed.Version + 1
upperBound := lowerBound + update.cfg.MaxRootRotations
// loop until we find the latest available version of root (download -> verify -> load -> persist)
for nextVersion := lowerBound; nextVersion < upperBound; nextVersion++ {
data, err := update.downloadMetadata(metadata.ROOT, update.cfg.RootMaxLength, strconv.FormatInt(nextVersion, 10))
if err != nil {
// downloading the root metadata failed for some reason
var tmpErr *metadata.ErrDownloadHTTP
if errors.As(err, &tmpErr) {
if tmpErr.StatusCode != http.StatusNotFound && tmpErr.StatusCode != http.StatusForbidden {
// unexpected HTTP status code
return err
}
// 404/403 means current root is newest available, so we can stop the loop and move forward
break
}
// some other error occurred
return err
} else {
// downloading root metadata succeeded, so let's try to verify and load it
_, err = update.trusted.UpdateRoot(data)
if err != nil {
return err
}
// persist root metadata to disk
err = update.persistMetadata(metadata.ROOT, data)
if err != nil {
return err
}
}
}
return nil
}
// preOrderDepthFirstWalk interrogates the tree of target delegations
// in order of appearance (which implicitly orders them by trustworthiness),
// and returns the matching target found in the most trusted role.
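// For illustration (a hypothetical delegation graph): if "targets" delegates
// to "a" and then "b", and "a" delegates to "a1", roles are visited in the
// order targets, a, a1, b; a terminating match in "a" stops the walk before
// "b" is considered.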
func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata.TargetFiles, error) {
log := metadata.GetLogger()
// list of delegations to be interrogated. A (role, parent role) pair
// is needed to load and verify the delegated targets metadata
delegationsToVisit := []roleParentTuple{{
Role: metadata.TARGETS,
Parent: metadata.ROOT,
}}
visitedRoleNames := map[string]bool{}
// pre-order depth-first traversal of the graph of target delegations
for len(visitedRoleNames) <= update.cfg.MaxDelegations && len(delegationsToVisit) > 0 {
// pop the role name from the top of the stack
delegation := delegationsToVisit[len(delegationsToVisit)-1]
delegationsToVisit = delegationsToVisit[:len(delegationsToVisit)-1]
// skip any visited current role to prevent cycles
_, ok := visitedRoleNames[delegation.Role]
if ok {
log.Info("Skipping visited current role", "role", delegation.Role)
continue
}
// the metadata for delegation.Role must be downloaded/updated before
// its targets, delegations, and child roles can be inspected
targets, err := update.loadTargets(delegation.Role, delegation.Parent)
if err != nil {
return nil, err
}
target, ok := targets.Signed.Targets[targetFilePath]
if ok {
log.Info("Found target in current role", "role", delegation.Role)
return target, nil
}
// after pre-order check, add current role to set of visited roles
visitedRoleNames[delegation.Role] = true
if targets.Signed.Delegations != nil {
var childRolesToVisit []roleParentTuple
// note that this may be a slow operation if there are many
// delegated roles
roles := targets.Signed.Delegations.GetRolesForTarget(targetFilePath)
for _, rolesForTarget := range roles {
log.Info("Adding child role", "role", rolesForTarget.Name)
childRolesToVisit = append(childRolesToVisit, roleParentTuple{Role: rolesForTarget.Name, Parent: delegation.Role})
if rolesForTarget.Terminating {
log.Info("Not backtracking to other roles")
delegationsToVisit = []roleParentTuple{}
break
}
}
// push childRolesToVisit in reverse order of appearance
// onto delegationsToVisit. Roles are popped from the end of
// the list
reverseSlice(childRolesToVisit)
delegationsToVisit = append(delegationsToVisit, childRolesToVisit...)
}
}
if len(delegationsToVisit) > 0 {
log.Info("Too many roles left to visit for max allowed delegations",
"roles-left", len(delegationsToVisit),
"allowed-delegations", update.cfg.MaxDelegations)
}
// if this point is reached then target is not found, return nil
return nil, fmt.Errorf("target %s not found", targetFilePath)
}
// persistMetadata writes metadata to disk atomically to avoid data loss
func (update *Updater) persistMetadata(roleName string, data []byte) error {
log := metadata.GetLogger()
// do not persist the metadata if we have disabled local caching
if update.cfg.DisableLocalCache {
return nil
}
// caching enabled, proceed with persisting the metadata locally
fileName := filepath.Join(update.cfg.LocalMetadataDir, fmt.Sprintf("%s.json", url.QueryEscape(roleName)))
// create a temporary file
file, err := os.CreateTemp(update.cfg.LocalMetadataDir, "tuf_tmp")
if err != nil {
return err
}
defer file.Close()
// write the data content to the temporary file
err = os.WriteFile(file.Name(), data, 0644)
if err != nil {
// delete the temporary file if there was an error while writing
errRemove := os.Remove(file.Name())
if errRemove != nil {
log.Info("Failed to delete temporary file", "name", file.Name())
}
return err
}
// can't move/rename an open file on windows, so close it first
err = file.Close()
if err != nil {
return err
}
// if all okay, rename the temporary file to the desired one
err = os.Rename(file.Name(), fileName)
if err != nil {
return err
}
read, err := os.ReadFile(fileName)
if err != nil {
return err
}
if string(read) != string(data) {
return fmt.Errorf("failed to persist metadata")
}
return nil
}
// downloadMetadata downloads a metadata file and returns it as bytes
func (update *Updater) downloadMetadata(roleName string, length int64, version string) ([]byte, error) {
urlPath := ensureTrailingSlash(update.cfg.RemoteMetadataURL)
// build urlPath
if version == "" {
urlPath = fmt.Sprintf("%s%s.json", urlPath, url.QueryEscape(roleName))
} else {
urlPath = fmt.Sprintf("%s%s.%s.json", urlPath, version, url.QueryEscape(roleName))
}
return update.cfg.Fetcher.DownloadFile(urlPath, length, time.Second*15)
}
// generateTargetFilePath generates path from TargetFiles
func (update *Updater) generateTargetFilePath(tf *metadata.TargetFiles) (string, error) {
// LocalTargetsDir can be omitted if caching is disabled
if update.cfg.LocalTargetsDir == "" && !update.cfg.DisableLocalCache {
return "", &metadata.ErrValue{Msg: "LocalTargetsDir must be set if filepath is not given"}
}
// Use URL encoded target path as filename
return filepath.Join(update.cfg.LocalTargetsDir, url.QueryEscape(tf.Path)), nil
}
// loadLocalMetadata reads a local .json file and returns its bytes
func (update *Updater) loadLocalMetadata(roleName string) ([]byte, error) {
return readFile(fmt.Sprintf("%s.json", roleName))
}
// GetTopLevelTargets returns the top-level target files
func (update *Updater) GetTopLevelTargets() map[string]*metadata.TargetFiles {
return update.trusted.Targets[metadata.TARGETS].Signed.Targets
}
// GetTrustedMetadataSet returns the trusted metadata set
func (update *Updater) GetTrustedMetadataSet() trustedmetadata.TrustedMetadata {
return *update.trusted
}
// UnsafeSetRefTime sets the reference time that the updater uses.
// This should only be done in tests.
// Using this function is useful when testing time-related behavior in go-tuf.
func (update *Updater) UnsafeSetRefTime(t time.Time) {
update.trusted.RefTime = t
}
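// IsWindowsPath reports whether path starts with a Windows drive letter
// prefix such as "C:\".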
func IsWindowsPath(path string) bool {
match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path)
return match
}
// ensureTrailingSlash ensures url ends with a slash
func ensureTrailingSlash(url string) string {
if IsWindowsPath(url) {
slash := string(filepath.Separator)
if strings.HasSuffix(url, slash) {
return url
}
return url + slash
}
if strings.HasSuffix(url, "/") {
return url
}
return url + "/"
}
// reverseSlice reverses the elements of a slice in place
func reverseSlice[S ~[]E, E any](s S) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
}
// readFile reads the content of a file and returns its bytes
func readFile(name string) ([]byte, error) {
in, err := os.Open(name)
if err != nil {
return nil, err
}
defer in.Close()
data, err := io.ReadAll(in)
if err != nil {
return nil, err
}
return data, nil
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/updater/updater_consistent_snapshot_test.go 0000664 0000000 0000000 00000016257 14706111210 0034012 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package updater
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/internal/testutils/simulator"
"github.com/theupdateframework/go-tuf/v2/metadata"
)
func TestTopLevelRolesUpdateWithConsistentSnapshotDisabled(t *testing.T) {
// Test if the client fetches and stores metadata files with the
// correct version prefix when ConsistentSnapshot is false
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.MDRoot.Signed.ConsistentSnapshot = false
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
// cleanup fetch tracker metadata
simulator.Sim.FetchTracker.Metadata = []simulator.FTMetadata{}
err = updater.Refresh()
if err != nil {
t.Fatal(err)
}
assert.NoError(t, err)
// metadata files are fetched with the expected version (or None)
expectedSnapshotDisabled := []simulator.FTMetadata{
{Name: "root", Value: 2},
{Name: "root", Value: 3},
{Name: "timestamp", Value: -1},
{Name: "snapshot", Value: -1},
{Name: "targets", Value: -1},
}
assert.EqualValues(t, expectedSnapshotDisabled, simulator.Sim.FetchTracker.Metadata)
// metadata files are always persisted without a version prefix
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
}
func TestTopLevelRolesUpdateWithConsistentSnapshotEnabled(t *testing.T) {
// Test if the client fetches and stores metadata files with the
// correct version prefix when ConsistentSnapshot is true
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.MDRoot.Signed.ConsistentSnapshot = true
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
if updater == nil {
t.Fatal("updater is nil")
}
// cleanup fetch tracker metadata
simulator.Sim.FetchTracker.Metadata = []simulator.FTMetadata{}
err = updater.Refresh()
assert.NoError(t, err)
// metadata files are fetched with the expected version (or None)
expectedSnapshotEnabled := []simulator.FTMetadata{
{Name: "root", Value: 2},
{Name: "root", Value: 3},
{Name: "timestamp", Value: -1},
{Name: "snapshot", Value: 1},
{Name: "targets", Value: 1},
}
assert.EqualValues(t, expectedSnapshotEnabled, simulator.Sim.FetchTracker.Metadata)
// metadata files are always persisted without a version prefix
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
}
func TestDelegatesRolesUpdateWithConsistentSnapshotDisabled(t *testing.T) {
// Test if the client fetches and stores delegated metadata files with
// the correct version prefix when ConsistentSnapshot is false
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.MDRoot.Signed.ConsistentSnapshot = false
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
target := metadata.Targets(simulator.Sim.SafeExpiry)
delegatedRole := metadata.DelegatedRole{
Name: "role1",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"*"},
}
simulator.Sim.AddDelegation("targets", delegatedRole, target.Signed)
delegatedRole = metadata.DelegatedRole{
Name: "..",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"*"},
}
simulator.Sim.AddDelegation("targets", delegatedRole, target.Signed)
delegatedRole = metadata.DelegatedRole{
Name: ".",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"*"},
}
simulator.Sim.AddDelegation("targets", delegatedRole, target.Signed)
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
if updater == nil {
t.Fatal("updater is nil")
}
err = updater.Refresh()
assert.NoError(t, err)
// cleanup fetch tracker metadata
simulator.Sim.FetchTracker.Metadata = []simulator.FTMetadata{}
// trigger updater to fetch the delegated metadata
_, err = updater.GetTargetInfo("anything")
assert.ErrorContains(t, err, "target anything not found")
// metadata files are fetched with the expected version (or None)
expectedSnapshotDisabled := []simulator.FTMetadata{
{Name: "role1", Value: -1},
{Name: "..", Value: -1},
{Name: ".", Value: -1},
}
assert.ElementsMatch(t, expectedSnapshotDisabled, simulator.Sim.FetchTracker.Metadata)
// metadata files are always persisted without a version prefix
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
}
func TestDelegatesRolesUpdateWithConsistentSnapshotEnabled(t *testing.T) {
// Test if the client fetches and stores delegated metadata files with
// the correct version prefix when ConsistentSnapshot is true
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.MDRoot.Signed.ConsistentSnapshot = true
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
target := metadata.Targets(simulator.Sim.SafeExpiry)
delegatedRole := metadata.DelegatedRole{
Name: "role1",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"*"},
}
simulator.Sim.AddDelegation("targets", delegatedRole, target.Signed)
delegatedRole = metadata.DelegatedRole{
Name: "..",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"*"},
}
simulator.Sim.AddDelegation("targets", delegatedRole, target.Signed)
delegatedRole = metadata.DelegatedRole{
Name: ".",
KeyIDs: []string{},
Threshold: 1,
Terminating: false,
Paths: []string{"*"},
}
simulator.Sim.AddDelegation("targets", delegatedRole, target.Signed)
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
if updater == nil {
t.Fatal("updater is nil")
}
err = updater.Refresh()
assert.NoError(t, err)
// cleanup fetch tracker metadata
simulator.Sim.FetchTracker.Metadata = []simulator.FTMetadata{}
// trigger updater to fetch the delegated metadata
_, err = updater.GetTargetInfo("anything")
assert.ErrorContains(t, err, "target anything not found")
// metadata files are fetched with the expected version (or None)
expectedSnapshotEnabled := []simulator.FTMetadata{
{Name: "role1", Value: 1},
{Name: "..", Value: 1},
{Name: ".", Value: 1},
}
assert.ElementsMatch(t, expectedSnapshotEnabled, simulator.Sim.FetchTracker.Metadata)
// metadata files are always persisted without a version prefix
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
}
golang-github-theupdateframework-go-tuf-2.0.2/metadata/updater/updater_top_level_update_test.go 0000664 0000000 0000000 00000107455 14706111210 0033236 0 ustar 00root root 0000000 0000000 // Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//
package updater
import (
"fmt"
"log/slog"
"os"
"path/filepath"
"testing"
"time"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/stretchr/testify/assert"
"github.com/theupdateframework/go-tuf/v2/internal/testutils"
"github.com/theupdateframework/go-tuf/v2/internal/testutils/simulator"
"github.com/theupdateframework/go-tuf/v2/metadata"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
)
func TestMain(m *testing.M) {
err := loadOrResetTrustedRootMetadata()
simulator.PastDateTime = time.Now().UTC().Truncate(24 * time.Hour).Add(-5 * 24 * time.Hour)
if err != nil {
simulator.RepositoryCleanup(simulator.MetadataDir)
slog.Error("Failed to load TrustedRootMetadata", "err", err)
os.Exit(1)
}
defer simulator.RepositoryCleanup(simulator.MetadataDir)
m.Run()
}
func loadOrResetTrustedRootMetadata() error {
// TODO: This should be a t.Helper() function
var err error
simulator.Sim, simulator.MetadataDir, testutils.TargetsDir, err = simulator.InitMetadataDir()
if err != nil {
slog.Error("Failed to initialize metadata dir", "err", err)
return err
}
simulator.RootBytes, err = simulator.GetRootBytes(simulator.MetadataDir)
if err != nil {
slog.Error("Failed to load root bytes", "err", err)
return err
}
return nil
}
func loadUpdaterConfig() (*config.UpdaterConfig, error) {
updaterConfig, err := config.New(simulator.MetadataDir, simulator.RootBytes)
updaterConfig.Fetcher = simulator.Sim
updaterConfig.LocalMetadataDir = simulator.MetadataDir
updaterConfig.LocalTargetsDir = testutils.TargetsDir
return updaterConfig, err
}
func loadUnsafeUpdaterConfig() (*config.UpdaterConfig, error) {
updaterConfig, err := loadUpdaterConfig()
if err != nil {
return nil, err
}
updaterConfig.UnsafeLocalMode = true
return updaterConfig, nil
}
// runRefresh creates a new Updater instance and runs Refresh
func runRefresh(updaterConfig *config.UpdaterConfig, moveInTime time.Time) (Updater, error) {
if len(simulator.Sim.DumpDir) > 0 {
simulator.Sim.Write()
}
updater, err := New(updaterConfig)
if err != nil {
slog.Error("Failed to create new updater config", "err", err)
return Updater{}, err
}
if moveInTime != time.Now() {
updater.UnsafeSetRefTime(moveInTime)
}
return *updater, updater.Refresh()
}
func initUpdater(updaterConfig *config.UpdaterConfig) *Updater {
if len(simulator.Sim.DumpDir) > 0 {
simulator.Sim.Write()
}
updater, err := New(updaterConfig)
if err != nil {
slog.Error("Failed to create new updater config", "err", err)
}
return updater
}
// Asserts that local metadata files exist for 'roles'
func assertFilesExist(t *testing.T, roles []string) {
expectedFiles := []string{}
for _, role := range roles {
expectedFiles = append(expectedFiles, fmt.Sprintf("%s.json", role))
}
localMetadataFiles, err := os.ReadDir(simulator.MetadataDir)
assert.NoError(t, err)
actual := []string{}
for _, file := range localMetadataFiles {
actual = append(actual, file.Name())
}
for _, file := range expectedFiles {
assert.Contains(t, actual, file)
}
}
func assertFilesExact(t *testing.T, roles []string) {
expectedFiles := []string{}
for _, role := range roles {
expectedFiles = append(expectedFiles, fmt.Sprintf("%s.json", role))
}
localMetadataFiles, err := os.ReadDir(simulator.MetadataDir)
assert.NoError(t, err)
actual := []string{}
for _, file := range localMetadataFiles {
actual = append(actual, file.Name())
}
assert.ElementsMatch(t, actual, expectedFiles)
}
// Asserts that local file content is the expected
func assertContentEquals(t *testing.T, role string, version *int) {
expectedContent, err := simulator.Sim.FetchMetadata(role, version)
assert.NoError(t, err)
content, err := os.ReadFile(filepath.Join(simulator.MetadataDir, fmt.Sprintf("%s.json", role)))
assert.NoError(t, err)
assert.Equal(t, string(expectedContent), string(content))
}
func assertVersionEquals(t *testing.T, role string, expectedVersion int64) {
path := filepath.Join(simulator.MetadataDir, fmt.Sprintf("%s.json", role))
switch role {
case metadata.ROOT:
md, err := simulator.Sim.MDRoot.FromFile(path)
assert.NoError(t, err)
assert.Equal(t, md.Signed.Version, expectedVersion)
case metadata.TARGETS:
md, err := simulator.Sim.MDTargets.FromFile(path)
assert.NoError(t, err)
assert.Equal(t, md.Signed.Version, expectedVersion)
case metadata.TIMESTAMP:
md, err := simulator.Sim.MDTimestamp.FromFile(path)
assert.NoError(t, err)
assert.Equal(t, md.Signed.Version, expectedVersion)
case metadata.SNAPSHOT:
md, err := simulator.Sim.MDSnapshot.FromFile(path)
assert.NoError(t, err)
assert.Equal(t, md.Signed.Version, expectedVersion)
}
}
func TestLoadTrustedRootMetadata(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater, err := New(updaterConfig)
assert.NoError(t, err)
assert.Nil(t, err)
if assert.NotNil(t, updater) {
assert.Equal(t, metadata.ROOT, updater.trusted.Root.Signed.Type)
assert.Equal(t, metadata.SPECIFICATION_VERSION, updater.trusted.Root.Signed.SpecVersion)
assert.True(t, updater.trusted.Root.Signed.ConsistentSnapshot)
assert.Equal(t, int64(1), updater.trusted.Root.Signed.Version)
assert.Nil(t, updater.trusted.Snapshot)
assert.Nil(t, updater.trusted.Timestamp)
assert.Empty(t, updater.trusted.Targets)
}
}
func TestUnsafeLoadTrustedRootMetadata(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
updaterConfig, err := loadUnsafeUpdaterConfig()
assert.NoError(t, err)
updater, err := New(updaterConfig)
assert.NoError(t, err)
assert.Nil(t, err)
if assert.NotNil(t, updater) {
assert.Equal(t, metadata.ROOT, updater.trusted.Root.Signed.Type)
assert.Equal(t, metadata.SPECIFICATION_VERSION, updater.trusted.Root.Signed.SpecVersion)
assert.True(t, updater.trusted.Root.Signed.ConsistentSnapshot)
assert.Equal(t, int64(1), updater.trusted.Root.Signed.Version)
assert.Nil(t, updater.trusted.Snapshot)
assert.Nil(t, updater.trusted.Timestamp)
assert.Empty(t, updater.trusted.Targets)
}
}
func TestFirstTimeRefresh(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
assertFilesExist(t, []string{metadata.ROOT})
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
for _, role := range metadata.TOP_LEVEL_ROLE_NAMES {
var version int
if role == metadata.ROOT {
version = 2
}
assertContentEquals(t, role, &version)
}
}
func TestFirstUnsafeTimeRefresh(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
assertFilesExist(t, []string{metadata.ROOT})
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUnsafeUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.Error(t, err)
// As no update was made only the root file should be present
assertFilesExact(t, []string{metadata.ROOT})
}
func TestUnsafeRefresh(t *testing.T) {
// First run a "real" refresh
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
assertFilesExist(t, []string{metadata.ROOT})
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
// Create a new unsafe updater, verify content is still valid
updaterConfig, err = loadUnsafeUpdaterConfig()
assert.NoError(t, err)
updater, err := runRefresh(updaterConfig, time.Now())
assert.NotNil(t, updater)
assert.NoError(t, err)
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
for _, role := range metadata.TOP_LEVEL_ROLE_NAMES {
var version int
if role == metadata.ROOT {
// The root file is written when the updater is
// created, so the version is reset.
version = 1
}
assertContentEquals(t, role, &version)
}
assert.Equal(t, metadata.ROOT, updater.trusted.Root.Signed.Type)
assert.Equal(t, metadata.SPECIFICATION_VERSION, updater.trusted.Root.Signed.SpecVersion)
assert.True(t, updater.trusted.Root.Signed.ConsistentSnapshot)
assert.Equal(t, int64(1), updater.trusted.Root.Signed.Version)
assert.NotNil(t, updater.trusted.Snapshot)
assert.NotNil(t, updater.trusted.Timestamp)
assert.Equal(t, 1, len(updater.trusted.Targets))
}
func TestTrustedRootMissing(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
localTrustedRoot := updaterConfig.LocalTrustedRoot
updaterConfig.LocalTrustedRoot = []byte{}
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorContains(t, err, "no initial trusted root metadata or remote URL provided")
updaterConfig.LocalTrustedRoot = localTrustedRoot
}
func TestTrustedRootExpired(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.MDRoot.Signed.Expires = simulator.PastDateTime
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
err = updater.Refresh()
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"})
assertFilesExist(t, []string{metadata.ROOT})
version := 2
assertContentEquals(t, metadata.ROOT, &version)
updater = initUpdater(updaterConfig)
simulator.Sim.MDRoot.Signed.Expires = simulator.Sim.SafeExpiry
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
err = updater.Refresh()
assert.NoError(t, err)
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
version = 3
assertContentEquals(t, metadata.ROOT, &version)
}
func TestTrustedRootUnsigned(t *testing.T) {
// Local trusted root is not signed
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
rootPath := filepath.Join(simulator.MetadataDir, fmt.Sprintf("%s.json", metadata.ROOT))
mdRoot, err := simulator.Sim.MDRoot.FromFile(rootPath)
assert.NoError(t, err)
mdRoot.ClearSignatures()
err = mdRoot.ToFile(rootPath, true)
assert.NoError(t, err)
newRootBytes, err := os.ReadFile(rootPath)
assert.NoError(t, err)
simulator.RootBytes = newRootBytes
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying root failed, not enough signatures, got 0, want 1"})
assertFilesExist(t, []string{metadata.ROOT})
mdRootAfter, err := simulator.Sim.MDRoot.FromFile(rootPath)
assert.NoError(t, err)
expected, err := mdRoot.ToBytes(false)
assert.NoError(t, err)
actual, err := mdRootAfter.ToBytes(false)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
}
func TestMaxRootRotations(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
updater.cfg.MaxRootRotations = 3
for simulator.Sim.MDRoot.Signed.Version < updater.cfg.MaxRootRotations+3 {
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
}
rootPath := filepath.Join(simulator.MetadataDir, fmt.Sprintf("%s.json", metadata.ROOT))
mdRoot, err := simulator.Sim.MDRoot.FromFile(rootPath)
assert.NoError(t, err)
initialRootVersion := mdRoot.Signed.Version
err = updater.Refresh()
assert.NoError(t, err)
assertVersionEquals(t, metadata.ROOT, initialRootVersion+updaterConfig.MaxRootRotations)
}
func TestIntermediateRootIncorrectlySigned(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.MDRoot.Signed.Version += 1
rootSigners := make(map[string]*signature.Signer)
for k, v := range simulator.Sim.Signers[metadata.ROOT] {
rootSigners[k] = v
}
for k := range simulator.Sim.Signers[metadata.ROOT] {
delete(simulator.Sim.Signers[metadata.ROOT], k)
}
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying root failed, not enough signatures, got 0, want 1"})
assertFilesExist(t, []string{metadata.ROOT})
version := 1
assertContentEquals(t, metadata.ROOT, &version)
}
func TestIntermediateRootExpired(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// The expiration of the new (intermediate) root metadata file
// does not matter yet
// Intermediate root v2 is expired
simulator.Sim.MDRoot.Signed.Expires = simulator.PastDateTime
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
// Final root v3 is up to date
simulator.Sim.MDRoot.Signed.Expires = simulator.Sim.SafeExpiry
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
// Successfully updated to root v3
assertFilesExist(t, metadata.TOP_LEVEL_ROLE_NAMES[:])
version := 3
assertContentEquals(t, metadata.ROOT, &version)
}
func TestNewRootSameVersion(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a rollback_attack
// Repository serves a root file with the same version as previous
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "bad version number, expected 2, got 1"})
// The update failed, latest root version is v1
assertFilesExist(t, []string{metadata.ROOT})
version := 1
assertContentEquals(t, metadata.ROOT, &version)
}
func TestNewRootNonconsecutiveVersion(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Repository serves non-consecutive root version
simulator.Sim.MDRoot.Signed.Version += 2
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "bad version number, expected 2, got 3"})
// The update failed, latest root version is v1
assertFilesExist(t, []string{metadata.ROOT})
version := 1
assertContentEquals(t, metadata.ROOT, &version)
}
func TestFinalRootExpired(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a freeze attack
// Final root is expired
simulator.Sim.MDRoot.Signed.Expires = simulator.PastDateTime
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"})
// The update failed but final root is persisted on the file system
assertFilesExist(t, []string{metadata.ROOT})
version := 2
assertContentEquals(t, metadata.ROOT, &version)
}
func TestNewTimestampUnsigned(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for an arbitrary software attack
delete(simulator.Sim.Signers, metadata.TIMESTAMP)
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying timestamp failed, not enough signatures, got 0, want 1"})
assertFilesExist(t, []string{metadata.ROOT})
}
func TestExpiredTimestampVersionRollback(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Verifies that local timestamp is used in rollback checks even if it is expired.
// The timestamp updates and rollback checks are performed
// with the following timing:
// - Timestamp v1 expiry set to day 7
// - First updater refresh performed on day 0
// - Repository publishes timestamp v2 on day 0
// - Timestamp v2 expiry set to day 21
// - Second updater refresh performed on day 18:
// assert that rollback check uses expired timestamp v1
now := time.Now()
simulator.Sim.MDTimestamp.Signed.Expires = now.Add(time.Hour * 7 * 24)
simulator.Sim.MDTimestamp.Signed.Version = 2
// Make a successful update of valid metadata which stores it in cache
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
simulator.Sim.MDTimestamp.Signed.Expires = now.Add(time.Hour * 21 * 24)
simulator.Sim.MDTimestamp.Signed.Version = 1
// Check that a rollback protection is performed even if
// local timestamp has expired
moveInTime := time.Now().Add(time.Hour * 18 * 24)
_, err = runRefresh(updaterConfig, moveInTime)
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "new timestamp version 1 must be >= 2"})
assertVersionEquals(t, metadata.TIMESTAMP, 2)
}
func TestNewTimestampVersionRollback(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a rollback attack
simulator.Sim.MDTimestamp.Signed.Version = 2
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
simulator.Sim.MDTimestamp.Signed.Version = 1
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "new timestamp version 1 must be >= 2"})
assertVersionEquals(t, metadata.TIMESTAMP, 2)
}
func TestNewTimestampSnapshotRollback(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a rollback attack.
simulator.Sim.MDSnapshot.Signed.Version = 2
simulator.Sim.UpdateTimestamp() // timestamp v2
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
// Snapshot meta version is smaller than previous
simulator.Sim.MDTimestamp.Signed.Meta["snapshot.json"].Version = 1
simulator.Sim.MDTimestamp.Signed.Version += 1 // timestamp v3
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "new snapshot version 1 must be >= 2"})
assertVersionEquals(t, metadata.TIMESTAMP, 2)
}
func TestNewTimestampExpired(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a freeze attack
simulator.Sim.MDTimestamp.Signed.Expires = simulator.PastDateTime
simulator.Sim.UpdateTimestamp()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"})
assertFilesExist(t, []string{metadata.ROOT})
}
func TestNewTimestampFastForwardRecovery(t *testing.T) {
// Test timestamp fast-forward recovery using key rotation.
// The timestamp recovery is performed with the following steps:
// - Remove the timestamp key
// - Create and add a new key for timestamp
// - Bump and publish root
// - Rollback the timestamp version
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// attacker updates to a higher version
simulator.Sim.MDTimestamp.Signed.Version = 99999
// client refreshes the metadata and sees the new timestamp version
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.TIMESTAMP, 99999)
// repository rotates timestamp keys, rolls back timestamp version
simulator.Sim.RotateKeys(metadata.TIMESTAMP)
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
simulator.Sim.MDTimestamp.Signed.Version = 1
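// rotating the timestamp key in the new root makes the client discard its
// locally cached timestamp, so the rollback check against v99999 no longer
// applies and version 1 is accepted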
// client refreshes the metadata and sees the initial timestamp version
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.TIMESTAMP, 1)
}
func TestNewSnapshotHashMismatch(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check against timestamp role’s snapshot hash
// Update timestamp with snapshot's hashes
simulator.Sim.ComputeMetafileHashesAndLength = true
simulator.Sim.UpdateTimestamp() // timestamp v2
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
// Modify snapshot contents without updating
// timestamp's snapshot hash
simulator.Sim.MDSnapshot.Signed.Expires = simulator.Sim.MDSnapshot.Signed.Expires.Add(time.Hour * 24)
simulator.Sim.MDSnapshot.Signed.Version += 1 // snapshot v2
simulator.Sim.MDTimestamp.Signed.Meta["snapshot.json"].Version = simulator.Sim.MDSnapshot.Signed.Version
simulator.Sim.MDTimestamp.Signed.Version += 1 // timestamp v3
// Hash mismatch error
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrLengthOrHashMismatch{Msg: "hash verification failed - mismatch for algorithm sha256"})
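// Timestamp v3 verifies and is persisted, but the modified snapshot fails
// the hash check recorded in timestamp meta, so the trusted snapshot stays at v1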
assertVersionEquals(t, metadata.TIMESTAMP, 3)
assertVersionEquals(t, metadata.SNAPSHOT, 1)
}
func TestNewSnapshotUnsigned(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for an arbitrary software attack
delete(simulator.Sim.Signers, metadata.SNAPSHOT)
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying snapshot failed, not enough signatures, got 0, want 1"})
assertFilesExist(t, []string{metadata.ROOT, metadata.TIMESTAMP})
}
func TestNewSnapshotVersionMismatch(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check against timestamp role’s snapshot version
// Increase snapshot version without updating timestamp
simulator.Sim.MDSnapshot.Signed.Version += 1
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected 1, got 2"})
assertFilesExist(t, []string{metadata.ROOT, metadata.TIMESTAMP})
}
func TestNewSnapshotVersionRollback(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a rollback attack
simulator.Sim.MDSnapshot.Signed.Version = 2
simulator.Sim.UpdateTimestamp()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
simulator.Sim.MDSnapshot.Signed.Version = 1
simulator.Sim.UpdateTimestamp()
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "new snapshot version 1 must be >= 2"})
assertVersionEquals(t, metadata.SNAPSHOT, 2)
}
func TestNewSnapshotFastForwardRecovery(t *testing.T) {
// Test snapshot fast-forward recovery using key rotation.
// The snapshot recovery requires rotating both the snapshot and timestamp keys.
// It is performed with the following steps:
// - Remove the snapshot and timestamp keys
// - Create and add a new key for snapshot and timestamp
// - Rollback snapshot version
// - Bump and publish root
// - Bump the timestamp
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// attacker updates to a higher version (bumping timestamp is required)
simulator.Sim.MDSnapshot.Signed.Version = 99999
simulator.Sim.UpdateTimestamp()
// client refreshes the metadata and sees the new snapshot version
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.SNAPSHOT, 99999)
// repository rotates snapshot & timestamp keys, rolls back snapshot
simulator.Sim.RotateKeys(metadata.SNAPSHOT)
simulator.Sim.RotateKeys(metadata.TIMESTAMP)
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
simulator.Sim.MDSnapshot.Signed.Version = 1
simulator.Sim.UpdateTimestamp()
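// rotating the snapshot and timestamp keys in the new root makes the client
// discard its locally cached timestamp and snapshot, so the rollback check
// against v99999 no longer applies and snapshot version 1 is accepted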
// client refreshes the metadata and sees the initial snapshot version
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.SNAPSHOT, 1)
}
func TestNewSnapshotExpired(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a freeze attack
simulator.Sim.MDSnapshot.Signed.Expires = simulator.PastDateTime
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"})
assertFilesExist(t, []string{metadata.ROOT})
}
func TestNewTargetsHashMismatch(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check against snapshot role’s targets hashes
simulator.Sim.ComputeMetafileHashesAndLength = true
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
// Modify targets contents without updating
// snapshot's targets hashes
simulator.Sim.MDTargets.Signed.Version += 1
simulator.Sim.MDSnapshot.Signed.Meta["targets.json"].Version = simulator.Sim.MDTargets.Signed.Version
simulator.Sim.MDSnapshot.Signed.Version += 1
simulator.Sim.UpdateTimestamp()
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrLengthOrHashMismatch{Msg: "hash verification failed - mismatch for algorithm sha256"})
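// Snapshot v3 verifies against the refreshed timestamp and is persisted,
// but the modified targets fail the hash check recorded in snapshot meta,
// so the trusted targets stay at v1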
assertVersionEquals(t, metadata.SNAPSHOT, 3)
assertVersionEquals(t, metadata.TARGETS, 1)
}
func TestNewTargetsUnsigned(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for an arbitrary software attack
delete(simulator.Sim.Signers, metadata.TARGETS)
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrUnsignedMetadata{Msg: "Verifying targets failed, not enough signatures, got 0, want 1"})
assertFilesExist(t, []string{metadata.ROOT, metadata.TIMESTAMP, metadata.SNAPSHOT})
}
func TestNewTargetsVersionMismatch(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check against snapshot role’s targets version
// Increase targets version without updating snapshot
simulator.Sim.MDTargets.Signed.Version += 1
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected targets version 1, got 2"})
assertFilesExist(t, []string{metadata.ROOT, metadata.TIMESTAMP, metadata.SNAPSHOT})
}
func TestNewTargetsExpired(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Check for a freeze attack.
simulator.Sim.MDTargets.Signed.Expires = simulator.PastDateTime
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrExpiredMetadata{Msg: "new targets is expired"})
assertFilesExist(t, []string{metadata.ROOT, metadata.TIMESTAMP, metadata.SNAPSHOT})
}
func TestComputeMetafileHashesLength(t *testing.T) {
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
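// Verify that refresh succeeds both when the repository publishes hashes and
// length in timestamp/snapshot meta and when it omits them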
simulator.Sim.ComputeMetafileHashesAndLength = true
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.TIMESTAMP, 2)
assertVersionEquals(t, metadata.SNAPSHOT, 2)
simulator.Sim.ComputeMetafileHashesAndLength = false
simulator.Sim.UpdateSnapshot()
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.TIMESTAMP, 3)
assertVersionEquals(t, metadata.SNAPSHOT, 3)
}
func TestNewTargetsFastForwardRecovery(t *testing.T) {
// Test targets fast-forward recovery using key rotation.
// The targets recovery is done by issuing new snapshot keys, using the
// following steps:
// - Remove the snapshot key
// - Create and add a new key for snapshot
// - Bump and publish root
// - Roll back the targets version
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// attacker updates to a higher version
simulator.Sim.MDTargets.Signed.Version = 99999
simulator.Sim.UpdateSnapshot()
// client refreshes the metadata and sees the new targets version
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.TARGETS, 99999)
// repository rotates snapshot keys, rolls back targets version
simulator.Sim.RotateKeys(metadata.SNAPSHOT)
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
simulator.Sim.MDTargets.Signed.Version = 1
simulator.Sim.UpdateSnapshot()
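// rotating the snapshot key in the new root makes the client discard its
// locally cached snapshot and targets, so the rollback check against v99999
// no longer applies and targets version 1 is accepted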
// client refreshes the metadata and sees the initial targets version
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
assertVersionEquals(t, metadata.TARGETS, 1)
}
func TestSnapshotRollbackWithLocalSnapshotHashMismatch(t *testing.T) {
// Test triggering snapshot rollback check on a newly downloaded snapshot
// when the local snapshot is loaded even when there is a hash mismatch
// with timestamp.snapshot_meta.
// By raising this flag on a timestamp update, the simulator will:
// 1) compute the hash of the new modified version of snapshot
// 2) assign the hash to timestamp.snapshot_meta
// The purpose is to create a hash mismatch between timestamp.meta and
// the local snapshot, but to have hash match between timestamp.meta and
// the next snapshot version.
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
simulator.Sim.ComputeMetafileHashesAndLength = true
// Initialize all metadata and assign targets version higher than 1.
simulator.Sim.MDTargets.Signed.Version = 2
simulator.Sim.UpdateSnapshot()
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
// The new targets must have a lower version than the local trusted one.
simulator.Sim.MDTargets.Signed.Version = 1
simulator.Sim.UpdateSnapshot()
// During the snapshot update, the local snapshot will be loaded even if
// there is a hash mismatch with timestamp snapshot meta, because it is
// considered trusted.
// Should fail as a new version of snapshot will be fetched which lowers
// the snapshot meta "targets.json" version by 1 and throws an error.
_, err = runRefresh(updaterConfig, time.Now())
assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{Msg: "expected targets.json version 1, got 2"})
}
func TestExpiredMetadata(t *testing.T) {
// Verifies that expired local timestamp/snapshot can be used for
// updating from remote.
// The updates and verifications are performed with the following timing:
// - Timestamp v1 expiry set to day 7
// - First updater refresh performed on day 0
// - Repository bumps snapshot and targets to v2 on day 0
// - Timestamp v2 expiry set to day 21
// - Second updater refresh performed on day 18,
// it is successful and the final timestamp/snapshot versions are v2
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
now := time.Now()
simulator.Sim.MDTimestamp.Signed.Expires = now.Add(time.Hour * 7 * 24)
// Make a successful update of valid metadata which stores it in cache
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
simulator.Sim.MDTargets.Signed.Version += 1
simulator.Sim.UpdateSnapshot()
simulator.Sim.MDTimestamp.Signed.Expires = now.Add(time.Hour * 21 * 24)
// Mocking time so that the local timestamp has expired
// but the new timestamp has not
moveInTime := now.Add(time.Hour * 18 * 24)
_, err = runRefresh(updaterConfig, moveInTime)
assert.NoError(t, err)
// Assert that the final version of timestamp/snapshot is version 2
// which means a successful refresh is performed
// with expired local metadata
mdTimestamp, err := metadata.Timestamp().FromFile(filepath.Join(simulator.MetadataDir, "timestamp.json"))
assert.NoError(t, err)
assert.Equal(t, int64(2), mdTimestamp.Signed.Version)
mdSnapshot, err := metadata.Snapshot().FromFile(filepath.Join(simulator.MetadataDir, "snapshot.json"))
assert.NoError(t, err)
assert.Equal(t, int64(2), mdSnapshot.Signed.Version)
mdTargets, err := metadata.Targets().FromFile(filepath.Join(simulator.MetadataDir, "targets.json"))
assert.NoError(t, err)
assert.Equal(t, int64(2), mdTargets.Signed.Version)
}
func TestMaxMetadataLengths(t *testing.T) {
// Test that clients configured max metadata lengths are respected
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// client has root v1 already: create a new one available for download
simulator.Sim.MDRoot.Signed.Version += 1
simulator.Sim.PublishRoot()
// make sure going over any length limit raises ErrDownloadLengthMismatch
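// each limit is exercised with a freshly initialized updater so that one
// failed refresh does not affect the next check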
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
updater := initUpdater(updaterConfig)
updater.cfg.RootMaxLength = 100
err = updater.Refresh()
assert.ErrorIs(t, err, &metadata.ErrDownloadLengthMismatch{Msg: "Downloaded 1567 bytes exceeding the maximum allowed length of 100"})
updater = initUpdater(updaterConfig)
updater.cfg.TimestampMaxLength = 100
err = updater.Refresh()
assert.ErrorIs(t, err, &metadata.ErrDownloadLengthMismatch{Msg: "Downloaded 1567 bytes exceeding the maximum allowed length of 100"})
updater = initUpdater(updaterConfig)
updater.cfg.SnapshotMaxLength = 100
err = updater.Refresh()
assert.ErrorIs(t, err, &metadata.ErrDownloadLengthMismatch{Msg: "Downloaded 1567 bytes exceeding the maximum allowed length of 100"})
updater = initUpdater(updaterConfig)
updater.cfg.TargetsMaxLength = 100
err = updater.Refresh()
assert.ErrorIs(t, err, &metadata.ErrDownloadLengthMismatch{Msg: "Downloaded 1567 bytes exceeding the maximum allowed length of 100"})
// All good with normal length limits
updater = initUpdater(updaterConfig)
err = updater.Refresh()
assert.NoError(t, err)
}
func TestTimestampEqVersionsCheck(t *testing.T) {
// Test that a modified timestamp with different content, but the same
// version doesn't replace the valid locally stored one.
err := loadOrResetTrustedRootMetadata()
assert.NoError(t, err)
// Make a successful update of valid metadata which stores it in cache
updaterConfig, err := loadUpdaterConfig()
assert.NoError(t, err)
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
initialTimestampMetadataVer := simulator.Sim.MDTimestamp.Signed.Meta["snapshot.json"].Version
// Change timestamp without bumping its version in order to test if a new
// timestamp with the same version will be persisted.
simulator.Sim.MDTimestamp.Signed.Meta["snapshot.json"].Version = 100
_, err = runRefresh(updaterConfig, time.Now())
assert.NoError(t, err)
// If the local timestamp md file has the same snapshot_meta.version as
// the initial one, then the new modified timestamp has not been stored.
timestamp, err := metadata.Timestamp().FromFile(filepath.Join(simulator.MetadataDir, "timestamp.json"))
assert.NoError(t, err)
assert.Equal(t, initialTimestampMetadataVer, timestamp.Signed.Meta["snapshot.json"].Version)
}